python_code | repo_name | file_path
---|---|---|
import torch
from contextlib import contextmanager
from functools import partial
from torch.cuda.amp import autocast
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
@contextmanager
def null_context():
yield
def linear_attention_normalization(q, k, causal=False):
if not causal:
return torch.einsum('...nm,...m->...n', q, k.sum(dim=-2))
else:
return torch.einsum('...nm,...nm->...n', q, k.cumsum(dim=-2))
# efficient causal linear attention, created by EPFL
def causal_linear_attention(q, k, v, need_weights=False):
from fast_transformers.causal_product import causal_dot_product
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(causal_dot_product) if is_half else causal_dot_product
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
q_k_v = causal_dot_product_fn(q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1)).squeeze(1)
if need_weights:
attn = torch.einsum('...im,...jm', q, k)
causal_mask = torch.triu(torch.ones(q.shape[-2], k.shape[-2], dtype=torch.bool,
device=k.device), diagonal=1)
attn.masked_fill_(causal_mask, 0.0)
else:
attn = None
return q_k_v, attn
# non-causal linear attention
def linear_attention(q, k, v, need_weights=False):
k_v = torch.einsum('...nm,...nd->...md', k, v)
q_k_v = torch.einsum('...nm,...md->...nd', q, k_v)
attn = None if not need_weights else torch.einsum('...im,...jm->...ij', q, k)
return q_k_v, attn
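# --- Minimal usage sketch (not in the original file): shape check for the linear
# attention helpers above, assuming feature-mapped (non-negative) queries/keys of
# shape (..., n, m) and values of shape (..., n, d).
if __name__ == '__main__':
    q = torch.rand(2, 4, 128, 16)    # (batch, heads, seq, nb_features)
    k = torch.rand(2, 4, 128, 16)
    v = torch.randn(2, 4, 128, 64)
    out, _ = linear_attention(q, k, v)            # (2, 4, 128, 64)
    denom = linear_attention_normalization(q, k)  # (2, 4, 128), softmax-style denominator
    print((out / denom.unsqueeze(-1)).shape)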
| fly-master | src/models/attention/scatterbrain_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/full_attention.py
class FullAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, softmax_temp=None, attention_dropout=0.0, device=None, dtype=None):
super().__init__()
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=True):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# Scale the query instead of applying the softmax temperature to the
# dot products
query = query * softmax_temp
# Compute the unnormalized attention and apply the masks
QK = torch.einsum("bthe,bshe->bhts", query, key)
if attn_mask is not None and not attn_mask.all_ones:
QK.masked_fill_(~attn_mask.bool_matrix, float('-inf'))
if key_padding_mask is not None and not key_padding_mask.all_ones:
QK.masked_fill_(rearrange(~key_padding_mask.bool_matrix, 'b s -> b 1 1 s'),
float('-inf'))
# Compute the attention and the weighted average
attn = torch.softmax(QK, dim=-1)
A = self.dropout(attn)
output = torch.einsum("bhts,bshd->bthd", A, value)
return output, attn if need_weights else None
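# --- Minimal usage sketch (not in the original file), assuming no masks: shows the
# (B, T, H, E) input layout and the (B, T, H, D) output of FullAttention.
if __name__ == '__main__':
    attn_layer = FullAttention(attention_dropout=0.0)
    q = torch.randn(2, 10, 4, 16)    # (B, T, H, E)
    k = torch.randn(2, 12, 4, 16)    # (B, S, H, E)
    v = torch.randn(2, 12, 4, 32)    # (B, S, H, D)
    out, attn = attn_layer(q, k, v, need_weights=True)
    print(out.shape, attn.shape)     # (2, 10, 4, 32) and (2, 4, 10, 12)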
| fly-master | src/models/attention/full_attention.py |
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/attn.py
import math
import torch
import torch.nn as nn
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.permutation import invert_permutation
from src.models.attention.hash_utils import XBOXPLUS, lsh_clustering
from src.models.attention.batching_utils import batched_index_select
from src.models.attention.reformer_attention import max_neg_value
from src.models.modules.masking import LengthMask
from src.models.attention.mask_utils import pad_mask
class SmyrfAttention(nn.Module):
def __init__(self, n_hashes, q_cluster_size, k_cluster_size,
r=1, # LSH clustering
softmax_temp=None, attention_dropout=0., device=None, dtype=None):
super().__init__()
self.n_hashes = n_hashes
self.q_cluster_size = q_cluster_size
self.k_cluster_size = k_cluster_size
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.hash_fn = XBOXPLUS()
self.clustering_params = {'r': r, 'n_hashes': self.n_hashes}
def hash_vectors(self, query, key, key_padding_mask=None):
# XBOX+ transform
self.hash_fn.set_norms(query, key)
query_t = self.hash_fn.Q(query)
key_t = self.hash_fn.K(key)
num_clusters = query_t.shape[-2] // self.q_cluster_size
assert num_clusters == (key_t.shape[-2] // self.k_cluster_size), 'Unequal number of clusters for query and key.'
q_positions, k_positions = lsh_clustering(query_t, key_t, **self.clustering_params,
key_padding_mask=key_padding_mask)
return q_positions, k_positions
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
if attn_mask is not None:
raise NotImplementedError('Support for attn_mask is not implemented')
_, q_seqlen_og, _, _ = query.shape
_, k_seqlen_og, _, _ = key.shape
query = pad_to_multiple(query, self.q_cluster_size, dims=1)
key = pad_to_multiple(key, self.k_cluster_size, dims=1)
value = pad_to_multiple(value, self.k_cluster_size, dims=1)
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# pad the masks
if S > k_seqlen_og:
if key_padding_mask is None:
key_padding_mask = LengthMask(key.new_full((key.shape[0],), k_seqlen_og,
dtype=torch.long), max_len=S)
else:
key_padding_mask = pad_mask(key_padding_mask, pad_length=S - k_seqlen_og,
left=False, value=False)
query = rearrange(query, 'b t h e -> (b h) t e')
key = rearrange(key, 'b t h e -> (b h) t e')
value = rearrange(value, 'b s h d -> (b h) s d')
bs = query.shape[0]
if key_padding_mask is not None and not key_padding_mask.all_ones:
# Repeat for all heads
key_padding_mask_bool = repeat(key_padding_mask.bool_matrix, 'b s -> (b h) s', h=H)
else:
key_padding_mask_bool = None
with torch.no_grad():
q_positions, k_positions = self.hash_vectors(query, key,
rearrange(key_padding_mask_bool,
'b s -> 1 b s')
if key_padding_mask_bool is not None else None)
# sort queries, keys, values
def sort_to_buckets(x, perm, bucketsz):
return rearrange(batched_index_select(rearrange(x, 'b s d -> 1 b s d'), perm),
'h b (nbuckets bucketsz) d -> h b nbuckets bucketsz d',
bucketsz=bucketsz)
s_query = sort_to_buckets(query, q_positions, self.q_cluster_size)
s_key = sort_to_buckets(key, k_positions, self.k_cluster_size)
s_value = sort_to_buckets(value, k_positions, self.k_cluster_size)
inner = torch.einsum('...id,...jd->...ij', s_query, s_key) * softmax_temp
masked_value = max_neg_value(inner)
# mask out attention to padded tokens
if key_padding_mask is not None and not key_padding_mask.all_ones:
s_key_padding_mask = sort_to_buckets(rearrange(key_padding_mask_bool,
'b s -> b s 1'),
k_positions, self.k_cluster_size)
s_key_padding_mask = rearrange(s_key_padding_mask,
'... bucketsz 1 -> ... 1 bucketsz')
inner.masked_fill_(~s_key_padding_mask, masked_value)
q_rev_positions = invert_permutation(q_positions)
# free memory
if not need_weights:
del q_positions, k_positions
# softmax denominator
dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
# softmax
dots = torch.exp(inner - dots_logsumexp)
# If the whole row within this bucket is masked out, then inner is the uniform distribution.
# We actually want it to be zero.
if key_padding_mask is not None and not key_padding_mask.all_ones:
full_row_mask = (inner <= masked_value).all(dim=-1, keepdim=True)
dots = dots.masked_fill(full_row_mask, 0.0)
# dropout
dropped_dots = self.dropout(dots)
# n_hashes outs
so = torch.einsum('...ij,...jd->...id', dropped_dots, s_value)
# undo sort
def unsort_from_buckets(s_x, perm_inverse):
b_x = rearrange(s_x, 'h b nbuckets bucketsz d -> h b (nbuckets bucketsz) d')
return batched_index_select(b_x, perm_inverse)
o = unsort_from_buckets(so, q_rev_positions)
logits = unsort_from_buckets(dots_logsumexp, q_rev_positions)
# free memory
del q_rev_positions
probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
out = torch.sum(o * probs, dim=0)
out = rearrange(out, '(b h) t d -> b t h d', h=H)
out = out[:, :q_seqlen_og]
attn = None
if need_weights:
q_pos_2d = rearrange(q_positions, 'h b (nbuckets bucketsz) -> h b nbuckets bucketsz 1',
bucketsz=self.q_cluster_size)
k_pos_2d = rearrange(k_positions, 'h b (nbuckets bucketsz) -> h b nbuckets 1 bucketsz',
bucketsz=self.k_cluster_size)
pos_2d = rearrange(q_pos_2d * S + k_pos_2d,
'h b nbuckets qbucketsz kbucketsz -> h b (nbuckets qbucketsz kbucketsz)')
unsorted_dots = torch.zeros(self.n_hashes, bs, T * S, device=query.device)
unsorted_dots.scatter_(-1, pos_2d, dots.view_as(pos_2d))
del pos_2d
unsorted_dots = rearrange(unsorted_dots,
'h b (q_seqlen k_seqlen) -> h b q_seqlen k_seqlen',
q_seqlen=T)
attn = torch.sum(unsorted_dots * probs, dim=0)
attn = rearrange(attn, '(b h) t s -> b h t s', h=H)[:, :, :q_seqlen_og, :k_seqlen_og]
return out, attn
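# --- Minimal usage sketch (not in the original file), assuming sequence lengths that
# are already multiples of the cluster sizes and no masks, so no padding is needed.
if __name__ == '__main__':
    attn_layer = SmyrfAttention(n_hashes=4, q_cluster_size=16, k_cluster_size=16)
    q = torch.randn(2, 64, 4, 32)    # (B, T, H, E)
    k = torch.randn(2, 64, 4, 32)    # (B, S, H, E)
    v = torch.randn(2, 64, 4, 32)    # (B, S, H, D)
    out, _ = attn_layer(q, k, v)
    print(out.shape)                 # (2, 64, 4, 32)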
| fly-master | src/models/attention/smyrf_attention.py |
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
import math
import torch
from einops import rearrange, repeat
from fast_transformers.feature_maps.base import FeatureMap
from src.models.attention.projection_utils import gaussian_orthogonal_random_matrix
from src.models.attention.performer_utils import softmax_kernel
class PerformerFeatures(FeatureMap):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
query_dims: int, The input query dimensions in order to sample
the noise matrix
n_features: int, The size of the feature map (should be divisible by 2)
(default: query_dims)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dims))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
redraw: int, Redraw the random matrix every 'redraw' times
(default: 1)
deterministic_eval: bool, Only redraw the random matrix during training
(default: False)
"""
def __init__(self, query_dims, n_features=None, ortho_scaling=0, softmax_temp=None,
orthogonal=False, redraw=1, deterministic_eval=False, eps=1e-4,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(query_dims)
self.n_features = n_features or int(query_dims * math.log(query_dims))
self.ortho_scaling = ortho_scaling
# TODO: we're not using @orthogonal atm
self.orthogonal = orthogonal
# TODO: we're not using @softmax_temp atm
self.softmax_temp = 1 / math.sqrt(query_dims) if softmax_temp is None else softmax_temp
# self.redraw = redraw
# TODO: not redrawing atm, so I'm setting it to an irrational number
self.redraw = math.pi
self.deterministic_eval = deterministic_eval
self.eps = eps # Stabilizer for softmax kernel
# Make a buffer for storing the sampled projection_matrix
self.register_buffer("projection_matrix", torch.zeros(self.query_dims, self.n_features,
**factory_kwargs))
self.factory_kwargs = factory_kwargs
self._calls = -1
def new_feature_map(self, device):
# If we are not training skip the generation of a new feature map
if self.deterministic_eval and not self.training:
return
# Only redraw the new feature map every self.redraw times
self._calls += 1
if (self._calls % self.redraw) != 0:
return
projection_matrix = gaussian_orthogonal_random_matrix(nrows=self.n_features,
ncols=self.query_dims,
scaling=self.ortho_scaling,
device=device,
dtype=self.factory_kwargs['dtype'])
self.register_buffer("projection_matrix", projection_matrix)
def forward_queries(self, x):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=True,
eps=self.eps)
def forward_keys(self, x):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=False,
eps=self.eps)
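# --- Minimal usage sketch (not in the original file), assuming a CPU run: draw a
# projection matrix and encode random queries/keys with the Performer softmax kernel.
if __name__ == '__main__':
    feat = PerformerFeatures(query_dims=32, n_features=64, dtype=torch.float32)
    feat.new_feature_map(device='cpu')
    q = torch.randn(2, 4, 128, 32)     # (batch, heads, seq, head_dim)
    k = torch.randn(2, 4, 128, 32)
    q_prime = feat.forward_queries(q)  # non-negative random features
    k_prime = feat.forward_keys(k)
    print(q_prime.shape, k_prime.shape)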
| fly-master | src/models/attention/performer_feature_map.py |
import torch
from einops import rearrange, repeat
# Should do the same thing as https://github.com/openai/triton/blob/8bedcce9befbbe95d8fe0a082718edc4050e2831/python/triton/testing.py#L22
# but faster.
def sparsify_tensor(x, mask):
"""
Arguments:
x: (..., n_head, T, S)
mask: (n_head, T // block_size, S // block_size1), with dtype torch.long
Return:
x_sparse: (..., nnz(mask), block_size, block_size1)
"""
block_size, block_size1 = x.shape[-2] // mask.shape[-2], x.shape[-1] // mask.shape[-1]
x_reshaped = rearrange(x, '... h (p blk_sz) (r blk_sz1) -> ... (h p r) blk_sz blk_sz1',
blk_sz=block_size, blk_sz1=block_size1)
return x_reshaped[..., mask.flatten().bool().to(x.device), :, :]
def densify_tensor(x, mask, value=0.0):
"""
Arguments:
x: (..., nnz, block_size, block_size1)
mask: (n_head, p, r), with dtype torch.long
Return:
x_dense: (..., n_head, p * block_size, r * block_size1)
"""
mask = mask.bool().to(x.device)
batch_shape = x.shape[:-3]
nnz, block_size, block_size1 = x.shape[-3:]
n_head, p, r = mask.shape
assert nnz == mask.sum(), 'Mask has a different number of nonzero blocks than input'
x_dense = torch.full((*batch_shape, n_head * p * r, block_size, block_size1), value,
dtype=x.dtype, device=x.device)
x_dense[..., mask.flatten(), :, :] = x
return rearrange(x_dense, '... (h p r) blk_sz blk_sz1 -> ... h (p blk_sz) (r blk_sz1)', p=p, r=r)
# Should do the same thing as https://github.com/openai/triton/blob/8bedcce9befbbe95d8fe0a082718edc4050e2831/python/triton/testing.py#L52
# but faster.
def mask_tensor(x, mask, value=0.0):
"""
Arguments:
x: (batch_size, n_head, T, S)
mask: (n_head, T // block_size, S // block_size), with dtype torch.long
Return:
x_sparse: (batch_size, nnz(mask), block_size, block_size)
"""
block_size, block_size1 = x.shape[-2] // mask.shape[-2], x.shape[-1] // mask.shape[-1]
n_head, p, r = mask.shape
out = rearrange(x.clone(), '... h (p blk_sz) (r blk_sz1) -> ... (h p r) blk_sz blk_sz1',
blk_sz=block_size, blk_sz1=block_size1)
out[..., ~mask.flatten().bool().to(x.device), :, :] = value
return rearrange(out, '... (h p r) blk_sz blk_sz1 -> ... h (p blk_sz) (r blk_sz1)', p=p, r=r)
def sparsify_broadcast_tensor(x, mask):
"""
Arguments:
x: (batch_size, n_head, T)
mask: (n_head, T // block_size, S // block_size), with dtype torch.long
block_size: int in {16, 32, 64, 128}
Return:
x_sparse: (batch_size, nnz(mask), block_size, 1)
"""
block_size = x.shape[-1] // mask.shape[-2]
# x_expanded = repeat(x, 'b h (p blk_sz) -> b (h p r) blk_sz 1', blk_sz=block_size, r=mask.shape[2])
# return x_expanded[:, mask.flatten().bool().to(x.device)]
x_reshaped = rearrange(x, 'b h (p blk_sz) -> b h p blk_sz 1', blk_sz=block_size)
h_idx, row_idx, _ = torch.nonzero(mask, as_tuple=True)
return x_reshaped[:, h_idx, row_idx]
def block_frob_sqnorm_estimate(A, B, block_size, n_projs=None):
"""
Estimate the Frobenius squared norm of the blocks of the matrix product A @ B, without
materializing the product.
Arguments:
A: (m * block_size, k * block_size)
B: (k * block_size, n * block_size)
block_size: int
n_projs: int, the number of random projections. Defaults to block_size // 4.
Return:
F: (m, n), where F[i, j] is the estimate of the Frobenius squared norm of the (i, j) block of
C = A @ B.
"""
if n_projs is None:
n_projs = block_size // 4
A_block = rearrange(A, '(m blk_sz) (k blk_sz1) -> m blk_sz k blk_sz1',
blk_sz=block_size, blk_sz1=block_size)
B_block = rearrange(B, '(k blk_sz) (n blk_sz1) -> k blk_sz n blk_sz1',
blk_sz=block_size, blk_sz1=block_size)
# A single random projection shared across block columns of B, matching the dense
# reference in __main__ below; scaled by 1/sqrt(n_projs) so the estimate is unbiased.
proj = torch.randn(block_size, n_projs, device=B.device, dtype=B.dtype) / n_projs ** 0.5
C_block_sqnorm_estimate = torch.linalg.norm(
torch.einsum('m s k t, k t n p -> m n s p', A_block, B_block @ proj),
dim=(-1, -2)
) ** 2
return C_block_sqnorm_estimate
if __name__ == '__main__':
block_size = 32
m = 5
n = 7
k = 3
n_projs = 8
# We use values from pretrained weights instead of random values just because random iid values
# for A and B will yield Frob squared norm that are all of the same magnitude.
# Pretrained weights are a bit more interesting
import torchvision
resnet18 = torchvision.models.resnet18(pretrained=True)
A = resnet18.layer4[0].conv1.weight[:m * block_size, :k * block_size, 0, 0]
B = resnet18.layer4[1].conv1.weight[:k * block_size, :n * block_size, 0, 0]
C = A @ B
C_block = rearrange(C, '(m blk_sz) (n blk_sz1) -> m n blk_sz blk_sz1',
blk_sz=block_size, blk_sz1=block_size)
C_frob_sqnorm = torch.linalg.norm(C) ** 2
proj = torch.randn(n * block_size, n_projs) / n_projs ** 0.5
C_frob_sqnorm_estimate = torch.linalg.norm(A @ (B @ proj)) ** 2
C_block_sqnorm = torch.linalg.norm(C_block, dim=(-1, -2)) ** 2
C_block_sqnorm_estimate = block_frob_sqnorm_estimate(A, B, block_size)
print(C_block_sqnorm)
print(C_block_sqnorm_estimate)
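# Additional round-trip sketch (not in the original file), assuming a 4-D
# attention-score layout: densify_tensor should invert sparsify_tensor up to the
# blocks zeroed out by the layout, i.e. match mask_tensor.
if __name__ == '__main__':
    n_head, p, r, blk = 2, 3, 4, 16
    x = torch.randn(5, n_head, p * blk, r * blk)
    layout = (torch.rand(n_head, p, r) > 0.5).long()
    x_roundtrip = densify_tensor(sparsify_tensor(x, layout), layout)
    assert torch.allclose(x_roundtrip, mask_tensor(x, layout))
    print('sparsify/densify round-trip OK')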
| fly-master | src/models/attention/blocksparse_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from fast_transformers.local_product import local_dot_product, local_weighted_average
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.feature_maps_sb import SBPerformerFeatures
from src.models.attention.scatterbrain_utils import linear_attention_normalization
from src.models.attention.scatterbrain_utils import causal_linear_attention, linear_attention
class SBLocalAttention(nn.Module):
"""Implement fast local attention where a query can only attend to
neighboring keys.
In this attention module the query Q_i can only attend to a key K_j if
|i-j| < local_context/2.
Arguments
---------
local_context: The neighborhood to consider for local attention.
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, local_context, dim_heads, nb_features=None, ortho_scaling=0,
causal=False, softmax_temp=None, attention_dropout=0.0, softmax_eps=0.0,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.feature_map = SBPerformerFeatures(dim_heads, nb_features, ortho_scaling=ortho_scaling,
softmax_temp=softmax_temp, eps=softmax_eps,
**factory_kwargs)
self.local_context = local_context
self.causal = causal
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.softmax_eps = softmax_eps
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False,
return_attn_unnormalized=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# TODO: check causal
if attn_mask is None:
attn_mask_additive_matrix = torch.zeros(T, S, device=query.device)
else:
attn_mask_additive_matrix = attn_mask.additive_matrix_finite
if key_padding_mask is None:
key_padding_mask_lengths = torch.full(size=(B,), fill_value=S, dtype=torch.long,
device=key.device)
else:
key_padding_mask_lengths = key_padding_mask.lengths
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t h e -> b h t e').contiguous()
key = rearrange(key, 'b s h e -> b h s e').contiguous()
value = rearrange(value, 'b s h d -> b h s d').contiguous()
self.feature_map.new_feature_map(query.device)
q_prime, q_prime_log_scale = self.feature_map.forward_queries(query)
k_prime, k_prime_log_scale = self.feature_map.forward_keys(key)
prime_log_scale = q_prime_log_scale + k_prime_log_scale
m = q_prime.shape[-1]
if key_padding_mask is not None and not key_padding_mask.all_ones:
k_prime = k_prime.masked_fill(~rearrange(key_padding_mask.bool_matrix,
'b s -> b 1 s 1'), 0.0)
attn_fn = linear_attention if not self.causal else causal_linear_attention
q_prime_k_prime_1 = linear_attention_normalization(q_prime, k_prime, causal=self.causal)
q_prime_k_prime_v, attn_prime = attn_fn(q_prime, k_prime, value, need_weights=need_weights)
QK = softmax_temp * local_dot_product(
query, key, attn_mask_additive_matrix, key_padding_mask_lengths,
self.local_context
)
dots_prime = local_dot_product(
q_prime, k_prime, attn_mask_additive_matrix, key_padding_mask_lengths,
self.local_context
)
# local_dot_product fills in -1e24 for invalid locations. We want to set them to zero.
# dots_prime[dots_prime <= -1e24] = 0.0
i = rearrange(torch.arange(T, device=query.device), 't -> 1 1 t 1')
j = torch.arange(self.local_context, device=query.device)
local_idx = i - self.local_context // 2 + j
valid_idx_mask = ((local_idx >= 0)
& (local_idx < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
dots_prime.masked_fill_(~valid_idx_mask, 0.0)
assert torch.all(dots_prime >= 0)
# Compute the normalization first
QK_lse = torch.logsumexp(QK, dim=-1, keepdim=True)
dots_prime_sum = dots_prime.sum(dim=-1, keepdim=True)
lr_log_normalization = torch.log((rearrange(q_prime_k_prime_1, 'b h s -> b h s 1')
- dots_prime_sum).clamp_min_(1e-24)) + prime_log_scale
log_normalization = torch.logaddexp(QK_lse, lr_log_normalization)
prime_scale = torch.exp(prime_log_scale - log_normalization)
# When we drop out, we want that location in the attn matrix to be zero.
# So here we dropout just torch.exp(QK) and leave -dots_prime, so that when we add it back
# to attn_prime it's equivalent to setting that location to zero.
dots = self.dropout(torch.exp(QK - log_normalization)) - dots_prime * prime_scale
out_local = local_weighted_average(dots, value)
out = out_local + q_prime_k_prime_v * prime_scale
attn = None
if need_weights:
attn_local = torch.zeros(B, H, T, S, device=query.device)
k = torch.arange(S, device=key.device)
idx = k - i
local_mask = ((idx >= -(self.local_context // 2))
& (idx < (self.local_context + 1) // 2)
& (k < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
attn_local.masked_scatter_(local_mask, dots.masked_select(valid_idx_mask))
attn = attn_local + attn_prime * prime_scale
if return_attn_unnormalized: # For testing purpose
attn = (attn, attn * torch.exp(log_normalization),
attn_prime * torch.exp(prime_log_scale))
return rearrange(out, 'b h t d -> b t h d'), attn
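# --- Minimal usage sketch (not in the original file), assuming no masks and that
# fast_transformers' local_product ops are importable on this machine.
if __name__ == '__main__':
    attn_layer = SBLocalAttention(local_context=8, dim_heads=32, nb_features=64)
    q = torch.randn(1, 64, 4, 32)    # (B, T, H, E)
    k = torch.randn(1, 64, 4, 32)
    v = torch.randn(1, 64, 4, 32)
    out, _ = attn_layer(q, k, v)
    print(out.shape)                 # (1, 64, 4, 32)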
| fly-master | src/models/attention/sblocal_attention.py |
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/utils.py
''' Utility functions for smyrf '''
import torch
import torch.nn.functional as F
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def random_flip(x):
flips = torch.ceil((torch.rand(x.shape, device=x.device) - 0.5)).to(torch.uint8)
return flips * x
def sign_randomness(fn):
def do(*args, **kwargs):
return random_flip(fn(*args, **kwargs))
return do
@sign_randomness
def hadamard_transform(u, normalize=False):
batch_size, n = u.shape
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
x = u[..., np.newaxis]
for d in range(m)[::-1]:
x = torch.cat((x[..., ::2, :] + x[..., 1::2, :], x[..., ::2, :] - x[..., 1::2, :]), dim=-1)
return x.squeeze(-2) / 2**(m / 2) if normalize else x.squeeze(-2)
def inversion_number(arr1, arr2):
'''
Counts "relative" mistakes.
'''
mapping = {}
count = 0
not_found = 0
for i, elem in enumerate(arr2):
mapping[elem] = i
for i, elem_a in enumerate(arr1):
if not elem_a in mapping:
not_found += 1
count += len(arr1[i+1:])
continue
for elem_b in arr1[i+1:]:
mapped_a = mapping[elem_a]
if not elem_b in mapping:
count += 1
continue
mapped_b = mapping[elem_b]
if mapped_a > mapped_b:
count += 1
return count, not_found
def two_dimensional(fn):
def do(self, x, *args, **kwargs):
if len(x.shape) == 2:
return fn(self, x, *args, **kwargs)
else:
x = x.reshape(-1, x.shape[-1])
return fn(self, x, *args, **kwargs)
return do
def sort_key_val(t1, t2, dim=-1, n_buckets=1):
'''
Sort t2 based on t1.
'''
values, indices = t1.sort(dim=dim)
t2 = t2.expand_as(t1)
return values, t2.gather(dim, indices)
def uniform(a, b, shape, device='cuda'):
'''
Draws shape samples from a uniform distribution U(a, b).
'''
return (b - a) * torch.rand(shape, device=device) + a
''' Preprocessing functions for ALSH '''
class AsymmetricTransform:
def Q(self, *args, **kwargs):
raise NotImplementedError('Query transform not implemented')
def K(self, *args, **kwargs):
raise NotImplementedError('Key transform not implemented')
class L2LSH(AsymmetricTransform):
def K(self, vec):
# Normalize x = vec / max_norm
norms = vec.norm(p=2, dim=-1).unsqueeze(-1)
max_norm = torch.max(norms, dim=0)[0]
x = vec / max_norm
# compute new_norms
norms = x.norm(p=2,dim=-1).unsqueeze(-1)
# transform: x = [x; norm_x**2, norm_x**4]
return torch.cat((x, norms**2, norms**4, norms**8), -1)
def Q(self, vec):
# normalize queries
x = (vec - vec.mean(dim=-1).unsqueeze(-1)) / vec.std(dim=-1).unsqueeze(-1)
device = vec.device
ext = torch.empty(x.shape[:-1] + (1,), device=device).fill_(0.5)
return torch.cat((x, ext, ext, ext), -1)
class XBOX(AsymmetricTransform):
def K(self, x):
norms = x.norm(p=2, dim=-1).unsqueeze(-1)
max_norm = torch.max(norms, dim=1).values.unsqueeze(-1)
ext = torch.sqrt(max_norm**2 - norms**2)
return torch.cat((x, ext), -1)
def Q(self, x):
zero = torch.zeros(*x.shape[:-1], 1, device=x.device)
return torch.cat((x, zero), -1)
class XBOXPLUS(AsymmetricTransform):
def set_norms(self, queries, keys):
self.q_norm_sq = queries.norm(p=2, dim=-1, keepdim=True).square()
self.k_norm_sq = keys.norm(p=2, dim=-1, keepdim=True).square()
MQ_sq = torch.amax(self.q_norm_sq, dim=-2, keepdim=True)
MK_sq = torch.amax(self.k_norm_sq, dim=-2, keepdim=True)
self.MQ_sq_MK_sq = MQ_sq + MK_sq
def K(self, x):
ext = (self.MQ_sq_MK_sq - self.k_norm_sq).sqrt()
return torch.cat([x, ext, torch.zeros_like(ext)], dim=-1)
def Q(self, x):
ext = (self.MQ_sq_MK_sq - self.q_norm_sq).sqrt()
return torch.cat([x, torch.zeros_like(ext), ext], dim=-1)
class XBOXMax(AsymmetricTransform):
def set_norms(self, queries, keys):
self.q_norm_sq = queries.norm(p=2, dim=-1, keepdim=True).square()
self.k_norm_sq = keys.norm(p=2, dim=-1, keepdim=True).square()
MQ_sq = torch.amax(self.q_norm_sq, dim=-2, keepdim=True)
MK_sq = torch.amax(self.k_norm_sq, dim=-2, keepdim=True)
self.MQ_sq_MK_sq_max = torch.maximum(MQ_sq, MK_sq)
def K(self, x):
ext = (self.MQ_sq_MK_sq_max - self.k_norm_sq).sqrt()
return torch.cat([x, ext, torch.zeros_like(ext)], dim=-1)
def Q(self, x):
ext = (self.MQ_sq_MK_sq_max - self.q_norm_sq).sqrt()
return torch.cat([x, torch.zeros_like(ext), ext], dim=-1)
class H2LSH(AsymmetricTransform):
'''
"Advanced" xbox for queries. Technique: H2-ALSH.
Based on paper: Accurate and Fast ALSH (KDD 2018)
'''
def K(self, x):
norms = x.norm(p=2, dim=-1).unsqueeze(-1)
max_norm = torch.max(norms, dim=0)[0]
self.max_norm = max_norm
ext = torch.sqrt(max_norm**2 - norms**2)
return torch.cat((x, ext), -1)
def Q(self, x):
assert hasattr(self, 'max_norm'), 'Max norm not set'
zero = torch.tensor([0.0], device=x.device).repeat(x.shape[0], 1)
res = torch.cat((self.max_norm * x, zero), -1)
del self.max_norm
return res
''' Hashing '''
class LSH:
def __call__(self, *args, **kwargs):
raise NotImplementedError('LSH scheme not implemented')
def compute_hash_agreement(self, q_hash, k_hash):
return (q_hash == k_hash).min(dim=-1)[0].sum(dim=-1)
class VoronoiLSH(LSH):
def __init__(self, L, K, dim, device='cuda'):
'''
We repeat L times the following process.
Choose K gaussians. Compute the inner product, keep the index of
the maximum.
L: increases the probability of collision for near ones.
K: decreases the probability of collision for far ones.
Suggested values:
-> K = ln(N) / ln(2)
-> L = sqrt(N)
'''
self.gaussians = torch.randn(dim, K * L, device=device)
self.K = K
self.L = L
self.dim = dim
def __call__(self, vecs):
products = vecs @ self.gaussians
return torch.argmax(products.reshape(-1, self.L, self.K), dim=-1)
class CrossPolytopeLSH(LSH):
def __init__(self, L, K, dim, device='cuda'):
self.L = L
self.K = K
self.dim = dim
def __call__(self, vecs):
x = vecs.repeat([self.L * self.K, 1])
x = hadamard_transform(x, normalize=True)
x = hadamard_transform(x)
x = x.reshape(self.L, self.K, -1, vecs.shape[-1])
indices = torch.argmax(x, dim=-1).permute(2, 0, 1)
return indices
def lsh_clustering(queries, keys, n_hashes, r=1, key_padding_mask=None):
"""
LSH clustering based on Euclidean distance.
"""
e2lsh = E2LSH(n_hashes=n_hashes, dim=queries.shape[-1], r=r, device=queries.device)
queries_hashed = e2lsh(queries)
keys_hashed = e2lsh(keys)
if key_padding_mask is not None:
keys_hashed.masked_fill_(~key_padding_mask, float('inf'))
# HACK: if queries and keys have the same length, we assume it's self-attention.
# By right we shouldn't change queries_hashed, but the original SMYRF code does it.
if queries.shape[-2] == key_padding_mask.shape[-1]:
queries_hashed.masked_fill_(~key_padding_mask, float('inf'))
return queries_hashed.argsort(dim=-1), keys_hashed.argsort(dim=-1)
class E2LSH(LSH):
def __init__(self, n_hashes, dim, r, device='cuda'):
super(E2LSH, self).__init__()
self.alpha = torch.normal(0, 1, (dim, n_hashes), device=device)
self.beta = uniform(0, r, shape=(1, n_hashes), device=device)
self.dim = dim
self.r = r
def __call__(self, vecs):
'''
L2 Sensitive Hashing based on p-stable distributions.
Also known as E2LSH.
Args:
vecs: (bs, N, dim) (dtype: torch.float32)
Output:
buckets: (n_hashes, bs, N) (dtype: torch.int32)
'''
projection = vecs @ self.alpha
projection_shift = projection + self.beta
projection_rescale = projection_shift / self.r
return projection_rescale.permute(2, 0, 1)
class QLSH(LSH):
def __init__(self, L, K, dim, r=4, device='cuda'):
self.alpha = torch.normal(0, 1, (dim, L * K), device=device)
self.dim = dim
self.L = L
self.K = K
self.r = r
@two_dimensional
def __call__(self, queries, keys):
q_projection = (queries @ self.alpha).reshape(-1, self.L, self.K)
k_projection = (keys @ self.alpha).reshape(-1, self.L, self.K)
return self.compute_hash_agreement(q_projection, k_projection)
def compute_hash_agreement(self, q_projection, k_projection):
diff = k_projection - q_projection
left_part = diff >= (- self.r / 2)
right_part = diff <= (self.r / 2)
truth_table = (left_part * right_part).min(dim=-1)[0].sum(dim=-1)
return truth_table
def color_clusters(q_pos, k_pos, q_cluster_size, k_cluster_size):
print('Coloring clusters...')
q_pos_sorted = q_pos.argsort(dim=-1).reshape(-1, q_cluster_size)
k_pos_sorted = k_pos.argsort(dim=-1).reshape(-1, k_cluster_size)
n_clusters = q_pos.shape[0] // q_cluster_size
# mark each vector with a cluster index
for i in range(n_clusters):
q_pos[q_pos_sorted[i]] = i + 1
k_pos[k_pos_sorted[i]] = i + 1
# create boolean array where (i, j) says if (i, j) in the same vector
bool_arr = (q_pos.unsqueeze(1) == k_pos).type(torch.int32)
# mark this array with the color of each row
for i in range(q_pos.shape[0]):
bool_arr[i] *= q_pos[i]
return bool_arr
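# --- Minimal usage sketch (not in the original file), assuming random CPU inputs:
# the XBOX+ asymmetric transform followed by E2LSH clustering, mirroring how
# SmyrfAttention calls these utilities.
if __name__ == '__main__':
    queries = torch.randn(2, 128, 32)   # (batch, seq, dim)
    keys = torch.randn(2, 128, 32)
    xbox = XBOXPLUS()
    xbox.set_norms(queries, keys)
    q_t, k_t = xbox.Q(queries), xbox.K(keys)   # (batch, seq, dim + 2)
    q_pos, k_pos = lsh_clustering(q_t, k_t, n_hashes=4, r=1)
    print(q_pos.shape, k_pos.shape)            # (4, 2, 128) each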
| fly-master | src/models/attention/hash_utils.py |
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] * meta['BLOCK'])})
@triton.jit
def _forward(
X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta
):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=-float('inf'))
x = x.to(tl.float32)
# computation
c = tl.max(x, axis=0)
out = tl.log(tl.sum(tl.exp(x - c), axis=0)) + c
# pointers to OUT
pout = OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm
tl.store(pout, out)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[5] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[5]) * meta['BLOCK']})
@triton.jit
def _backward(X, OUT, DX, DOUT, LUT, sizemax, stride_zx, stride_zout, stride_hout,
stride_zdx, stride_zdout, stride_hdout, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pout = OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm
pdout = DOUT + pidz * stride_zdout + headid * stride_hdout + rowid * BLOCK + rxm
# Load
x = tl.load(px, mask=check, other=-float('inf'))
out = tl.load(pout)
dout = tl.load(pdout)
x = x.to(tl.float32)
out = out.to(tl.float32)
dout = dout.to(tl.float32)
# Computation
# [2021-09-14] TD: -(out - x) works but x - out segfaults, I think bc of a bug in broadcasting
dx = dout * tl.exp(-(out - x))
tl.store(pdx, dx, mask=check)
class _logsumexp(torch.autograd.Function):
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero(as_tuple=False)[:, 0]
rows = layout.nonzero(as_tuple=False)[:, 1]
columns = layout.nonzero(as_tuple=False)[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
n_head = layout.shape[0]
n_row = layout.shape[1] * block
return lut, int(sizes.max()), n_head, n_row
@staticmethod
def forward(ctx, x, spdims, block, lut, maxlut, n_head, n_row, bench, time):
out = torch.zeros((x.shape[0], n_head, n_row), dtype=x.dtype, device=x.device)
# run kernel
M = x.shape[0]
meta = {'BLOCK': block}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, out, lut, maxlut, x.stride(0), out.stride(0), out.stride(1),
force_nc_cache=True, **meta)
# save to context
ctx.save_for_backward(x, out, lut)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
return out
@staticmethod
def backward(ctx, dout):
# retrieve from context
x, out, lut = ctx.saved_tensors
dx = torch.zeros_like(x)
# run kernel
M = x.shape[0]
grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
_backward[grid](x, out, dx, dout, lut, ctx.maxlut, x.stride(0), out.stride(0),
out.stride(1), dx.stride(0), dout.stride(0), dout.stride(1),
force_nc_cache=True, BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None
class logsumexp:
apply_logsumexp = _logsumexp.apply
def make_lut(self, device):
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _logsumexp.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self, x):
time_y = [None]
lut, maxlut, n_head, n_row = self.make_lut(x.device)
x = logsumexp.apply_logsumexp(
x, self.spdims, self.block, lut, maxlut, n_head, n_row, self.bench, time_y
)
return x
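# --- Dense PyTorch reference sketch (not in the original file; an assumption about
# the intended semantics, for CPU checking only): for every head and every block row
# that contains at least one nonzero block, take the logsumexp over the columns
# covered by the nonzero blocks. Here x is the block-sparsified input of shape
# (batch, nnz, block, block), with blocks ordered as in layout.nonzero().
def logsumexp_dense_reference(x, layout, block):
    batch = x.shape[0]
    n_head, p, _ = layout.shape
    out = torch.full((batch, n_head, p * block), -float('inf'), dtype=torch.float32)
    head_idx, row_idx, _ = layout.nonzero(as_tuple=True)
    for i, (h, pr) in enumerate(zip(head_idx.tolist(), row_idx.tolist())):
        rows = slice(pr * block, (pr + 1) * block)
        out[:, h, rows] = torch.logaddexp(out[:, h, rows],
                                          torch.logsumexp(x[:, i].float(), dim=-1))
    return out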
| fly-master | src/models/attention/blocksparse_logsumexp.py |
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
import math
import torch
from einops import rearrange
from src.models.attention.projection_utils import gaussian_orthogonal_random_matrix
# from fast_transformers.feature_maps.base import FeatureMap
# For compatibility with our old code (using pytorch_fast_transformers 0.3.0), we copy the
# FeatureMap class here.
# Will be removed once we migrate to our new code
class FeatureMap(torch.nn.Module):
"""Define the FeatureMap interface."""
def __init__(self, query_dims):
super().__init__()
self.query_dims = query_dims
def new_feature_map(self, device):
"""Create a new instance of this feature map. In particular, if it is a
random feature map sample new parameters."""
raise NotImplementedError()
def forward_queries(self, x):
"""Encode the queries `x` using this feature map."""
return self(x)
def forward_keys(self, x):
"""Encode the keys `x` using this feature map."""
return self(x)
def forward(self, x):
"""Encode x using this feature map. For symmetric feature maps it
suffices to define this function, but for asymmetric feature maps one
needs to define the `forward_queries` and `forward_keys` functions."""
raise NotImplementedError()
@classmethod
def factory(cls, *args, **kwargs):
"""Return a function that when called with the query dimensions returns
an instance of this feature map.
It is inherited by the subclasses so it is available in all feature
maps.
"""
def inner(query_dims):
return cls(query_dims, *args, **kwargs)
return inner
def softmax_kernel(data, *, projection_matrix, is_query, softmax_temp=None, eps=0., cosh=True,
return_log=False):
"""For key, we expect shape (..., S, D) where S is the sequence dimension
return_log: return the log of the features (i.e. don't apply exp at the end).
"""
if return_log and eps != 0:
raise NotImplementedError('return_log is not compatible with nonzero eps')
d = data.shape[-1]
m = projection_matrix.shape[0] if not cosh else 2 * projection_matrix.shape[0]
if softmax_temp is None:
softmax_temp = 1 / math.sqrt(d)
data_normalizer = math.sqrt(softmax_temp)
projection_matrix = projection_matrix.type_as(data)
data_dash = torch.einsum('...id,jd->...ij', data, data_normalizer * projection_matrix)
diag_data = data.square().sum(dim=-1, keepdim=True) / 2 * (data_normalizer ** 2)
if cosh:
# We use the cosh feature map from the Performer paper, which effectively means
# concatenating data_dash and -data_dash
data_dash = torch.cat([data_dash, -data_dash], dim=-1)
if is_query:
log_scale = -diag_data + torch.amax(data_dash, dim=-1, keepdim=True) - math.log(m) / 2
# TD: The correct scaling is torch.exp(data_dash - diag_data)
data_dash_log = data_dash - torch.amax(data_dash, dim=-1, keepdim=True)
if not return_log:
data_dash = torch.exp(data_dash_log) + eps / math.sqrt(m)
else:
data_dash_m_diag = data_dash - diag_data - math.log(m) / 2
log_scale = torch.amax(data_dash_m_diag, dim=(-1, -2), keepdim=True)
data_dash_log = data_dash_m_diag - log_scale
if not return_log:
data_dash = torch.exp(data_dash_log) + eps / math.sqrt(m)
return (data_dash if not return_log else data_dash_log).type_as(data), log_scale
class SBPerformerFeatures(FeatureMap):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
query_dims: int, The input query dimensions in order to sample
the noise matrix
n_features: int, The size of the feature map (should be divisible by 2)
(default: query_dims)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dims))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
redraw: int, Redraw the random matrix every 'redraw' times
(default: 1)
deterministic_eval: bool, Only redraw the random matrix during training
(default: False)
"""
def __init__(self, query_dims, n_features=None, ortho_scaling=0, softmax_temp=None,
orthogonal=False, cosh=True, redraw=1, deterministic_eval=False, eps=0.0,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(query_dims)
self.n_features = n_features or int(query_dims * math.log(query_dims))
self.ortho_scaling = ortho_scaling
# TODO: we're not using @orthogonal atm
self.orthogonal = orthogonal
self.softmax_temp = 1 / math.sqrt(query_dims) if softmax_temp is None else softmax_temp
self.cosh = cosh
# self.redraw = redraw
# TODO: not redrawing atm, so I'm setting it to an irrational number
self.redraw = math.pi
self.deterministic_eval = deterministic_eval
self.eps = eps # Stabilizer for softmax kernel
# Make a buffer for storing the sampled projection_matrix
self.register_buffer("projection_matrix", torch.zeros(self.query_dims, self.n_features,
**factory_kwargs))
self.factory_kwargs = factory_kwargs
self._calls = -1
def new_feature_map(self, device):
# If we are not training skip the generation of a new feature map
if self.deterministic_eval and not self.training:
return
# Only redraw the new feature map every self.redraw times
self._calls += 1
if (self._calls % self.redraw) != 0:
return
# We use the cosh feature map so the number of rows is halved
nb_rows = self.n_features if not self.cosh else self.n_features // 2
projection_matrix = gaussian_orthogonal_random_matrix(nrows=nb_rows,
ncols=self.query_dims,
scaling=self.ortho_scaling,
device=device,
dtype=self.factory_kwargs['dtype'])
self.register_buffer("projection_matrix", projection_matrix)
def forward_queries(self, x, return_log=False):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=True,
softmax_temp=self.softmax_temp, eps=self.eps, cosh=self.cosh,
return_log=return_log)
def forward_keys(self, x, return_log=False):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=False,
softmax_temp=self.softmax_temp, eps=self.eps, cosh=self.cosh,
return_log=return_log)
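# --- Numerical sanity sketch (not in the original file), assuming a CPU run with
# float32: the feature map approximates exp(softmax_temp * q @ k^T) via
# (q' @ k'^T) * exp(q_log_scale + k_log_scale), as used by the scatterbrain modules.
if __name__ == '__main__':
    torch.manual_seed(0)
    B, H, T, S, E = 1, 1, 8, 8, 16
    q = 0.3 * torch.randn(B, H, T, E)
    k = 0.3 * torch.randn(B, H, S, E)
    feat = SBPerformerFeatures(E, n_features=4096, dtype=torch.float32)
    feat.new_feature_map(q.device)
    q_prime, q_log_scale = feat.forward_queries(q)
    k_prime, k_log_scale = feat.forward_keys(k)
    approx = (torch.einsum('bhte,bhse->bhts', q_prime, k_prime)
              * torch.exp(q_log_scale + k_log_scale))
    exact = torch.exp(torch.einsum('bhte,bhse->bhts', q, k) / math.sqrt(E))
    print('max abs error:', (approx - exact).abs().max().item())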
| fly-master | src/models/attention/feature_maps_sb.py |
# Adapted from https://github.com/lucidrains/performer-pytorch/blob/main/performer_pytorch/performer_pytorch.py
import math
import torch
from torch import nn
from einops import rearrange
from functools import partial
from src.models.attention.projection_utils import gaussian_orthogonal_random_matrix
from src.models.attention.performer_utils import (
softmax_kernel, generalized_kernel,
causal_linear_attention, causal_linear_attention_noncuda, linear_attention
)
# helpers
def default(val, d):
return val if val is not None else d
class PerformerAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
"""
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0, generalized_attention=False,
kernel_fn=nn.ReLU(), no_projection=False, softmax_temp=None, softmax_eps=1e-4,
normalization_eps=1e-6, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix,
nrows=self.nb_features, ncols=dim_heads,
scaling=ortho_scaling, **factory_kwargs)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.softmax_temp = softmax_temp
self.softmax_eps = softmax_eps # Stabilizer for softmax kernel
self.normalization_eps = normalization_eps # Stabilizer for normalization step
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
query = rearrange(query, 'b t h e -> b h t e')
key = rearrange(key, 'b s h e -> b h s e')
value = rearrange(value, 'b s h d -> b h s d')
if self.no_projection:
query = query.softmax(dim=-1)
key = torch.exp(key) if self.causal else key.softmax(dim=-2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.kernel_fn,
projection_matrix=self.projection_matrix)
query, key = map(create_kernel, (query, key))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.projection_matrix,
softmax_temp=self.softmax_temp, eps=self.softmax_eps)
query = create_kernel(query, is_query=True)
key = create_kernel(key, is_query=False)
if key_padding_mask is not None and not key_padding_mask.all_ones:
# performer-pytorch chooses to zero out the value instead of the key
# https://github.com/lucidrains/performer-pytorch/blob/457dade217c900b6c972c77731c7bbbf55cf5b8a/performer_pytorch/performer_pytorch.py#L393
value = value.masked_fill(rearrange(~key_padding_mask.bool_matrix, 'b s -> b 1 s 1'),
0.0)
# Apply the key padding mask and make sure that the attn_mask is
# all_ones or is causal
causal = attn_mask is not None and attn_mask.lower_triangular
if not (attn_mask is None or attn_mask.all_ones or causal):
raise RuntimeError(("PerformerAttention does not support arbitrary attention masks"))
if causal:
assert query.shape[-2] == key.shape[-2], 'query and key must have the same sequence length'
try:
import fast_transformers.causal_product.causal_product_cuda
attn_fn = causal_linear_attention
except ImportError:
print('unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
attn_fn = causal_linear_attention_noncuda
else:
attn_fn = linear_attention
out, attn = attn_fn(query, key, value, eps=self.normalization_eps,
need_weights=need_weights)
out = rearrange(out, 'b h s d -> b s h d')
return out, attn
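# --- Minimal usage sketch (not in the original file), assuming no masks (the
# non-causal linear-attention path) and that the performer_utils helpers behave as
# they are used above.
if __name__ == '__main__':
    attn_layer = PerformerAttention(dim_heads=32, nb_features=64)
    q = torch.randn(2, 128, 4, 32)   # (B, T, H, E)
    k = torch.randn(2, 128, 4, 32)
    v = torch.randn(2, 128, 4, 32)
    out, _ = attn_layer(q, k, v)
    print(out.shape)                 # (2, 128, 4, 32)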
| fly-master | src/models/attention/performer_attention.py |
import torch
import torch.nn as nn
import hydra
from einops import rearrange
class CombinationAttention(nn.Module):
"""
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, d_head, n_heads, attn_cfg_0, attn_cfg_1, gating=True,
softmax_temp=None, device=None, dtype=None):
"""
gating: whether to use sigmoid gating, otherwise we simply average the two attentions.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_head = d_head
self.n_heads = n_heads
self.gating = gating
self.attn_0 = hydra.utils.instantiate(attn_cfg_0, softmax_temp=softmax_temp, **factory_kwargs)
self.attn_1 = hydra.utils.instantiate(attn_cfg_1, softmax_temp=softmax_temp, **factory_kwargs)
if gating:
self.gate = nn.Conv1d(n_heads, n_heads, kernel_size=d_head, groups=n_heads)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
out_0, attn_0 = self.attn_0(query, key, value, attn_mask=attn_mask,
key_padding_mask=key_padding_mask, need_weights=need_weights)
out_1, attn_1 = self.attn_1(query, key, value, attn_mask=attn_mask,
key_padding_mask=key_padding_mask, need_weights=need_weights)
if self.gating:
g = torch.sigmoid(rearrange(self.gate(rearrange(query, 'b t h e -> (b t) h e')),
'(b t) h 1 -> b t h 1', t=query.shape[1]))
else:
g = 0.5
out = torch.lerp(out_0, out_1, g)
if attn_0 is None or attn_1 is None:
attn = None
else:
attn = torch.lerp(attn_0, attn_1, rearrange(g, 'b t h 1 -> b h t 1'))
return out, attn
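# --- Minimal usage sketch (not in the original file). The hydra configs below are
# hypothetical examples pointing at two attention classes from this package; the
# learned sigmoid gate then mixes the two outputs per head and per position.
if __name__ == '__main__':
    attn_cfg_0 = {'_target_': 'src.models.attention.full_attention.FullAttention'}
    attn_cfg_1 = {'_target_': 'src.models.attention.performer_attention.PerformerAttention',
                  'dim_heads': 32, 'nb_features': 64}
    attn_layer = CombinationAttention(d_head=32, n_heads=4,
                                      attn_cfg_0=attn_cfg_0, attn_cfg_1=attn_cfg_1)
    q = k = v = torch.randn(2, 16, 4, 32)
    out, _ = attn_layer(q, k, v)
    print(out.shape)                 # (2, 16, 4, 32)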
| fly-master | src/models/attention/combination_attention.py |
# Adapted from https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import wraps
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.permutation import invert_permutation
from src.models.attention.hash_utils import sort_key_val
from src.models.attention.batching_utils import batched_index_select
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.mask_utils import pad_mask
#constants
TOKEN_SELF_ATTN_VALUE = -5e4 # carefully set for half precision to work
# helper fns
def chunked_sum(tensor, chunks=1):
*orig_size, last_dim = tensor.shape
tensor = tensor.reshape(-1, last_dim)
summed_tensors = [c.sum(dim=-1) for c in tensor.chunk(chunks, dim=0)]
return torch.cat(summed_tensors, dim=0).reshape(orig_size)
def default(val, default_val):
return default_val if val is None else val
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cache_method_decorator(cache_attr, cache_namespace, reexecute=False):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, key_namespace=None, fetch=False, set_cache=True, **kwargs):
namespace_str = str(default(key_namespace, ''))
_cache = getattr(self, cache_attr)
_keyname = f'{cache_namespace}:{namespace_str}'
if fetch:
val = _cache[_keyname]
if reexecute:
fn(self, *args, **kwargs)
else:
val = fn(self, *args, **kwargs)
if set_cache:
setattr(self, cache_attr, {**_cache, **{_keyname: val}})
return val
return wrapper
return inner_fn
# LSH attention as described in https://openreview.net/pdf?id=rkgNKkHtvB
# adapted from trax, stripped to what paper said needed to work
# namely that buckets need to be at least 64 with 8 rounds of hashing
# https://github.com/google/trax/blob/master/trax/layers/research/efficient_attention.py#L442
class ReformerAttention(nn.Module):
def __init__(self,
softmax_temp=None,
attention_dropout=0.,
bucket_size=64,
n_hashes=8,
causal=False,
allow_duplicate_attention=True,
attend_across_buckets=True,
rehash_each_round=True,
drop_for_hash_rate=0.0,
random_rotations_per_head=False,
device=None, dtype=None):
super().__init__()
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.dropout_for_hash = nn.Dropout(drop_for_hash_rate)
assert rehash_each_round or allow_duplicate_attention, (
'The setting {allow_duplicate_attention=False, rehash_each_round=False}'
' is not implemented.')
self.causal = causal
self.bucket_size = bucket_size
self.n_hashes = n_hashes
self._allow_duplicate_attention = allow_duplicate_attention
self._attend_across_buckets = attend_across_buckets
self._rehash_each_round = rehash_each_round
self._random_rotations_per_head = random_rotations_per_head
# cache buckets for reversible network, reported by authors to make Reformer work at depth
self._cache = {}
@cache_method_decorator('_cache', 'buckets', reexecute=True)
def hash_vectors(self, n_buckets, vecs):
batch_size = vecs.shape[0]
device = vecs.device
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
assert n_buckets % 2 == 0
rot_size = n_buckets
rotations_shape = (
batch_size if self._random_rotations_per_head else 1,
vecs.shape[-1],
self.n_hashes if self._rehash_each_round else 1,
rot_size // 2)
random_rotations = torch.randn(rotations_shape, dtype=vecs.dtype, device=device).expand(batch_size, -1, -1, -1)
dropped_vecs = self.dropout_for_hash(vecs)
rotated_vecs = torch.einsum('btf,bfhi->bhti', dropped_vecs, random_rotations)
if self._rehash_each_round:
# rotated_vectors size [batch,n_hash,seq_len,buckets]
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
buckets = torch.argmax(rotated_vecs, dim=-1)
else:
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
# In this configuration, we map each item to the top self.n_hashes buckets
rotated_vecs = torch.squeeze(rotated_vecs, 1)
bucket_range = torch.arange(rotated_vecs.shape[-1], device=device)
bucket_range = torch.reshape(bucket_range, (1, -1))
bucket_range = bucket_range.expand_as(rotated_vecs)
_, buckets = sort_key_val(rotated_vecs, bucket_range, dim=-1)
# buckets size [batch size, seq_len, buckets]
buckets = buckets[... , -self.n_hashes:].transpose(1, 2)
# buckets is now (batch_size, self.n_hashes, seq_len).
return buckets
def forward(self, qk, k, v, attn_mask=None, key_padding_mask=None, need_weights=False):
# Ignoring k, assuming that q = k = qk
_, seqlen_og, n_head, _ = qk.shape
qk = pad_to_multiple(qk, self.bucket_size * 2, dims=1)
v = pad_to_multiple(v, self.bucket_size * 2, dims=1)
# Extract some shapes and compute the temperature
B, T, H, E = qk.shape
_, S, _, D = v.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# pad the masks
if S > seqlen_og:
if key_padding_mask is None:
key_padding_mask = LengthMask(qk.new_full((qk.shape[0],), seqlen_og,
dtype=torch.long), max_len=S)
else:
key_padding_mask = pad_mask(key_padding_mask, pad_length=S - seqlen_og,
left=False, value=False)
if key_padding_mask is not None and not key_padding_mask.all_ones:
# Repeat for all heads and all hash functions
key_padding_mask_bool = repeat(key_padding_mask.bool_matrix, 'b s -> (b head) s',
head=H)
else:
key_padding_mask_bool = None
if attn_mask is not None and (S > seqlen_og or T > seqlen_og):
attn_mask = FullMask(F.pad(attn_mask._mask, (0, S - seqlen_og, 0, T - seqlen_og),
value=False))
if attn_mask is not None and not attn_mask.all_ones:
attn_mask_bool = attn_mask.bool_matrix # (T, S)
else:
attn_mask_bool = None
qk = rearrange(qk, 'b t h e -> (b h) t e')
v = rearrange(v, 'b s h d -> (b h) s d')
batch_size, seqlen, dim, device = *qk.shape, qk.device
assert seqlen % (self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - {self.bucket_size * 2}'
n_buckets = seqlen // self.bucket_size
buckets = self.hash_vectors(n_buckets, qk, set_cache=self.training)
assert buckets.shape[1] == self.n_hashes
assert buckets.shape[2] == seqlen
total_hashes = self.n_hashes
buckets = rearrange(buckets, 'b nhashes seqlen -> nhashes b seqlen')
s_buckets, perm = torch.sort(buckets, dim=-1, stable=True)
perm_inv = invert_permutation(perm)
# Allow each chunk to attend within itself, and also one chunk back. Chunk
# boundaries might occur in the middle of a sequence of items from the
# same bucket, so this increases the chances of attending to relevant items.
def look_one_back(x):
x_extra = torch.cat([x[:, -1:, ...], x[:, :-1, ...]], dim=1)
return torch.cat([x, x_extra], dim=2)
# We differ here from https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py
# the look_one_back only looks back at the bucket from the same hash function, while
# lucidrains's implementation could look back at the bucket from the previous hash function.
perm_oneback = look_one_back(rearrange(perm, 'h b (nbuckets bucketsz) '
'-> (h b) nbuckets bucketsz',
nbuckets=n_buckets))
perm_oneback = rearrange(perm_oneback, '(h b) nbuckets2 bucketsz -> h b (nbuckets2 bucketsz)',
h=self.n_hashes)
# sort queries, keys, values
def sort_to_buckets(x, perm, bucketsz, unsqueeze=True):
if unsqueeze:
x = rearrange(x, 'b s d -> 1 b s d')
return rearrange(batched_index_select(x, perm),
'h b (nbuckets bucketsz) d -> h b nbuckets bucketsz d',
bucketsz=bucketsz)
qk_norm = F.normalize(qk, p=2, dim=-1).type_as(qk)
tq = sort_to_buckets(qk, perm, self.bucket_size)
tk = sort_to_buckets(qk_norm, perm_oneback, self.bucket_size * 2)
tv = sort_to_buckets(v, perm_oneback, self.bucket_size * 2)
# Dot-product attention.
inner = torch.einsum('zbhie,zbhje->zbhij', tq, tk) * softmax_temp
masked_value = max_neg_value(inner)
bq_idx = rearrange(perm, 'h b (nbuckets bucketsz) -> h b nbuckets bucketsz 1',
bucketsz=self.bucket_size)
bkv_idx = rearrange(perm_oneback, 'h b (nbuckets bucketsz2) -> h b nbuckets 1 bucketsz2',
bucketsz2=self.bucket_size * 2)
# Mask for post qk attention logits of the input sequence
if attn_mask_bool is not None:
dot_attn_indices = bq_idx * seqlen + bkv_idx
mask = attn_mask_bool.flatten()[dot_attn_indices]
inner.masked_fill_(~mask, masked_value)
del mask
# mask out attention to padded tokens
if key_padding_mask is not None and not key_padding_mask.all_ones:
s_key_padding_mask = sort_to_buckets(rearrange(key_padding_mask_bool,
'b s -> b s 1'),
perm_oneback, self.bucket_size * 2)
s_key_padding_mask = rearrange(s_key_padding_mask,
'... bucketsz 1 -> ... 1 bucketsz')
inner.masked_fill_(~s_key_padding_mask, masked_value)
# Causal masking
if self.causal:
mask = bq_idx < bkv_idx
inner.masked_fill_(mask, masked_value)
del mask
# Mask out attention to self except when no other targets are available.
self_mask = bq_idx == bkv_idx
inner.masked_fill_(self_mask, TOKEN_SELF_ATTN_VALUE)
del self_mask
# Mask out attention to other hash buckets.
if not self._attend_across_buckets:
bq_buckets = sort_to_buckets(rearrange(buckets, 'h b s -> h b s 1'), perm,
self.bucket_size, unsqueeze=False)
bkv_buckets = sort_to_buckets(rearrange(buckets, 'h b s -> h b s 1'), perm_oneback,
self.bucket_size * 2, unsqueeze=False)
bkv_buckets = rearrange(bkv_buckets, 'h b nbuckets bucketsz2 1 -> h b nbuckets 1 bucketsz2')
bucket_mask = bq_buckets != bkv_buckets
inner.masked_fill_(bucket_mask, masked_value)
del bucket_mask
        # Don't double-count query-key pairs across multiple rounds of hashing.
        # There are two possible strategies here. (1) The default, used below, is to count how
        # many times a query-key pair is repeated, and to lower its log-prob
        # correspondingly at each repetition. (2) An alternative (not implemented here) is to
        # mask all but the first occurrence of each query-key pair.
if not self._allow_duplicate_attention:
locs1 = rearrange(perm_inv // self.bucket_size, 'h b seqlen -> b seqlen h')
locs2 = (locs1 + 1) % n_buckets
if not self._attend_across_buckets:
                locs1 = rearrange(buckets, 'h b s -> b s h') * n_buckets + locs1
                locs2 = rearrange(buckets, 'h b s -> b s h') * n_buckets + locs2
locs = torch.cat([locs1, locs2], dim=-1)
slocs = sort_to_buckets(locs, perm, self.bucket_size) # (h b nbuckets bucketsz h*2)
bq_locs = repeat(slocs[..., :total_hashes],
'h b nbuckets bucketsz nh -> h b nbuckets bucketsz 1 (2 nh)')
bkv_locs = look_one_back(rearrange(slocs, 'h b nbuckets bucketsz nh2'
'-> (h b) nbuckets bucketsz nh2'))
bkv_locs = rearrange(bkv_locs,
'(h b) nbuckets bucketsz2 nh2 -> h b nbuckets 1 bucketsz2 nh2',
h=self.n_hashes)
dup_counts = bq_locs == bkv_locs
# for memory considerations, chunk summation of last dimension for counting duplicates
dup_counts = chunked_sum(dup_counts, chunks=(total_hashes * batch_size))
dup_counts = dup_counts.detach()
assert dup_counts.shape == inner.shape
inner = inner - torch.log(dup_counts + 1e-9)
del dup_counts
# Softmax.
dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
dots = torch.exp(inner - dots_logsumexp).type_as(inner)
dropped_dots = self.dropout(dots)
so = torch.einsum('...ij,...jd->...id', dropped_dots, tv)
# undo sort
def unsort_from_buckets(s_x, perm_inverse):
b_x = rearrange(s_x, 'h b nbuckets bucketsz d -> h b (nbuckets bucketsz) d')
return batched_index_select(b_x, perm_inverse)
o = unsort_from_buckets(so, perm_inv)
logits = unsort_from_buckets(dots_logsumexp, perm_inv)
probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
out = torch.sum(o * probs, dim=0)
out = rearrange(out, '(b h) t d -> b t h d', h=H)
out = out[:, :seqlen_og]
attn = None
if need_weights:
dot_attn_indices = rearrange(bq_idx * seqlen + bkv_idx,
'h b nbuckets qbucketsz kbucketsz -> h b (nbuckets qbucketsz kbucketsz)')
unsorted_dots = torch.zeros(self.n_hashes, batch_size, seqlen * seqlen, device=device)
unsorted_dots.scatter_(-1, dot_attn_indices, dots.view_as(dot_attn_indices))
del dot_attn_indices
unsorted_dots = rearrange(unsorted_dots,
'h b (q_seqlen k_seqlen) -> h b q_seqlen k_seqlen',
q_seqlen = seqlen)
attn = torch.sum(unsorted_dots * probs, dim=0)
attn = rearrange(attn, '(b h) t s -> b h t s', h=n_head)[:, :, :seqlen_og, :seqlen_og]
return out, attn
| fly-master | src/models/attention/reformer_attention.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from einops import rearrange
import hydra
from triton.ops.blocksparse import softmax
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from fast_transformers.local_product import local_dot_product, local_weighted_average
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.feature_maps_sb import SBPerformerFeatures
from src.models.attention.scatterbrain_utils import linear_attention_normalization
from src.models.attention.scatterbrain_utils import causal_linear_attention, linear_attention
from src.utils.padding import pad_to_multiple
from src.models.attention.blocksparse_utils import densify_tensor
from src.models.attention.blocksparse_utils import sparsify_broadcast_tensor
from src.models.attention.mask_utils import pad_mask
from src.models.attention.blocksparse_matmul import matmul
from src.models.attention.blocksparse_logsumexp import logsumexp
from src.models.attention.blocksparse_sum import blocksparse_sum
class SBBlockSparseAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
Arguments
---------
        sparsity_config: optional: this parameter determines the sparsity pattern configuration; it is based on the SparsityConfig class.
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
def __init__(self, sparsity_config, dim_heads, nb_features=None, ortho_scaling=0,
causal=False, softmax_temp=None, attention_dropout=0.0, softmax_eps=0.0,
max_seq_length=2048):
super().__init__()
self.sparsity_config = hydra.utils.instantiate(sparsity_config)
# TODO: nb_features should be a multiple of block
self.feature_map = SBPerformerFeatures(dim_heads, nb_features, ortho_scaling=ortho_scaling,
softmax_temp=softmax_temp, eps=softmax_eps)
self.causal = causal
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.softmax_eps = softmax_eps
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
self.ops_cache = dict()
def get_layout(self, T, S=None):
if S is None:
S = T
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
block = self.sparsity_config.block
if (T % block != 0) or (S % block != 0):
raise ValueError(
                f'Sequence lengths {T} and {S} need to be divisible by block size {block}!'
)
num_blocksT, num_blocksS = T // self.sparsity_config.block, S // self.sparsity_config.block
return self.master_layout[..., :num_blocksT, :num_blocksS].cpu() # layout needs to be a CPU tensor
    # Build the block-sparse kernels for each (T, S) shape and cache them
def get_ops(self, T, S=None):
if S is None:
S = T
if (T, S) not in self.ops_cache:
sparsity_layout = self.get_layout(T, S)
sparse_dot_sdd_nt = matmul(sparsity_layout,
self.sparsity_config.block,
'sdd',
trans_a=False,
trans_b=True)
sparse_dot_dsd_nn = matmul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_logsumexp = logsumexp(sparsity_layout, self.sparsity_config.block)
sparse_sum = blocksparse_sum(sparsity_layout, self.sparsity_config.block)
sparse_softmax = softmax(sparsity_layout, self.sparsity_config.block)
self.ops_cache[T, S] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax,
sparse_logsumexp, sparse_sum)
return self.ops_cache[T, S]
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False,
return_attn_unnormalized=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
block = self.sparsity_config.block
# Pad to multiples of @block
_, q_seqlen_og, n_head, q_dim_og = query.shape
_, k_seqlen_og, _, _ = key.shape
_, _, _, v_dim_og = value.shape
query = pad_to_multiple(query, block, dims=(1, 3))
key = pad_to_multiple(key, block, dims=(1, 3))
value = pad_to_multiple(value, block, dims=(1, 3))
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# pad the masks
if S > k_seqlen_og:
if key_padding_mask is None:
key_padding_mask = LengthMask(key.new_full((key.shape[0],), k_seqlen_og,
dtype=torch.long), max_len=S)
else:
key_padding_mask = pad_mask(key_padding_mask, pad_length=S - k_seqlen_og,
left=False, value=False)
# TODO: this doesn't work for causal right now
if attn_mask is not None and (S > k_seqlen_og or T > q_seqlen_og):
attn_mask = FullMask(F.pad(attn_mask._mask, (0, S - k_seqlen_og, 0, T - q_seqlen_og),
value=False))
# TODO: check causal
if attn_mask is None:
attn_mask_additive_matrix = torch.zeros(T, S, device=query.device)
else:
attn_mask_additive_matrix = attn_mask.additive_matrix_finite
if key_padding_mask is None:
key_padding_mask_lengths = torch.full(size=(B,), fill_value=S, dtype=torch.long,
device=key.device)
else:
key_padding_mask_lengths = key_padding_mask.lengths
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t h e -> b h t e').contiguous()
key = rearrange(key, 'b s h e -> b h s e').contiguous()
value = rearrange(value, 'b s h d -> b h s d').contiguous()
self.feature_map.new_feature_map(query.device)
q_prime, q_prime_log_scale = self.feature_map.forward_queries(query)
k_prime, k_prime_log_scale = self.feature_map.forward_keys(key)
prime_log_scale = q_prime_log_scale + k_prime_log_scale
m = q_prime.shape[-1]
if key_padding_mask is not None:
k_prime = k_prime.masked_fill(~rearrange(key_padding_mask.bool_matrix,
'b s -> b 1 s 1'), 0.0)
attn_fn = linear_attention if not self.causal else causal_linear_attention
q_prime_k_prime_1 = linear_attention_normalization(q_prime, k_prime, causal=self.causal)
q_prime_k_prime_v, attn_prime = attn_fn(q_prime, k_prime, value, need_weights=need_weights)
# cache look-up table computations etc
layout = self.get_layout(T, S)
(sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax, sparse_logsumexp,
sparse_sum) = self.get_ops(T, S)
QK = softmax_temp * sparse_dot_sdd_nt(query, key)
dots_prime = sparse_dot_sdd_nt(q_prime, k_prime)
assert attn_mask is None and key_padding_mask is None
# TODO: sparse masked_fill_
# Compute the normalization first
QK_lse = rearrange(sparse_logsumexp(QK), 'b h t -> b h t 1')
dots_prime_sum = rearrange(sparse_sum(dots_prime), 'b h t -> b h t 1')
lr_log_normalization = torch.log((rearrange(q_prime_k_prime_1, 'b h s -> b h s 1')
- dots_prime_sum).clamp_min_(1e-24)) + prime_log_scale
log_normalization = torch.logaddexp(QK_lse, lr_log_normalization)
prime_scale = torch.exp(prime_log_scale - log_normalization)
# When we drop out, we want that location in the attn matrix to be zero.
# So here we dropout just torch.exp(QK) and leave -dots_prime, so that when we add it back
# to attn_prime it's equivalent to setting that location to zero.
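        # The subtraction of dots_prime below removes the low-rank (Performer) estimate on the
        # block-sparse support, so that exact softmax attention replaces the approximation on
        # those blocks instead of being added on top of it (the Scatterbrain combination).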
log_normalization_sparse = sparsify_broadcast_tensor(log_normalization.squeeze(-1), layout,
block)
prime_scale_sparse = sparsify_broadcast_tensor(prime_scale.squeeze(-1), layout, block)
dots = self.dropout(torch.exp(QK - log_normalization_sparse)) - dots_prime * prime_scale_sparse
# outputs
out_blocksparse = sparse_dot_dsd_nn(dots, value)
out = out_blocksparse + q_prime_k_prime_v * prime_scale
attn = None
if need_weights:
attn_blocksparse = densify_tensor(dots, layout)
attn = attn_blocksparse + attn_prime * prime_scale
if return_attn_unnormalized: # For testing purpose
attn = (attn, attn * torch.exp(log_normalization),
attn_prime * torch.exp(prime_log_scale))
return rearrange(out, 'b h t d -> b t h d'), attn
| fly-master | src/models/attention/sbblocksparse_attention.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from einops import rearrange
import hydra
from deepspeed.ops.sparse_attention import SparsityConfig
# from triton.ops.blocksparse import matmul, softmax
from triton.ops.blocksparse import softmax
from src.models.modules.masking import FullMask, LengthMask
from src.utils.padding import pad_to_multiple
from src.models.attention.blocksparse_utils import sparsify_tensor, densify_tensor
from src.models.attention.mask_utils import pad_mask
from src.models.attention.blocksparse_matmul import matmul
class BlockSparseAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
Arguments
---------
        sparsity_config: optional: this parameter determines the sparsity pattern configuration; it is based on the SparsityConfig class.
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
def __init__(self, sparsity_config, softmax_temp=None, attention_dropout=0.1, max_seq_length=2048):
super().__init__()
self.sparsity_config = hydra.utils.instantiate(sparsity_config)
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
self.ops_cache = dict()
def get_layout(self, T, S=None):
if S is None:
S = T
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
block = self.sparsity_config.block
if (T % block != 0) or (S % block != 0):
raise ValueError(
                f'Sequence lengths {T} and {S} need to be divisible by block size {block}!'
)
num_blocksT, num_blocksS = T // self.sparsity_config.block, S // self.sparsity_config.block
return self.master_layout[..., :num_blocksT, :num_blocksS].cpu() # layout needs to be a CPU tensor
    # Build the block-sparse kernels for each (T, S) shape and cache them
def get_ops(self, T, S=None):
if S is None:
S = T
if (T, S) not in self.ops_cache:
sparsity_layout = self.get_layout(T, S)
sparse_dot_sdd_nt = matmul(sparsity_layout,
self.sparsity_config.block,
'sdd',
trans_a=False,
trans_b=True)
sparse_dot_dsd_nn = matmul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_softmax = softmax(sparsity_layout, self.sparsity_config.block)
self.ops_cache[T, S] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
return self.ops_cache[T, S]
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to (seqlen, seqlen)
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of. (bs, seqlen)
"""
# if attn_mask is None:
# attn_mask = FullMask(N=query.size(1), M=key.size(1), device=query.device)
block = self.sparsity_config.block
# Pad to multiples of @block
_, q_seqlen_og, n_head, q_dim_og = query.shape
_, k_seqlen_og, _, _ = key.shape
_, _, _, v_dim_og = value.shape
query = pad_to_multiple(query, block, dims=(1, 3))
key = pad_to_multiple(key, block, dims=(1, 3))
value = pad_to_multiple(value, block, dims=(1, 3))
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(q_dim_og)
# pad the masks
if S > k_seqlen_og:
if key_padding_mask is None:
key_padding_mask = LengthMask(key.new_full((key.shape[0],), k_seqlen_og,
dtype=torch.long), max_len=S)
else:
key_padding_mask = pad_mask(key_padding_mask, pad_length=S - k_seqlen_og,
left=False, value=False)
# TODO: this doesn't work for causal right now
if attn_mask is not None and (S > k_seqlen_og or T > q_seqlen_og):
attn_mask = FullMask(F.pad(attn_mask._mask, (0, S - k_seqlen_og, 0, T - q_seqlen_og),
value=False))
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t h e -> b h t e').contiguous()
key = rearrange(key, 'b s h e -> b h s e').contiguous()
value = rearrange(value, 'b s h d -> b h s d').contiguous()
# cache look-up table computations etc
layout = self.get_layout(T, S)
sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(T, S)
# [2021-09-05] TD: There's a bug in triton that gives wrong result when block size is 16
# and dim is an odd multiple of 16. https://github.com/openai/triton/issues/266
# [2021-09-06] I've fixed it in my version of blocksparse matmul.
# if block == 16 and (E // 16) % 2 == 1:
# # print('Using dense matmul instead of block sparse matmul due to bug in triton')
# sparse_dot_sdd_nt = lambda q, k: sparsify_tensor(q @ k.transpose(-1, -2), layout)
# attention scores
QK = sparse_dot_sdd_nt(query, key)
attn_blocksparse = sparse_softmax( # This is done in-place
QK,
scale=softmax_temp,
key_padding_mask=None if key_padding_mask is None else key_padding_mask.additive_matrix,
attn_mask=None if attn_mask is None else attn_mask.additive_matrix,
key_padding_mask_mode='add',
attn_mask_mode='add')
# If there are no valid keys for a query (because of sparse mask, key_padding_mask, and
# attention mask), then that row of attn_local will be NaN.
# We want that row to actually be zero.
if ((key_padding_mask is not None and not key_padding_mask.all_ones)
or (attn_mask is not None and not attn_mask.all_ones)):
attn_blocksparse.nan_to_num_()
A = self.dropout(attn_blocksparse)
# outputs
output = sparse_dot_dsd_nn(A, value)
attn = None
if need_weights:
attn = densify_tensor(attn_blocksparse, layout)[:, :, :q_seqlen_og, :k_seqlen_og]
return rearrange(output, 'b h t d -> b t h d')[:, :q_seqlen_og, :, :v_dim_og], attn
| fly-master | src/models/attention/blocksparse_attention.py |
import torch
from einops import rearrange
def batched_index_select(values: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
"""
Params:
values: (1 or n_hashes, batch, seqlen, dim)
indices: (n_hashes, batch, seqlen)
Return:
(n_hashes, batch, seqlen, dim)
"""
last_dim = values.shape[-1]
indices_expanded = rearrange(indices, '... -> ... 1').expand(*indices.shape, last_dim)
return values.expand(*indices_expanded.shape[:-2],
*values.shape[-2:]).gather(-2, indices_expanded)
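# A minimal usage sketch (not part of the original file): gather a different
# permutation of the same value tensor for each hash round, as done in the
# Reformer/SMYRF attention modules. Shapes follow the docstring above.
if __name__ == "__main__":
    n_hashes, batch, seqlen, dim = 2, 3, 8, 4
    values = torch.randn(1, batch, seqlen, dim)  # shared across hash rounds
    perms = torch.stack([torch.randperm(seqlen) for _ in range(n_hashes * batch)])
    perms = rearrange(perms, '(h b) s -> h b s', h=n_hashes)
    gathered = batched_index_select(values, perms)
    assert gathered.shape == (n_hashes, batch, seqlen, dim)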
| fly-master | src/models/attention/batching_utils.py |
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/attn.py
import math
import torch
import torch.nn as nn
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.permutation import invert_permutation
from src.models.attention.hash_utils import XBOXPLUS, lsh_clustering
from src.models.attention.batching_utils import batched_index_select
from src.models.attention.reformer_attention import max_neg_value, chunked_sum
from src.models.modules.masking import LengthMask
from src.models.attention.mask_utils import pad_mask
from src.models.attention.feature_maps_sb import SBPerformerFeatures
from src.models.attention.scatterbrain_utils import linear_attention_normalization
from src.models.attention.scatterbrain_utils import causal_linear_attention, linear_attention
class SBSmyrfAttention(nn.Module):
def __init__(self, n_hashes, q_cluster_size, k_cluster_size, d_head,
r=1, # LSH clustering
nb_features=None, ortho_scaling=0, softmax_eps=0.0, # Performer
causal=False, softmax_temp=None, attention_dropout=0., device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.feature_map = SBPerformerFeatures(d_head, nb_features, ortho_scaling=ortho_scaling,
softmax_temp=softmax_temp, eps=softmax_eps,
**factory_kwargs)
self.n_hashes = n_hashes
self.q_cluster_size = q_cluster_size
self.k_cluster_size = k_cluster_size
self.causal = causal
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.hash_fn = XBOXPLUS()
self.clustering_params = {'r': r, 'n_hashes': self.n_hashes}
def hash_vectors(self, query, key, key_padding_mask=None):
# XBOX+ transform
self.hash_fn.set_norms(query, key)
query_t = self.hash_fn.Q(query)
key_t = self.hash_fn.K(key)
num_clusters = query_t.shape[-2] // self.q_cluster_size
assert num_clusters == (key_t.shape[-2] // self.k_cluster_size), 'Unequal number of clusters for query and key.'
q_positions, k_positions = lsh_clustering(query_t, key_t, **self.clustering_params,
key_padding_mask=key_padding_mask)
return q_positions, k_positions
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False,
return_attn_unnormalized=False):
if attn_mask is not None:
raise NotImplementedError('Support for attn_mask is not implemented')
_, q_seqlen_og, _, _ = query.shape
_, k_seqlen_og, _, _ = key.shape
query = pad_to_multiple(query, self.q_cluster_size, dims=1)
key = pad_to_multiple(key, self.k_cluster_size, dims=1)
value = pad_to_multiple(value, self.k_cluster_size, dims=1)
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
# pad the masks
if S > k_seqlen_og:
if key_padding_mask is None:
key_padding_mask = LengthMask(key.new_full((key.shape[0],), k_seqlen_og,
dtype=torch.long), max_len=S)
else:
key_padding_mask = pad_mask(key_padding_mask, pad_length=S - k_seqlen_og,
left=False, value=False)
query = rearrange(query, 'b t h e -> (b h) t e')
key = rearrange(key, 'b t h e -> (b h) t e')
value = rearrange(value, 'b s h d -> (b h) s d')
bs = query.shape[0]
if key_padding_mask is not None and not key_padding_mask.all_ones:
# Repeat for all heads
key_padding_mask_bool = repeat(key_padding_mask.bool_matrix, 'b s -> (b h) s', h=H)
else:
key_padding_mask_bool = None
with torch.no_grad():
q_positions, k_positions = self.hash_vectors(query, key,
rearrange(key_padding_mask_bool,
'b s -> 1 b s')
if key_padding_mask_bool is not None else None)
self.feature_map.new_feature_map(query.device)
q_prime, q_prime_log_scale = self.feature_map.forward_queries(query)
k_prime, k_prime_log_scale = self.feature_map.forward_keys(key)
prime_log_scale = q_prime_log_scale + k_prime_log_scale
m = q_prime.shape[-1]
if key_padding_mask_bool is not None:
k_prime.masked_fill_(~rearrange(key_padding_mask_bool, 'b s -> b s 1'), 0.0)
if self.causal:
try:
import fast_transformers.causal_product.causal_product_cuda
causal_linear_fn = causal_linear_attention
except ImportError:
print('unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
# self.causal_linear_fn = causal_linear_attention_noncuda
attn_fn = linear_attention if not self.causal else causal_linear_attention
q_prime_k_prime_1 = linear_attention_normalization(q_prime, k_prime, causal=self.causal)
q_prime_k_prime_v, attn_prime = attn_fn(q_prime, k_prime, value, need_weights=need_weights)
# sort queries, keys, values
def sort_to_buckets(x, perm, bucketsz):
return rearrange(batched_index_select(rearrange(x, 'b s d -> 1 b s d'), perm),
'h b (nbuckets bucketsz) d -> h b nbuckets bucketsz d',
bucketsz=bucketsz)
s_query = sort_to_buckets(query, q_positions, self.q_cluster_size)
s_key = sort_to_buckets(key, k_positions, self.k_cluster_size)
s_value = sort_to_buckets(value, k_positions, self.k_cluster_size)
sq_prime = sort_to_buckets(q_prime, q_positions, self.q_cluster_size)
sk_prime = sort_to_buckets(k_prime, k_positions, self.k_cluster_size)
# sq_prime, sq_prime_log_scale = kernel_fn(s_queries, is_query=True)
# sk_prime, sk_prime_log_scale = kernel_fn(s_keys, is_query=False)
# k_prime_log_scale doesn't depend on the index of the token
sprime_log_scale = sort_to_buckets(prime_log_scale, q_positions, self.q_cluster_size)
# sprime_log_scale = sq_prime_log_scale + sk_prime_log_scale
inner = torch.einsum('...id,...jd->...ij', s_query, s_key) * softmax_temp
dots_prime = torch.einsum('...im,...jm->...ij', sq_prime, sk_prime)
masked_value = max_neg_value(inner)
# mask out attention to padded tokens
if key_padding_mask is not None and not key_padding_mask.all_ones:
s_key_padding_mask = sort_to_buckets(rearrange(key_padding_mask_bool,
'b s -> b s 1'),
k_positions, self.k_cluster_size)
s_key_padding_mask = rearrange(s_key_padding_mask,
'... bucketsz 1 -> ... 1 bucketsz')
inner.masked_fill_(~s_key_padding_mask, masked_value)
dots_prime.masked_fill_(~s_key_padding_mask, 0.0)
# Causal masking
if self.causal:
s_q_positions = rearrange(q_positions,
'h b (nbuckets bucketsz) -> h b nbuckets bucketsz 1',
bucketsz=self.q_cluster_size)
s_k_positions = rearrange(k_positions,
'h b (nbuckets bucketsz) -> h b nbuckets 1 bucketsz',
bucketsz=self.k_cluster_size)
causal_mask = s_q_positions < s_k_positions
inner.masked_fill_(causal_mask, masked_value)
dots_prime.masked_fill_(causal_mask, 0.0)
del causal_mask
q_rev_positions = invert_permutation(q_positions)
# Don't double-count query-key pairs across multiple rounds of hashing.
# Count how many times a query-key pair is repeated, and to lower its log-prob
# correspondingly at each repetition.
if self.n_hashes > 1:
k_rev_positions = invert_permutation(k_positions)
q_bucket_idx = rearrange(q_rev_positions // self.q_cluster_size,
'h b seqlen -> b seqlen h')
k_bucket_idx = rearrange(k_rev_positions // self.k_cluster_size,
'h b seqlen -> b seqlen h')
s_q_bucket_idx = sort_to_buckets(q_bucket_idx, q_positions, self.q_cluster_size)
s_k_bucket_idx = sort_to_buckets(k_bucket_idx, k_positions, self.k_cluster_size)
dup_counts = (rearrange(s_q_bucket_idx, '... bk_size h -> ... bk_size 1 h') ==
rearrange(s_k_bucket_idx, '... bk_size h -> ... 1 bk_size h'))
# for memory considerations, chunk summation of last dimension for counting duplicates
dup_counts = chunked_sum(dup_counts, chunks=(self.n_hashes * bs))
dup_counts = dup_counts.detach()
assert dup_counts.shape == inner.shape
inner = inner - torch.log(dup_counts.float())
dots_prime = dots_prime / dup_counts
# free memory
if not need_weights:
del q_positions, k_positions
# softmax denominator
# TD: Even though we call this dots_logsumexp, it can be of arbitrary value and the
# computation would still be correct (assuming infinite precision), since it's just an
# arbitrary scaling of @dots.
# Here we choose it for numerical stability: we want torch.exp(inner - dots_logsumexp) <= 1.0
        # and torch.exp(sprime_log_scale - dots_logsumexp) <= 1.0
# dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
dots_logsumexp = torch.maximum(torch.amax(inner, dim=-1, keepdim=True), sprime_log_scale)
# TD: dots and dots_sum has log scale dots_logsumexp
# TD: No longer need this because we pick dots_logsumexp to not be -inf
# dots_prime_scale = torch.exp(sprime_log_scale - dots_logsumexp)
# nan_q_indices = dots_prime_scale.isinf()
# # dots_logsumexp[nan_q_indices] = 0.0
# dots_logsumexp = torch.where(nan_q_indices, torch.tensor(0.0, device=dots_logsumexp.device),
# dots_logsumexp)
dots_prime_scale = torch.exp(sprime_log_scale - dots_logsumexp)
dots = torch.exp(inner - dots_logsumexp) - dots_prime * dots_prime_scale
# TD: No longer need this because we pick dots_logsumexp to not be -inf
# If the whole row within this bucket is masked out, then inner is the uniform distribution.
# We actually want it to be zero.
# if key_padding_mask is not None and not key_padding_mask.all_ones:
# full_row_mask = (inner <= masked_value).all(dim=-1, keepdim=True)
# dots = dots.masked_fill(full_row_mask, 0.0)
dots_sum = dots.sum(dim=-1, keepdim=True)
# dropout
dropped_dots = self.dropout(dots)
# n_hashes outs
so = torch.einsum('...ij,...jd->...id', dropped_dots, s_value)
# undo sort
def unsort_from_buckets(s_x, perm_inverse):
b_x = rearrange(s_x, 'h b nbuckets bucketsz d -> h b (nbuckets bucketsz) d')
return batched_index_select(b_x, perm_inverse)
o = unsort_from_buckets(so, q_rev_positions)
logits = unsort_from_buckets(dots_logsumexp, q_rev_positions)
dots_sum_unsort = unsort_from_buckets(dots_sum, q_rev_positions)
# free memory
del q_rev_positions
normalization_log_scale = torch.logsumexp(logits, dim=0)
probs = torch.exp(logits - rearrange(normalization_log_scale, '... -> 1 ...'))
out_lsh = torch.sum(o * probs, dim=0)
prime_scale = torch.exp(prime_log_scale - normalization_log_scale)
out = out_lsh + q_prime_k_prime_v * prime_scale
normalization = (dots_sum_unsort * probs).sum(dim=0) + q_prime_k_prime_1.unsqueeze(-1) * prime_scale
out_normalized = out / normalization.clamp_min(1e-6)
out_normalized = (rearrange(out_normalized, '(b h) t d -> b t h d', h=H))[:, :q_seqlen_og]
attn = None
if need_weights:
q_pos_2d = rearrange(q_positions, 'h b (nbuckets bucketsz) -> h b nbuckets bucketsz 1',
bucketsz=self.q_cluster_size)
k_pos_2d = rearrange(k_positions, 'h b (nbuckets bucketsz) -> h b nbuckets 1 bucketsz',
bucketsz=self.k_cluster_size)
pos_2d = rearrange(q_pos_2d * S + k_pos_2d,
'h b nbuckets qbucketsz kbucketsz -> h b (nbuckets qbucketsz kbucketsz)')
unsorted_dots = torch.zeros(self.n_hashes, bs, T * S, device=query.device)
unsorted_dots.scatter_(-1, pos_2d, dots.view_as(pos_2d))
del pos_2d
unsorted_dots = rearrange(unsorted_dots,
'h b (q_seqlen k_seqlen) -> h b q_seqlen k_seqlen',
q_seqlen=T)
attn_lsh = torch.sum(unsorted_dots * probs, dim=0)
attn_unnormalized = attn_lsh + attn_prime * prime_scale
attn = attn_unnormalized / normalization.clamp_min(1e-6)
attn = rearrange(attn, '(b h) t s -> b h t s', h=H)[:, :, :q_seqlen_og, :k_seqlen_og]
if return_attn_unnormalized: # For testing purpose
attn_unnormalized = rearrange(
attn_unnormalized, '(b h) t s -> b h t s', h=H
)[:, :, :q_seqlen_og, :k_seqlen_og]
normalization_log_scale = rearrange(normalization_log_scale,
'(b h) s 1 -> b h s 1', h=H)[:, :, :q_seqlen_og]
attn_prime = rearrange(attn_prime,
'(b h) s d -> b h s d', h=H)[:, :, :q_seqlen_og, :k_seqlen_og]
prime_log_scale = rearrange(prime_log_scale,
'(b h) s 1 -> b h s 1', h=H)[:, :, :q_seqlen_og]
smyrf_mask = rearrange(attn_lsh != 0.0,
'(b h) t s -> b h t s', h=H)[:, :, :q_seqlen_og, :k_seqlen_og]
attn = (attn, attn_unnormalized * torch.exp(normalization_log_scale),
attn_prime * torch.exp(prime_log_scale), smyrf_mask)
return out_normalized, attn
| fly-master | src/models/attention/sbsmyrf_attention.py |
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
from src.models.attention.blocksparse_utils import sparsify_broadcast_tensor
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] * meta['BLOCK'])})
@triton.jit
def _forward(
X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta
):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=0)
x = x.to(tl.float32)
# computation
out = tl.sum(x, axis=0)
# pointers to OUT
pout = OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm
tl.store(pout, out)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3]) * meta['BLOCK']})
@triton.jit
def _backward(DX, DOUT, LUT, sizemax, stride_zdx, stride_zdout, stride_hdout, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pdout = DOUT + pidz * stride_zdout + headid * stride_hdout + rowid * BLOCK + rxm
# Load
# [2021-09-14] TD: Triton's broadcasting is very buggy, I have to read from dx (which is all
# zeros) just so that I can broadcast dout (a scalar).
dx_zeros = tl.load(pdx, mask=check, other=0)
dout = tl.load(pdout)
# Computation
dx = dout - dx_zeros
tl.store(pdx, dx, mask=check)
class _sum(torch.autograd.Function):
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero(as_tuple=False)[:, 0]
rows = layout.nonzero(as_tuple=False)[:, 1]
columns = layout.nonzero(as_tuple=False)[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
n_head = layout.shape[0]
n_row = layout.shape[1] * block
return lut, int(sizes.max()), n_head, n_row
@staticmethod
def forward(ctx, x, spdims, block, lut, maxlut, n_head, n_row, layout, bench, time):
out = torch.zeros((x.shape[0], n_head, n_row), dtype=x.dtype, device=x.device)
# run kernel
M = x.shape[0]
meta = {'BLOCK': block}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, out, lut, maxlut, x.stride(0), out.stride(0), out.stride(1),
force_nc_cache=True, **meta)
# save to context
ctx.save_for_backward(x, lut, layout)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
return out
@staticmethod
def backward(ctx, dout):
# retrieve from context
x, lut, layout = ctx.saved_tensors
block = x.shape[-1]
dx = sparsify_broadcast_tensor(dout, layout, block).expand(-1, -1, -1, block)
# dx = torch.zeros_like(x)
# run kernel
# M = x.shape[0]
# grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
# _backward[grid](dx, dout, lut, ctx.maxlut, dx.stride(0), dout.stride(0), dout.stride(1),
# force_nc_cache=True, BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None, None
class blocksparse_sum:
apply_sum = _sum.apply
def make_lut(self, device):
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _sum.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self, x):
time_y = [None]
lut, maxlut, n_head, n_row = self.make_lut(x.device)
x = blocksparse_sum.apply_sum(
x, self.spdims, self.block, lut, maxlut, n_head, n_row, self.layout, self.bench, time_y
)
return x
| fly-master | src/models/attention/blocksparse_sum.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from fast_transformers.local_product import local_dot_product, local_weighted_average
from src.models.modules.masking import FullMask, LengthMask
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/local_attention.py
class LocalAttention(nn.Module):
"""Implement fast local attention where a query can only attend to
neighboring keys.
In this attention module the query Q_i can only attend to a key K_j if
|i-j| < local_context/2.
Arguments
---------
local_context: The neighborhood to consider for local attention.
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, local_context, softmax_temp=None, attention_dropout=0.0, device=None,
dtype=None):
super().__init__()
self.local_context = local_context
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
query: (B, T, H, E) The tensor containing the query
key: (B, S, H, E) The tensor containing the key
value: (B, S, H, D) The tensor containing the value
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
many queries each sequence in the batch consists of
"""
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
if attn_mask is None:
attn_mask_additive_matrix = torch.zeros(T, S, device=query.device)
else:
attn_mask_additive_matrix = attn_mask.additive_matrix_finite
if key_padding_mask is None:
key_padding_mask_lengths = torch.full(size=(B,), fill_value=S, dtype=torch.long,
device=key.device)
else:
key_padding_mask_lengths = key_padding_mask.lengths
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t h e -> b h t e').contiguous()
key = rearrange(key, 'b s h e -> b h s e').contiguous()
value = rearrange(value, 'b s h d -> b h s d').contiguous()
QK = local_dot_product(
query,
key,
attn_mask_additive_matrix,
key_padding_mask_lengths,
self.local_context
)
attn_local = torch.softmax(softmax_temp * QK, dim=-1)
# If there are no valid keys for a query (because of local and key_padding_mask),
        # then that row of attn_local will be uniform (1 / local_context), since QK is all -1e24 on that row.
# We want that row to actually be zero.
# If there are no valid keys because of attn_mask, we're not going to set that row to zero
# because it's too much work to deal with the attn_mask.
if key_padding_mask is not None and not key_padding_mask.all_ones:
i = rearrange(torch.arange(T, device=query.device), 't -> 1 1 t 1')
j = torch.arange(self.local_context, device=query.device)
local_idx = i - self.local_context // 2 + j
valid_idx_mask = ((local_idx >= 0)
& (local_idx < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
attn_local = attn_local.masked_fill(~valid_idx_mask, 0.0)
A = self.dropout(attn_local)
V_new = local_weighted_average(A, value)
attn = None
if need_weights:
attn = torch.zeros(B, H, T, S, device=query.device)
i = rearrange(torch.arange(T, device=query.device), 't -> 1 1 t 1')
j = torch.arange(self.local_context, device=query.device)
local_idx = i - self.local_context // 2 + j
valid_idx_mask = ((local_idx >= 0)
& (local_idx < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
k = torch.arange(S, device=key.device)
idx = k - i
local_mask = ((idx >= -(self.local_context // 2))
& (idx < (self.local_context + 1) // 2)
& (k < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
attn.masked_scatter_(local_mask, attn_local.masked_select(valid_idx_mask))
return rearrange(V_new, 'b h t d -> b t h d'), attn
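# A minimal usage sketch (not part of the original file); it assumes the
# fast_transformers C++/CUDA local_product extensions are available. Each
# query attends to a window of local_context neighboring keys.
if __name__ == "__main__":
    attention = LocalAttention(local_context=33)
    q = torch.randn(2, 64, 4, 32)  # (batch, seqlen, heads, head_dim)
    k = torch.randn(2, 64, 4, 32)
    v = torch.randn(2, 64, 4, 32)
    out, _ = attention(q, k, v)
    print(out.shape)  # torch.Size([2, 64, 4, 32])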
| fly-master | src/models/attention/local_attention.py |
"""Implement linear attention."""
import torch
import torch.nn as nn
import hydra
from einops import rearrange
from fast_transformers.feature_maps import elu_feature_map
from src.models.modules.masking import TriangularCausalMask
from src.models.attention.performer_utils import causal_linear_attention, linear_attention
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/linear_attention.py
class LinearAttention(nn.Module):
"""Implement unmasked attention using dot product of feature maps in
O(N D^2) complexity.
Given the query, key and value as Q, K, V instead of computing
V' = softmax(Q.mm(K.t()), dim=-1).mm(V),
we make use of a feature map function Φ(.) and perform the following
computation
V' = normalize(Φ(Q).mm(Φ(K).t())).mm(V).
The above can be computed in O(N D^2) complexity where D is the
dimensionality of Q, K and V and N is the sequence length. Depending on the
feature map, however, the complexity of the attention might be limited.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
normalization_eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
"""
def __init__(self, query_dims, feature_map_cfg=None, normalization_eps=1e-6, softmax_temp=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if softmax_temp is not None and feature_map_cfg is not None:
feature_map_cfg.softmax_temp = softmax_temp
self.feature_map = (
hydra.utils.instantiate(feature_map_cfg, query_dims, **factory_kwargs)
if feature_map_cfg is not None else elu_feature_map(query_dims)
)
self.normalization_eps = normalization_eps
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t h e -> b h t e')
key = rearrange(key, 'b s h e -> b h s e')
value = rearrange(value, 'b s h d -> b h s d')
# Apply the feature map to the query and key
self.feature_map.new_feature_map(query.device)
Q = self.feature_map.forward_queries(query)
K = self.feature_map.forward_keys(key)
# Apply the key padding mask and make sure that the attn_mask is
# all_ones or is causal
causal = attn_mask is not None and attn_mask.lower_triangular
if not (attn_mask is None or attn_mask.all_ones or causal):
raise RuntimeError(("LinearAttention does not support arbitrary attention masks"))
if causal:
            assert query.shape[-2] == key.shape[-2], 'query and key must have the same sequence length'
if key_padding_mask is not None and not key_padding_mask.all_ones:
K.masked_fill_(~rearrange(key_padding_mask.bool_matrix, 'b s -> b 1 s 1'), 0.0)
attn_fn = causal_linear_attention if causal else linear_attention
out, attn = attn_fn(Q, K, value, eps=self.normalization_eps, need_weights=need_weights)
out = rearrange(out, 'b h s d -> b s h d')
return out, attn
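# A minimal usage sketch (not part of the original file); it assumes the
# fast_transformers dependency is installed so that the default elu feature
# map is used (feature_map_cfg=None).
if __name__ == "__main__":
    attention = LinearAttention(query_dims=32)
    q = torch.randn(2, 128, 4, 32)  # (batch, seqlen, heads, head_dim)
    k = torch.randn(2, 128, 4, 32)
    v = torch.randn(2, 128, 4, 32)
    out, _ = attention(q, k, v)  # unmasked, non-causal linear attention
    print(out.shape)  # torch.Size([2, 128, 4, 32])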
| fly-master | src/models/attention/linear_attention.py |
import math
import torch
from einops import rearrange
def gaussian_orthogonal_random_matrix(nrows, ncols, scaling=0, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
nblocks = int(math.ceil(nrows / ncols))
# TD [2021-10-28]: Sometimes QR fails on CUDA
unstructured_blocks = torch.randn((nblocks, ncols, ncols), device='cpu')
q, r = torch.linalg.qr(unstructured_blocks)
# To make sure Q is uniform from the Haar distribution https://arxiv.org/pdf/math-ph/0609050.pdf
q *= rearrange(torch.diagonal(r, dim1=-2, dim2=-1).sign(), 'b c -> b 1 c')
q = q.to(**factory_kwargs)
# TD [2021-10-28] Idk why the transpose is necessary. I suspect it isn't.
# https://github.com/google-research/google-research/blob/ea313c6e96acce6c863de41615c6cf4079b8ca94/performer/fast_attention/jax/fast_attention.py#L362
q = rearrange(q, 'b c c1 -> b c1 c')
g_ortho = rearrange(q, 'b c1 c -> (b c1) c')[:nrows]
if scaling == 0:
multiplier = torch.randn((nrows, ncols), **factory_kwargs).norm(dim=1)
return rearrange(multiplier, 'r -> r 1') * g_ortho
elif scaling == 1:
return math.sqrt(ncols) * g_ortho
else:
raise ValueError(f'Invalid scaling {scaling}')
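# A minimal usage sketch (not part of the original file): draw a 256 x 64
# orthogonal random projection of the kind used for Performer random features.
if __name__ == "__main__":
    proj = gaussian_orthogonal_random_matrix(256, 64, scaling=0)
    print(proj.shape)  # torch.Size([256, 64])
    # Rows within each 64 x 64 block are mutually orthogonal.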
| fly-master | src/models/attention/projection_utils.py |
import torch.nn.functional as F
from src.models.modules.masking import FullMask, LengthMask
def pad_mask(mask, pad_length, left=True, value=True):
assert value in [True, False]
assert isinstance(mask, (FullMask, LengthMask))
if isinstance(mask, FullMask):
pad = (pad_length, 0) if left else (0, pad_length)
return FullMask(F.pad(mask._mask, pad, value=value))
elif isinstance(mask, LengthMask):
if value:
return LengthMask(mask._lengths + pad_length, max_len=mask._max_len + pad_length,
device=mask._lengths.device)
else:
return LengthMask(mask._lengths, max_len=mask._max_len + pad_length,
device=mask._lengths.device)
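# A minimal usage sketch (not part of the original file): extend a LengthMask
# so that newly padded key positions are marked invalid (value=False).
if __name__ == "__main__":
    import torch
    lengths = torch.tensor([3, 5])
    mask = LengthMask(lengths, max_len=5)
    padded = pad_mask(mask, pad_length=3, left=False, value=False)
    # padded keeps lengths [3, 5] but its max_len is extended from 5 to 8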
| fly-master | src/models/attention/mask_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/exact_topk_attention.py
class ExactTopKAttention(nn.Module):
"""Implement the oracle top-k softmax attention.
Arguments
---------
        topk: The number of top keys to attend to (default: 32)
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, topk, softmax_temp=None, attention_dropout=0.0, device=None, dtype=None):
super().__init__()
self.topk = topk
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
topk = min(self.topk, S)
# Scale the query instead of applying the softmax temperature to the
# dot products
query = query * softmax_temp
# Compute the unnormalized attention and apply the masks
QK = torch.einsum("bthe,bshe->bhts", query, key)
if attn_mask is not None and not attn_mask.all_ones:
QK.masked_fill_(~attn_mask.bool_matrix, float('-inf'))
if key_padding_mask is not None and not key_padding_mask.all_ones:
QK.masked_fill_(rearrange(~key_padding_mask.bool_matrix, 'b s -> b 1 1 s'),
float('-inf'))
_, topk_idx = torch.topk(QK, topk, dim=-1, sorted=False)
non_topk_mask = torch.ones_like(QK, dtype=torch.bool)
non_topk_mask.scatter_(dim=-1, index=topk_idx, src=torch.zeros_like(non_topk_mask))
QK.masked_fill_(non_topk_mask, float('-inf'))
# Compute the attention and the weighted average
attn = torch.softmax(QK, dim=-1)
A = self.dropout(attn)
output = torch.einsum("bhts,bshd->bthd", A, value)
return output, attn if need_weights else None
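# A minimal usage sketch (not part of the original file): keep only the top-8
# keys per query before the softmax. With need_weights=True, each attention
# row has at most topk nonzero entries.
if __name__ == "__main__":
    attention = ExactTopKAttention(topk=8)
    q = torch.randn(2, 16, 4, 32)  # (batch, seqlen, heads, head_dim)
    k = torch.randn(2, 16, 4, 32)
    v = torch.randn(2, 16, 4, 32)
    out, attn = attention(q, k, v, need_weights=True)
    print(out.shape, (attn > 0).sum(-1).unique())  # every row keeps 8 keys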
| fly-master | src/models/attention/topk_attention.py |
import math
import torch
from torch.nn import functional as F
from einops import rearrange
def block_butterfly_multiply(twiddle, input, increasing_stride=True,
output_size=None):
"""
twiddle: (nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size)
input: (batch_size, nstacks, block_size * n)
"""
batch_size, nstacks, input_size = input.shape
nblocks = twiddle.shape[1]
log_n = twiddle.shape[2]
block_size = twiddle.shape[-1]
n = 1 << log_n
assert twiddle.shape == (nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size)
# Pad or trim input to size block_size * n
input = (F.pad(input, (0, block_size * n - input_size)) if input_size < block_size * n
else input[:, :, :block_size * n])
output_size = block_size * n if output_size is None else output_size
assert output_size <= block_size * n
output = input.contiguous()
cur_increasing_stride = increasing_stride
for block in range(nblocks):
for idx in range(log_n):
log_stride = idx if cur_increasing_stride else log_n - 1 - idx
stride = 1 << log_stride
# shape (nstacks, n // (2 * stride), 2, 2, stride, block_size, block_size)
t = rearrange(twiddle[:, block, idx],
's (diagblk stride) i j k l -> s diagblk i j stride k l', stride=stride)
output_reshape = rearrange(output,
'b s (diagblk j stride l) -> b s diagblk j stride l',
stride=stride, j=2, l=block_size)
output = torch.einsum('s d i j t k l, b s d j t l -> b s d i t k',
t, output_reshape)
output = rearrange(output, 'b s diagblk i stride k -> b s (diagblk i stride k)')
# output_reshape = output.view(
# batch_size, nstacks, n // (2 * stride), 1, 2, stride, block_size, 1)
# output = (t @ output_reshape).sum(dim=4).reshape(batch_size, nstacks, block_size * n)
cur_increasing_stride = not cur_increasing_stride
return output.view(batch_size, nstacks, block_size * n)[:, :, :output_size]
def block_butterfly_factor_multiply(twiddle, input, factor_idx, increasing_stride=True, output_size=None):
"""
twiddle: (nstacks, log_n, n // 2, 2, 2, block_size, block_size)
input: (batch_size, nstacks, block_size * n)
"""
batch_size, nstacks, input_size = input.shape
block_size = twiddle.shape[-1]
log_n = twiddle.shape[1]
n = 1 << log_n
assert twiddle.shape == (nstacks, log_n, n // 2, 2, 2, block_size, block_size)
# Pad or trim input to size block_size * n
input = (F.pad(input, (0, block_size * n - input_size)) if input_size < block_size * n
else input[:, :, :block_size * n])
output_size = block_size * n if output_size is None else output_size
assert output_size <= block_size * n
output = input.contiguous()
cur_increasing_stride = increasing_stride
idx = factor_idx
log_stride = idx if cur_increasing_stride else log_n - 1 - idx
stride = 1 << log_stride
# shape (nstacks, n // (2 * stride), 2, 2, stride, block_size, block_size)
t = rearrange(twiddle[:, idx],
's (diagblk stride) i j k l -> s diagblk i j stride k l', stride=stride)
output_reshape = rearrange(output,
'b s (diagblk j stride l) -> b s diagblk j stride l',
stride=stride, j=2, l=block_size)
output = torch.einsum('s d i j t k l, b s d j t l -> b s d i t k',
t, output_reshape)
output = rearrange(output, 'b s diagblk i stride k -> b s (diagblk i stride k)')
return output.view(batch_size, nstacks, block_size * n)[:, :, :output_size]
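# A minimal shape-check sketch (not part of the original file): one stack, one
# block, log_n = 3 (so n = 8) and block_size = 2, giving inputs and outputs of
# size block_size * n = 16 per stack.
if __name__ == "__main__":
    nstacks, nblocks, log_n, block_size = 1, 1, 3, 2
    n = 1 << log_n
    twiddle = torch.randn(nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size)
    x = torch.randn(4, nstacks, block_size * n)  # (batch, nstacks, block_size * n)
    out = block_butterfly_multiply(twiddle, x)
    print(out.shape)  # torch.Size([4, 1, 16])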
| fly-master | src/models/layers/block_butterfly_multiply.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.models.layers.structured_linear import StructuredLinear
from src.ops.blockdiag_multiply import blockdiag_multiply
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
self.shuffle = shuffle
self.weight = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.reset_parameters()
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# Scale by sqrt because the weight is sparse
scaling = math.sqrt(dense_weight.numel() / self.weight.numel())
dense_weight *= scaling
with torch.no_grad():
nblocks = self.weight.shape[0]
self.weight.copy_(rearrange(dense_weight, '(b o) (b1 i) -> b b1 o i',
b=nblocks, b1=nblocks)[0])
@property
def saving(self):
return self.weight.numel() / (self.in_features * self.out_features)
def forward_matmul(self, x):
x = self.preprocess(x)
if self.shuffle:
x = rearrange(x, '... (group c_per_group) -> ... (c_per_group group)',
group=self.weight.shape[0]) # group=nblocks
output = blockdiag_multiply(x, self.weight)
return self.postprocess(output)
class BlockdiagSparsityConfig:
def __init__(self, nblocks, block=32, global_size=0):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
self.nblocks = nblocks
self.block = block
self.global_size = global_size
def make_layout(self, out_features, in_features):
assert out_features % self.block == 0 and in_features % self.block == 0
assert out_features % self.nblocks == 0 and in_features % self.nblocks == 0
layout = torch.block_diag(*[torch.ones(out_features // self.nblocks,
in_features // self.nblocks,
dtype=torch.int32)] * self.nblocks)
if self.global_size > 0:
layout[:self.global_size] = 1
layout[:, :self.global_size] = 1
# Convert from (out_features, in_features) mask to
# (out_features // block, in_features // block) mask
layout = rearrange(layout, '(p blksz) (r blksz1) -> p r (blksz blksz1)',
blksz=self.block, blksz1=self.block)
return (layout > 0).any(dim=-1).int()
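# A minimal usage sketch (not part of the original file): the block-level
# layout for a 128 x 128 weight split into 4 diagonal blocks, with a 32 x 32
# mask granularity, is simply the 4 x 4 identity pattern.
if __name__ == "__main__":
    config = BlockdiagSparsityConfig(nblocks=4, block=32, global_size=0)
    layout = config.make_layout(128, 128)
    print(layout)  # 4 x 4 identity-like mask of kept blocks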
| fly-master | src/models/layers/blockdiag_linear.py |
import math
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
def blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2):
"""
This implementation is slow but more likely to be correct.
There are 3 implementations, which should all yield the same answer
Arguments:
x: (batch, n)
w1_bfly: (k, q, p), where k = n / p
w2_bfly: (l, s, r), where l = k * q / r = n * q / (p * r)
Outputs:
out: (batch, m), where m = l * s = n * s * q / (p * r)
"""
if version not in [1, 2, 3]:
raise NotImplementedError('version must be either 1, 2, or 3')
batch, n = x.shape
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
assert k * p == n
assert l * r == k * q
x_reshaped = rearrange(x, 'b (k p) -> b k p', k=k)
if version == 1: # Implementation 1 (only works for when k = q = p = l = s = r = sqrt(n))
assert k == q == p == l == s == r == int(math.sqrt(n))
return torch.einsum('bkp,kqp,qlk->blq', x_reshaped, w1_bfly, w2_bfly).reshape(batch, n)
elif version == 2: # Implementation 2
out1 = torch.einsum('kqp,bkp->bkq', w1_bfly, x_reshaped)
out1 = rearrange(rearrange(out1, 'b k q -> b (k q)'), 'b (r l) -> b l r', l=l)
return torch.einsum('lsr,blr->bsl', w2_bfly, out1).reshape(batch, s * l)
# Implementation 3: most likely to be correct, but it's the slowest
elif version == 3:
w1_dense = torch.block_diag(*torch.unbind(w1_bfly, dim=0))
out1 = F.linear(x, w1_dense)
out1 = rearrange(out1, 'b (r l) -> b (l r)', l=l)
w2_dense = torch.block_diag(*torch.unbind(w2_bfly, dim=0))
out2 = F.linear(out1, w2_dense)
out2 = rearrange(out2, 'b (l s) -> b (s l)', l=l)
return out2
class BlockdiagButterflyMultiply(torch.autograd.Function):
"""This is a faster implementation, with careful memory copies for the fastest
bmm performance.
The backward pass is also written manually with careful memory copies.
Arguments:
x: (batch, n)
w1_bfly: (k, q, p), where k = n / p
w2_bfly: (l, s, r), where l = k * q / r = n * q / (p * r)
Outputs:
out: (batch, m), where m = l * s = n * s * q / (p * r)
"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, w1_bfly, w2_bfly):
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
assert k * p == n
assert l * r == k * q
x_reshaped = x.reshape(batch_dim, k, p).transpose(0, 1)
out1 = torch.empty(batch_dim, k, q, device=x.device, dtype=x.dtype).transpose(0, 1)
out1 = torch.bmm(x_reshaped, w1_bfly.transpose(-1, -2), out=out1)
out1 = out1.transpose(0, 1).reshape(batch_dim, r, l).transpose(-1, -2).contiguous().transpose(0, 1)
out2 = torch.empty(batch_dim, l, s, device=x.device, dtype=x.dtype).transpose(0, 1)
out2 = torch.bmm(out1, w2_bfly.transpose(-1, -2), out=out2)
out2 = out2.permute(1, 2, 0).reshape(*batch_shape, s * l)
ctx.save_for_backward(x, w1_bfly, w2_bfly, out1)
return out2
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, dout):
x, w1_bfly, w2_bfly, out1 = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
k, q, p = w1_bfly.shape
l, s, r = w2_bfly.shape
# assert k * p == n
# assert l * r == k * q
dx, dw1_bfly, dw2_bfly = None, None, None
# dout_reshaped = dout.reshape(batch_dim, sqrtn, sqrtn).permute(2, 1, 0).contiguous()
dout_reshaped = dout.reshape(batch_dim, s, l).transpose(-1, -2).contiguous()
dout_reshaped = dout_reshaped.transpose(0, 1)
if ctx.needs_input_grad[2]:
# dw2_bfly = torch.empty(l, s, r, device=w2_bfly.device, dtype=w2_bfly.dtype)
# dw2_bfly = torch.bmm(dout_reshaped.transpose(-1, -2), out1, out=dw2_bfly)
dw2_bfly = torch.bmm(dout_reshaped.transpose(-1, -2), out1.conj())
if ctx.needs_input_grad[1] or ctx.needs_input_grad[0]:
dout1 = torch.empty(batch_dim, l, r, device=x.device, dtype=x.dtype).transpose(0, 1)
dout1 = torch.bmm(dout_reshaped, w2_bfly.conj(), out=dout1)
dout1 = dout1.transpose(0, 1).transpose(-1, -2).contiguous().reshape(batch_dim, k, q).transpose(0, 1)
# dout1 = dout1.permute(1, 2, 0).contiguous().transpose(0, 1)
if ctx.needs_input_grad[0]:
dx = torch.empty(batch_dim, k, p, device=x.device, dtype=x.dtype)
dx = torch.bmm(dout1, w1_bfly.conj(), out=dx.transpose(0, 1)).transpose(0, 1).reshape(*batch_shape, n)
if ctx.needs_input_grad[1]:
x_reshaped = x.reshape(batch_dim, k, p).transpose(0, 1)
dw1_bfly = torch.bmm(dout1.transpose(-1, -2), x_reshaped.conj())
return dx, dw1_bfly, dw2_bfly
blockdiag_butterfly_multiply = BlockdiagButterflyMultiply.apply
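# Editor-added sanity check (not part of the original file): the fast autograd
# implementation should agree with the slow reference above for shapes where
# k * p == n and l * r == k * q.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, n = 3, 16
    k = q = p = l = s = r = 4
    x = torch.randn(batch, n)
    w1_bfly = torch.randn(k, q, p)
    w2_bfly = torch.randn(l, s, r)
    out_ref = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=3)
    out = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly)
    print(torch.allclose(out, out_ref, atol=1e-5))  # True, up to numerical error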
| fly-master | src/models/layers/blockdiag_butterfly_multiply.py |
import math
import torch
import torch.nn as nn
from torch.nn import init
from einops import rearrange
from src.models.layers.structured_linear import StructuredLinear
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.utils.utils import get_logger
logger = get_logger()
class MonarchLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, **kwargs):
super().__init__(*args, **kwargs)
in_blksz = int(math.ceil(self.in_features / nblocks))
out_blksz = int(math.ceil(self.out_features / nblocks))
self.in_features_extended = in_blksz * nblocks
self.out_features_extended = out_blksz * nblocks
if self.in_features_extended < self.out_features_extended:
self.blkdiag1 = nn.Parameter(torch.empty(nblocks, in_blksz, in_blksz))
self.blkdiag2 = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
else:
self.blkdiag1 = nn.Parameter(torch.empty(nblocks, out_blksz, in_blksz))
self.blkdiag2 = nn.Parameter(torch.empty(nblocks, out_blksz, out_blksz))
self.reset_parameters()
logger.info(f'Linear class {self.__class__}: saving={self.saving}')
def reset_parameters(self) -> None:
# Mimic init.kaiming_uniform: https://github.com/pytorch/pytorch/blob/24087d07ca7ffa244575d259711dd7c99245a67a/torch/nn/init.py#L360
for blkdiag in [self.blkdiag1, self.blkdiag2]:
fan_in = blkdiag.shape[-1]
gain = init.calculate_gain(nonlinearity='leaky_relu', param=math.sqrt(5))
std = gain / math.sqrt(fan_in)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
blkdiag.uniform_(-bound, bound)
self.reset_parameters_bias()
@property
def saving(self):
return ((self.blkdiag1.numel() + self.blkdiag2.numel())
/ (self.in_features * self.out_features))
def forward_matmul(self, x):
output = blockdiag_butterfly_multiply(self.preprocess(x), self.blkdiag1, self.blkdiag2)
return self.postprocess(output)
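# Editor-added usage sketch (not part of the original file). For a square d-by-d
# layer, the two block-diagonal factors hold 2 * d^2 / nblocks parameters, so
# saving = 2 / nblocks. Assumes the StructuredLinear base class handles the bias.
if __name__ == '__main__':
    layer = MonarchLinear(64, 64, nblocks=4)
    x = torch.randn(8, 64)
    print(layer(x).shape)   # torch.Size([8, 64])
    print(layer.saving)     # 2 / 4 = 0.5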
| fly-master | src/models/layers/monarch_linear.py |
import torch
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
elif mode == 'fan_avg':
denom = (fan_in + fan_out) / 2
variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
 | fly-master | src/models/layers/weight_init_helper.py |
 | fly-master | src/models/layers/__init__.py |
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from einops import rearrange, repeat
import hydra
from src.utils.utils import get_logger
logger = get_logger()
from src.utils.padding import pad_to_multiple
try:
from src.models.attention.blocksparse_matmul import matmul
except ImportError:
logger.info('triton is not installed')
matmul = None
from src.ops.butterfly_factor import butterfly_factor_to_matrix
from src.models.attention.blocksparse_utils import sparsify_tensor, densify_tensor
try:
from pytorch_block_sparse import BlockSparseMatrix
from pytorch_block_sparse.block_sparse_linear import BlockSparseLinearFunction
except ImportError:
logger.info('pytorch_block_sparse is not installed')
BlockSparseMatrix = None
BlockSparseLinearFunction = None
class BlockSparseLinear(nn.Module):
"""
Arguments
---------
    sparsity_config: optional: this parameter determines the sparsity pattern configuration; it is based on the SparsityConfig class.
"""
def __init__(self, in_features, out_features, sparsity_config, bias=True,
backend='triton', weight_decay=True):
"""
        weight_decay: if False, mark the sparse weight with _no_weight_decay so the optimizer skips weight decay for it.
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.sparsity_config = hydra.utils.instantiate(sparsity_config)
self.block_size = self.sparsity_config.block
self.in_features_extended = int(math.ceil(in_features / self.block_size)) * self.block_size
self.out_features_extended = int(math.ceil(out_features / self.block_size)) * self.block_size
# initialize sparse layout and register as buffer
layout = self.sparsity_config.make_layout(self.out_features_extended,
self.in_features_extended)
self.register_buffer("layout", layout)
self.nnz_blocks = self.layout.sum().item()
if backend is None:
backend = 'huggingface' if self.block_size == 32 else 'triton'
if backend not in ['huggingface', 'triton', 'dense']:
raise NotImplementedError(f'backend {backend} not supported')
if backend == 'huggingface':
if self.block_size != 32:
raise NotImplementedError(f'backend huggingface requires block size to be 32')
if BlockSparseLinearFunction is None or BlockSparseMatrix is None:
raise ImportError(f'backend huggingface but package pytorch_block_sparse cannot be imported')
self.backend = backend
self.weight = nn.Parameter(torch.empty(self.nnz_blocks, self.block_size, self.block_size))
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
if self.backend == 'huggingface':
self.weight = nn.Parameter(
rearrange(self.weight, 'nnz blksz blksz1 -> (nnz blksz1) blksz').contiguous()
)
elif self.backend == 'triton':
self.weight = nn.Parameter(
rearrange(self.weight, 'nnz blksz blksz1 -> 1 nnz blksz blksz1')
)
if not weight_decay:
self.weight._no_weight_decay = True
self.ops_cache = dict()
logger.info(f'Linear class {self.__class__}: saving={self.saving}')
def reset_parameters(self) -> None:
self.set_weights_from_dense_init(dense_init_fn_=partial(init.kaiming_uniform_, a=math.sqrt(5)))
fan_in, fan_out = self.in_features_extended, self.out_features_extended
if self.bias is not None:
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features_extended, self.in_features_extended,
device=self.weight.device, dtype=self.weight.dtype)
dense_init_fn_(dense_weight)
# We scale depending on how many nonzero cols there are in each row.
ncol = self.layout.shape[-1]
n_nonzero_cols = self.layout.sum(dim=-1, keepdim=True)
scaling = torch.sqrt(ncol / n_nonzero_cols)
dense_weight *= repeat(scaling, 'm 1 -> (b m) 1', b=self.block_size)
with torch.no_grad():
self.weight.copy_(sparsify_tensor(rearrange(dense_weight, 'o i -> 1 o i'),
rearrange(self.layout, 'o_blk i_blk -> 1 o_blk i_blk')))
@property
def saving(self):
return self.nnz_blocks * self.block_size ** 2 / (self.in_features * self.out_features)
# add to cache
def get_ops(self):
if self.backend not in self.ops_cache:
if self.backend == 'triton':
matmul_dds_op = matmul(self.layout.cpu(), self.block_size, 'dds',
trans_a=False, trans_b=True)
self.ops_cache[self.backend] = matmul_dds_op
elif self.backend == 'huggingface':
weight_bsm = BlockSparseMatrix(
(self.out_features_extended, self.in_features_extended),
self.layout.bool().to('cuda'),
data=self.weight,
block_shape=(self.block_size, self.block_size)
)
self.ops_cache[self.backend] = weight_bsm
elif self.backend == 'dense':
self.ops_cache[self.backend] = None
return self.ops_cache[self.backend]
def forward(self, x):
"""
Arguments
---------
x: (..., in_features)
Return:
out: (..., out_features)
"""
if not x.is_cuda and self.backend != 'dense':
raise NotImplementedError('Backend triton and huggingface only support CUDA tensors')
in_features = x.shape[-1]
if in_features < self.in_features_extended:
x = F.pad(x, (0, self.in_features_extended - in_features))
if self.backend == 'huggingface':
weight_bsm = self.get_ops()
output = BlockSparseLinearFunction.apply(x, self.weight, weight_bsm)
elif self.backend == 'triton':
matmul_dds_op = self.get_ops()
batch_shape = x.shape[:-1]
x = x.reshape(-1, x.shape[-1])
batch_dim = x.shape[0]
x = pad_to_multiple(x, multiple=self.block_size, dims=0)
output = rearrange(matmul_dds_op(rearrange(x, 'b d -> 1 1 b d'), self.weight),
'1 1 b d -> b d')
if output.shape[0] > batch_dim:
output = output[:batch_dim, :]
output = output.reshape(batch_shape + (output.shape[-1],))
elif self.backend == 'dense':
weight = rearrange(densify_tensor(self.weight, rearrange(self.layout, 'p r -> 1 p r')),
'1 m n -> m n')
output = F.linear(x, weight)
out_features_extended = output.shape[-1]
if out_features_extended > self.out_features:
output = output[..., :self.out_features]
# Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32
return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output
class FlatBlockButterflySparsityConfig:
def __init__(self, butterfly_size, n_factors, block=32, global_size=0, shuffle=False):
"""shuffle: apply channel_shuffle operation before the matmul as in ShuffleNet
"""
self.block = block
log_n = int(math.log2(butterfly_size))
if butterfly_size != 2 ** log_n or butterfly_size < 2:
raise NotImplementedError('butterfly_size must be a power of 2')
if not (1 <= n_factors <= log_n):
raise NotImplementedError('n_factors must be a between 1 and log_2(butterfly_size)')
self.butterfly_size = butterfly_size
self.n_factors = n_factors
self.global_size = global_size
self.shuffle = shuffle
def make_layout(self, out_features, in_features):
assert out_features % self.block == 0 and in_features % self.block == 0
twiddle = torch.ones(self.butterfly_size // 2, 2, 2)
layout = sum(butterfly_factor_to_matrix(twiddle, index) for index in range(self.n_factors))
layout = layout.bool().int()
if self.shuffle:
log_n = int(math.log2(self.butterfly_size))
ngroups = 2 ** (log_n - self.n_factors)
layout = rearrange(layout, 'm (group c_per_group) -> m (c_per_group group)',
group=ngroups)
# Convert from (butterfly_size, butterfly_size) mask to (out_features, in_features) mask
layout = repeat(layout, 'b b1 -> (b f) (b1 f1)',
f=out_features // self.butterfly_size, f1=in_features // self.butterfly_size)
if self.global_size > 0:
layout[:self.global_size] = 1
layout[:, :self.global_size] = 1
# Convert from (out_features, in_features) mask to
# (out_features // block, in_features // block) mask
layout = rearrange(layout, '(p blksz) (r blksz1) -> p r (blksz blksz1)',
blksz=self.block, blksz1=self.block)
return (layout > 0).any(dim=-1).int()
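# Editor-added illustration (not part of the original file): build the block-level
# butterfly layout that BlockSparseLinear consumes (it normally receives the config
# through hydra). With butterfly_size=8, n_factors=2 and block=32, a 256x256 weight
# is tiled into an 8x8 grid of 32x32 blocks and the layout marks which blocks are kept.
if __name__ == '__main__':
    cfg = FlatBlockButterflySparsityConfig(butterfly_size=8, n_factors=2, block=32)
    layout = cfg.make_layout(256, 256)
    print(layout.shape)                                   # torch.Size([8, 8])
    print(f'{layout.sum().item()} of {layout.numel()} blocks kept')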
| fly-master | src/models/layers/blocksparse_linear.py |
""" MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.layers.fastlinear import FastLinear, ButterflyLinear, RandomLinear, SLLinear, \
SLXLinear, TopkLinear, TopkLrLinear, ButterflyGlobalLinear, NinjaTurtleLinear
from src.models.layers.maskedlinear import MaskLinearWrap
import math
from einops import rearrange
import hydra
from src.ops.butterfly_factor import butterfly_factor_to_matrix
@torch.jit.script
def bias_gelu_scripted(x, bias):
return F.gelu(x + bias)
class MlpCustom(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
act_fn=None, drop=0., drop_btw_fcs=True, linear1_cfg=None, linear2_cfg=None):
"""TD [2021-10-27] act_fn takes precedence over act_layer if set.
        This is to support the PyTorch 1.10 Transformer interface, which constructs the activation
*function*, not the activation *layer*.
"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
if linear1_cfg is None:
self.fc1 = nn.Linear(in_features, hidden_features)
else:
self.fc1 = hydra.utils.instantiate(linear1_cfg, in_features, hidden_features,
_recursive_=False)
self.act = act_layer() if act_fn is None else act_fn
if linear2_cfg is None:
self.fc2 = nn.Linear(hidden_features, out_features)
else:
self.fc2 = hydra.utils.instantiate(linear2_cfg, hidden_features, out_features,
_recursive_=False)
self.drop = nn.Dropout(drop)
self.drop_btw_fcs = drop_btw_fcs
# TD [2022-01-08] bias_gelu_scripted was working on Pytorch 1.10.1 but stops
# working on Pytorch 1.11.0a0+b6df043 (nvcr.io pytorch 21.12) with error
# RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::gelu
# So I'm disabling fused_bias_gelu for now
# self._fused_bias_gelu = ((act_fn is F.gelu or act_layer is nn.GELU)
# and self.fc1.bias is not None
# and hasattr(self.fc1, 'forward_matmul'))
self._fused_bias_gelu = False
def forward(self, x):
if self._fused_bias_gelu and x.is_cuda:
x = self.fc1.forward_matmul(x)
x = bias_gelu_scripted(x, self.fc1.bias.to(dtype=x.dtype))
else:
x = self.fc1(x)
x = self.act(x)
if self.drop_btw_fcs:
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/mlp.py
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.saving = 1.0
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GluMlp(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
# override init of fc1 w/ gate portion set to weight near zero, bias=1
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)
def forward(self, x):
x = self.fc1(x)
x, gates = x.chunk(2, dim=-1)
x = x * self.act(gates)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GatedMlp(nn.Module):
""" MLP as used in gMLP
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
gate_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
if gate_layer is not None:
assert hidden_features % 2 == 0
self.gate = gate_layer(hidden_features)
hidden_features = hidden_features // 2 # FIXME base reduction on gate property?
else:
self.gate = nn.Identity()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.gate(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
# Legacy code... delete soon
class ButterflyFactorBanditNewMlp(nn.Module):
""" ButterflyMlp, similar to Mlp layers in MLP-Mixer
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., factor=0, base_size=3):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
self.drop = nn.Dropout(drop)
self.register_buffer('sparse_mask', torch.eye(in_features))
butterfly_1d = torch.eye(int(math.sqrt(in_features)))
window_size = base_size**factor
for i in range(0, int(math.sqrt(in_features)) - window_size):
# for j in range(window_size):
butterfly_1d[i, i + window_size] = 1.
butterfly_1d[i + window_size, i] = 1.
self.sparse_mask = torch.kron(butterfly_1d, butterfly_1d)
def forward(self, x):
# sparse
attn_s = torch.einsum("ds,td->st", self.fc1.weight, self.fc2.weight)
attn = attn_s*self.sparse_mask
attn = self.drop(attn)
x = torch.einsum("bds,st->bdt", x, attn) + self.fc2.bias
x = self.act(x)
return x
class ButterflyFactorNewMlp(nn.Module):
""" ButterflyMlp, similar to Mlp layers in MLP-Mixer
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., factor=0, base_size=3):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
self.drop = nn.Dropout(drop)
self.register_buffer('sparse_mask', torch.zeros([in_features, out_features]))
b = base_size
log_b_n = int(math.log(math.sqrt(in_features), b))
n = b ** log_b_n
twiddle = torch.arange(1, n * b + 1, dtype=torch.float).reshape(n // b, b, b)
butterfly_1d = butterfly_factor_to_matrix(twiddle, factor_index=factor)
self.sparse_mask = torch.kron(butterfly_1d, butterfly_1d)
self.sparse_mask[self.sparse_mask > 0] = 1.
def forward(self, x):
# sparse
attn_s = torch.einsum("ds,td->st", self.fc1.weight, self.fc2.weight)
attn = attn_s*self.sparse_mask
attn = self.drop(attn)
x = torch.einsum("bds,st->bdt", x, attn) + self.fc2.bias
x = self.act(x)
return x
class ButterflyNewMlp(nn.Module):
""" ButterflyMlp, similar to Mlp layers in MLP-Mixer
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=6, stripes=3):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
self.drop = nn.Dropout(drop)
self.local_stride = window_size
self.register_buffer('sparse_mask', torch.zeros([in_features, out_features]))
stride = int(math.sqrt(in_features))
for k in range(stripes):
patch_start = stride * k
for i in range(0, in_features, window_size):
self.sparse_mask[patch_start + i:patch_start + i + window_size, i:i + window_size] = 1.
self.sparse_mask = (self.sparse_mask.bool() | self.sparse_mask.bool().t()).float()
def forward(self, x):
# sparse
attn_s = torch.einsum("ds,td->st", self.fc1.weight, self.fc2.weight)
attn = attn_s*self.sparse_mask
attn = self.drop(attn)
x = torch.einsum("bds,st->bdt", x, attn) + self.fc2.bias
x = self.act(x)
return x
class RandomSparseNewMlp(nn.Module):
""" SparseLrMLP, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., sparse_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
self.drop = nn.Dropout(drop)
self.register_buffer('sparse_mask', torch.zeros([in_features, out_features]))
nnz = int(in_features*out_features*sparse_ratio)
ind = torch.randperm(in_features*out_features)[:nnz]
tmp_mask = torch.zeros([in_features*out_features])
tmp_mask[ind] = 1.
self.sparse_mask.data = tmp_mask.view(in_features, out_features)
def forward(self, x):
# sparse
attn_s = torch.einsum("ds,td->st", self.fc1.weight, self.fc2.weight) + self.fc2.bias
attn = attn_s*self.sparse_mask
attn = self.drop(attn)
x = torch.einsum("bds,st->bdt", x, attn)
x = self.act(x)
return x
class NewMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
# dense
attn = torch.einsum("ds,td->st", self.fc1.weight, self.fc2.weight) + self.fc2.bias
attn = self.drop(attn)
x = torch.einsum("bds,st->bdt", x, attn)
x = self.act(x)
return x
class ButterflySimpleMlp(nn.Module):
""" SimpleButterflyMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3):
super().__init__()
out_features = out_features or in_features
self.fc = nn.Linear(in_features, out_features, bias=True)
self.act = act_layer()
self.drop = nn.Dropout(drop)
self.local_stride = window_size
self.register_buffer('sparse_mask', torch.zeros([in_features, out_features]))
stride = int(math.sqrt(in_features))
for k in range(window_size):
patch_start = stride * k
for i in range(0, in_features):
self.sparse_mask[patch_start + i:patch_start + i + window_size, i:i + window_size] = 1
self.sparse_mask = (self.sparse_mask.bool() | self.sparse_mask.bool().t()).float()
def forward(self, x):
attn = x @ (self.fc.weight * self.sparse_mask) + self.fc.bias
attn = self.act(attn)
attn = self.drop(attn)
return attn
class SimpleMlp(nn.Module):
""" SimpleMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
self.fc = nn.Linear(in_features, out_features, bias=True)
self.act = act_layer()
self.drop = nn.Dropout(drop)
def forward(self, x):
attn = self.fc(x)
attn = self.act(attn)
attn = self.drop(attn)
return attn
class NinjaTurtleMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,
window_size=3, step=1, stripes_1=3, stripes_2=1, gtoken=1, block_size=None):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = NinjaTurtleLinear(in_features, hidden_features, bias=True, window_size=window_size,
stripes=stripes_1, step=step, gtoken=gtoken, block_size=block_size)
self.act = act_layer()
self.fc2 = NinjaTurtleLinear(hidden_features, out_features, bias=True, window_size=window_size,
stripes=stripes_2, step=step, gtoken=gtoken, block_size=block_size)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ButterflyGlobalMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,
window_size=3, step=1, stripes_1=3, stripes_2=1, gtoken=1, block_size=None):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = ButterflyGlobalLinear(in_features, hidden_features, bias=True, window_size=window_size,
stripes=stripes_1, step=step, gtoken=gtoken, block_size=block_size)
self.act = act_layer()
self.fc2 = ButterflyGlobalLinear(hidden_features, out_features, bias=True, window_size=window_size,
stripes=stripes_2, step=step, gtoken=gtoken, block_size=block_size)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class TopkGradMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,
topk_ratio=0.1, window_size=1, stripes=1, step=1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = MaskLinearWrap(in_features, hidden_features, bias=True, topk_ratio=topk_ratio,
window_size=window_size, stripes=stripes, step=step)
self.act = act_layer()
self.fc2 = MaskLinearWrap(hidden_features, out_features, bias=True, topk_ratio=topk_ratio,
window_size=window_size, stripes=stripes, step=step)
self.drop = nn.Dropout(drop)
self.saving = topk_ratio
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class TopkActMlp(nn.Module):
""" Sanity check if topk work for activation
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., topk_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.saving = topk_ratio
self.topk_ratio = topk_ratio
def forward(self, x):
x = self.fc1(x)
topk, ind = torch.topk(x, int(x.shape[-1]*self.topk_ratio), dim=-1)
mask = torch.zeros_like(x)
mask = mask.scatter(-1, ind, 1.)
x = x*mask
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
topk, ind = torch.topk(x, int(x.shape[-1]*self.topk_ratio), dim=-1)
mask = torch.zeros_like(x)
mask = mask.scatter(-1, ind, 1.)
x = x*mask
x = self.drop(x)
return x
class TopkLrMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3, topk_ratio=0.1, rank_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = TopkLrLinear(in_features, hidden_features, bias=True, window_size=window_size, topk_ratio=topk_ratio, rank_ratio=rank_ratio)
self.act = act_layer()
self.fc2 = TopkLrLinear(hidden_features, out_features, bias=True, window_size=window_size, topk_ratio=topk_ratio, rank_ratio=rank_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class TopkMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3, topk_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = TopkLinear(in_features, hidden_features, bias=True, window_size=window_size, topk_ratio=topk_ratio)
self.act = act_layer()
self.fc2 = TopkLinear(hidden_features, out_features, bias=True, window_size=window_size, topk_ratio=topk_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class SLXMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3, step=1, stripes_1=3, stripes_2=1, rank_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = SLXLinear(in_features, hidden_features, bias=True, window_size=window_size, stripes=stripes_1, step=step, rank_ratio=rank_ratio)
self.act = act_layer()
self.fc2 = SLXLinear(hidden_features, out_features, bias=True, window_size=window_size, stripes=stripes_2, step=step, rank_ratio=rank_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class SLMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3, step=1, stripes_1=3, stripes_2=1, rank_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = SLLinear(in_features, hidden_features, bias=True, window_size=window_size, stripes=stripes_1, step=step, rank_ratio=rank_ratio)
self.act = act_layer()
self.fc2 = SLLinear(hidden_features, out_features, bias=True, window_size=window_size, stripes=stripes_2, step=step, rank_ratio=rank_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RandomMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=1, sparse_ratio=0.1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = RandomLinear(in_features, hidden_features, bias=True, window_size=window_size, sparse_ratio=sparse_ratio)
self.act = act_layer()
self.fc2 = RandomLinear(hidden_features, out_features, bias=True, window_size=window_size, sparse_ratio=sparse_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ButterflyMlp(nn.Module):
""" newMlp, similar to Mlp layers in MLP-Mixer but with extra gelu act and low-rank
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., window_size=3, step=1, stripes_1=3, stripes_2=1, block_size=None):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = ButterflyLinear(in_features, hidden_features, bias=True, window_size=window_size, stripes=stripes_1, step=step, block_size=block_size)
self.act = act_layer()
self.fc2 = ButterflyLinear(hidden_features, out_features, bias=True, window_size=window_size, stripes=stripes_2, step=step, block_size=block_size)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class FastMlp(nn.Module):
""" FastMlp, two low_rank factors for one linear layer
"""
def __init__(self, in_features, hidden_features=None, out_features=None, rank_ratio=0.1, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = FastLinear(in_features, hidden_features, rank_ratio=rank_ratio)
self.act = act_layer()
self.fc2 = FastLinear(hidden_features, out_features, rank_ratio=rank_ratio)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
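# Editor-added smoke test (not part of the original file). The hydra config dict is
# an illustration: it points MlpCustom at the MonarchLinear class defined in this
# repo, assuming Hydra >= 1.1 so that a plain dict is accepted by instantiate.
if __name__ == '__main__':
    x = torch.randn(2, 16, 64)
    print(MlpCustom(64, hidden_features=128)(x).shape)    # torch.Size([2, 16, 64])
    print(GluMlp(64, hidden_features=128)(x).shape)       # torch.Size([2, 16, 64])
    monarch_cfg = {'_target_': 'src.models.layers.monarch_linear.MonarchLinear', 'nblocks': 4}
    mlp = MlpCustom(64, hidden_features=256, linear1_cfg=monarch_cfg, linear2_cfg=monarch_cfg)
    print(mlp(x).shape)                                   # torch.Size([2, 16, 64])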
| fly-master | src/models/layers/mlp.py |
# From https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.nn.utils.parametrize as parametrize
# class RealToComplex(nn.Module):
# def forward(self, x):
# return torch.view_as_complex(x)
# [2022-01-06] TD: Pytorch's parameterize complains if we have right_inverse,
# since it assumes that parameterize keeps the dtype.
# def right_inverse(self, x_cplx):
# return torch.view_as_real(x_cplx)
################################################################
# 1d fourier layer
################################################################
class SpectralConv1d(nn.Module):
def __init__(self, in_channels, out_channels, modes1):
super(SpectralConv1d, self).__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.scale = (1 / (in_channels * out_channels))
# self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat))
# weight is stored as real to avoid issue with Adam not working on complex parameters
        # FNO code initializes with rand but we initialize with randn as that seems more natural.
self.weights1 = nn.Parameter(self.scale * torch.randn(in_channels, out_channels, self.modes1, 2))
# Need unsafe=True since we're changing the dtype of the parameters
# parametrize.register_parametrization(self, 'weights1', RealToComplex(), unsafe=True)
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coeffcients up to factor of e^(- something constant)
x_ft = torch.fft.rfft(x, norm='ortho')
# Multiply relevant Fourier modes
# out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)
# out_ft[:, :, :self.modes1] = self.compl_mul1d(x_ft[:, :, :self.modes1], self.weights1)
weights1 = torch.view_as_complex(self.weights1)
out_ft = F.pad(self.compl_mul1d(x_ft[:, :, :self.modes1], weights1),
(0, x_ft.shape[-1] - self.modes1))
#Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1), norm='ortho')
return x
################################################################
# 2d fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
# weight is stored as real to avoid issue with Adam not working on complex parameters
        # FNO code initializes with rand but we initialize with randn as that seems more natural.
self.weights1 = nn.Parameter(self.scale * torch.randn(in_channels, out_channels, self.modes1, self.modes2, 2))
self.weights2 = nn.Parameter(self.scale * torch.randn(in_channels, out_channels, self.modes1, self.modes2, 2))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
#Compute Fourier coeffcients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x, norm='ortho')
# Multiply relevant Fourier modes
        # Allocate out_ft with out_channels; zeros_like(x_ft) would have in_channels
        # channels and break whenever in_channels != out_channels.
        out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1) // 2 + 1,
                             dtype=torch.cfloat, device=x.device)
weights1 = torch.view_as_complex(self.weights1)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], weights1)
weights2 = torch.view_as_complex(self.weights2)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], weights2)
#Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)), norm='ortho')
return x
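# Editor-added shape check (not part of the original file): the spectral convolutions
# preserve the spatial size and map in_channels -> out_channels.
if __name__ == '__main__':
    conv1d = SpectralConv1d(in_channels=3, out_channels=5, modes1=8)
    print(conv1d(torch.randn(2, 3, 64)).shape)            # torch.Size([2, 5, 64])
    conv2d = SpectralConv2d(in_channels=3, out_channels=5, modes1=8, modes2=8)
    print(conv2d(torch.randn(2, 3, 32, 32)).shape)        # torch.Size([2, 5, 32, 32])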
| fly-master | src/models/layers/spectral_conv.py |
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import Linear, init
class MaskLinear(nn.Module):
r"""
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, topk_ratio: float = 0.1, window_size: int = 6,
stripes: int = 3, step = 1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(MaskLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
# Butterfly base
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size, i * d: i * d + step * d * window_size] = 1
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size, (i + patch_start) * d: (patch_start + i) * d + step * d * window_size] = 1
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.register_buffer('sparse_mask_topk', torch.zeros_like(self.weight))
self.reset_parameters()
self.input = None
self.topk = math.ceil(in_features*out_features * topk_ratio)
self.saving = topk_ratio
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data / math.sqrt(
torch.sum(self.sparse_mask) / (self.in_features * self.out_features))
def forward(self, input: Tensor) -> Tensor:
self.input = input
mask = (self.sparse_mask_topk.bool() | self.sparse_mask.bool()).int()
y = input @ ((mask * self.weight).t())
return y
def extra_repr(self) -> str:
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
def hook_fn(module, input, output):
tmp = output[0].float()
dense_grad = torch.einsum('bsd,bsm->bdm', tmp, module.input)
dense_grad = torch.sum(dense_grad, dim=0)
# print(torch.sum(dense_grad==module.weight.grad))
# BC: first try matrix wise topk
tmp = torch.abs(dense_grad).flatten()
_, idx = torch.topk(tmp, module.topk)
mask = torch.zeros_like(tmp, device=module.sparse_mask.device)
mask = mask.scatter(-1, idx, 1.)
mask = mask.reshape(dense_grad.shape)
module.sparse_mask_topk = mask
# print("replace mask")
class MaskLinearWrap(nn.Module):
""" Sanity check if topk work for activation
"""
def __init__(self, in_features: int, out_features: int, bias: bool = True, topk_ratio: float = 0.1,
window_size: int = 6, stripes: int = 3, step = 1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(MaskLinearWrap, self).__init__()
self.fc = MaskLinear(in_features, out_features, topk_ratio=topk_ratio, window_size=window_size,
                             stripes=stripes, step=step, device=device, dtype=dtype)
self.fc.register_full_backward_hook(hook_fn)
self.saving = topk_ratio
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.fc.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
self.input = input
        y = self.fc(input)
        if self.bias is not None:
            y = y + self.bias
return y
# Tests:
if __name__ == '__main__':
x = torch.randn((2, 10, 20), requires_grad=True)
layer = MaskLinearWrap(20, 40, topk_ratio=0.1, bias=True)
loss = 1-torch.sum(layer(x))
loss.backward()
print(torch.sum(layer.fc.sparse_mask), torch.sum(layer.fc.sparse_mask_topk))
| fly-master | src/models/layers/maskedlinear.py |
from typing import Union
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import Linear, init
from einops import rearrange
import hydra
from src.ops.low_rank import low_rank_project
from src.ops.blockdiag_butterfly_einsum import (
blockdiag_butterfly_multiply_einsum, blockdiag_butterfly_project_einsum
)
from src.utils.utils import get_logger
logger = get_logger()
class BlockdiagButterflyLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, nblocks1: int = 4, nblocks2: int = 4,
b1: int = 48, b2: int = 1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.nblocks1 = nblocks1
self.nblocks2 = nblocks2
m, n = out_features, in_features
i = n//nblocks1
l = m//nblocks2
assert n == i * nblocks1
assert m == l * nblocks2
self.w1_bfly = Parameter(torch.empty((nblocks1, nblocks2*b1, i), **factory_kwargs))
self.w2_bfly = Parameter(torch.empty((nblocks2, l, nblocks1*b1), **factory_kwargs))
self.b1 = b1
self.b2 = b2
self.saving = ((torch.numel(self.w1_bfly)+torch.numel(self.w2_bfly)))/(self.in_features*self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def init_factors(self, weight):
self.w1_bfly.data, self.w2_bfly.data = blockdiag_butterfly_project_einsum(weight, nblocks1=self.nblocks1,
nblocks2=self.nblocks2, b1=self.b1, b2=self.b2)
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.w1_bfly, a=math.sqrt(5))
init.kaiming_uniform_(self.w2_bfly, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w1_bfly)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def preprocess(self, x):
return x.reshape(-1, x.shape[-1])
def postprocess(self, output, x_shape):
batch_shape = x_shape[:-1]
return output.reshape(batch_shape + (output.shape[-1],))
def forward(self, input: Tensor) -> Tensor:
x_shape = input.shape
output = blockdiag_butterfly_multiply_einsum(self.preprocess(input), self.w1_bfly, self.w2_bfly, self.b2)
output = self.postprocess(output, x_shape)
return (output + self.bias) if self.bias is not None else output
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class NinjaTurtleProjLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, repeat: int = 3, window_size: int = 6,
stripes: int = 3, step=1, gtoken=1, block_size=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features//repeat), max(in_features, out_features//repeat))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = pseudo_mask_size[1] / pseudo_mask_size[0]
if (math.ceil(d) - d) < 0.1:
d = math.ceil(d)
elif (d - math.floor(d)) < 0.1:
d = math.floor(d)
for k in range(stripes):
patch_start = stride * (2 ** k - 1)
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size * step,
int(i * d): int(i * d + step * d * window_size)] = 1.
for k in range(stripes):
patch_start = stride * (2 ** k - 1)
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size * step,
int((i + patch_start) * d): int((patch_start + i) * d + step * d * window_size)] = 1.
tmp_mask = generate_mask(tmp_mask, block_size)
tmp_mask[:, :gtoken] = 1.
tmp_mask[:gtoken, :] = 1.
if in_features <= out_features//repeat:
self.register_buffer('sparse_mask', tmp_mask.t().repeat(1, repeat))
else:
self.register_buffer('sparse_mask', tmp_mask.repeat(1, repeat))
self.saving = torch.sum(tmp_mask) / (self.in_features * self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data / math.sqrt(
torch.sum(self.sparse_mask) / (self.in_features * self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
        output = input @ ((self.sparse_mask * self.weight).t())
        return (output + self.bias) if self.bias is not None else output
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class NinjaTurtleLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, window_size: int = 6,
stripes: int = 3, step=1, gtoken=1, block_size=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = pseudo_mask_size[1] / pseudo_mask_size[0]
# BC if we want to use 32/16 blocks for speed, modify this
if (math.ceil(d) - d) < 0.1:
d = math.ceil(d)
elif (d - math.floor(d)) < 0.1:
d = math.floor(d)
for k in range(stripes):
patch_start = stride * (2 ** k - 1)
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size * step,
int(i * d): int(i * d + step * d * window_size)] = 1.
for k in range(stripes):
patch_start = stride * (2 ** k - 1)
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size * step,
int((i + patch_start) * d): int((patch_start + i) * d + step * d * window_size)] = 1.
tmp_mask = generate_mask(tmp_mask, block_size)
tmp_mask[:, :gtoken] = 1.
tmp_mask[:gtoken, :] = 1.
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.saving = torch.sum(tmp_mask) / (self.in_features * self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data / math.sqrt(
torch.sum(self.sparse_mask) / (self.in_features * self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
        output = input @ ((self.sparse_mask * self.weight).t())
        return (output + self.bias) if self.bias is not None else output
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class ButterflyGlobalLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, window_size: int = 6,
stripes: int = 3, step=1, gtoken=1, block_size=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size, i * d: i * d + step * d * window_size] = 1.
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size, (i + patch_start) * d: (patch_start + i) * d + step * d * window_size] = 1.
tmp_mask = generate_mask(tmp_mask, block_size)
tmp_mask[:, :gtoken] = 1.
tmp_mask[:gtoken, :] = 1.
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
        self.saving = torch.sum(tmp_mask) / (self.in_features * self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data/math.sqrt(torch.sum(self.sparse_mask)/(self.in_features*self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
        output = input @ ((self.sparse_mask * self.weight).t())
        return (output + self.bias) if self.bias is not None else output
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class NinjaTurtleSparsityConfig:
linear_cls = NinjaTurtleLinear
def __init__(self, **kwargs):
self.kwargs = kwargs
self.block = kwargs.pop('block')
def make_layout(self, out_features, in_features):
linear = self.linear_cls(in_features, out_features, bias=False, **self.kwargs)
layout = linear.sparse_mask
# Convert from (out_features, in_features) mask to
# (out_features // block_size, in_features // block_size) mask
layout = rearrange(layout, '(p blksz) (r blksz1) -> p r (blksz blksz1)',
blksz=self.block, blksz1=self.block)
return (layout > 0).any(dim=-1).int()
class ButterflyGlobalSparsityConfig(NinjaTurtleSparsityConfig):
linear_cls = ButterflyGlobalLinear
class TopkLrLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = False, rank_ratio: float = 0.1,
window_size: int = 6, topk_ratio: float = 0.1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.gate = Linear(in_features=in_features, out_features=1)
self.sparse = TopkLinear(in_features, out_features, window_size=window_size, topk_ratio=topk_ratio)
self.low_rank = FastLinear(in_features, out_features, rank_ratio=rank_ratio)
self.saving = self.sparse.saving + self.low_rank.saving
def forward(self, input: Tensor) -> Tensor:
g = self.gate(input.detach())
g = torch.sigmoid(g)
sparse_comp = self.sparse(input)
low_rank_comp = self.low_rank(input)
return g * sparse_comp + (1. - g) * low_rank_comp
    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
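# Illustrative usage sketch (assumed sizes); the gate is computed on a detached copy of the input,
# so its gradient does not flow back into earlier layers:
#   layer = TopkLrLinear(256, 512, rank_ratio=0.1, window_size=4, topk_ratio=0.1)
#   y = layer(torch.randn(8, 256))   # (8, 512), sigmoid-gated mix of sparse and low-rank outputs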
class TopkLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, window_size: int = 1,
topk_ratio: float = 0.1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.window_size = window_size
self.topk_ratio = topk_ratio
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
tmp_mask = self.reset_parameters()
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.saving = torch.sum(tmp_mask) / (self.in_features * self.out_features)
def init_mask(self, weight):
pseudo_mask_size = (min(self.in_features, self.out_features), max(self.in_features, self.out_features))
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
x = math.ceil(pseudo_mask_size[0] / self.window_size)
y = math.ceil(math.ceil(pseudo_mask_size[1] / d) / self.window_size)
blocks = x * y
topk_blocks = math.ceil(blocks * self.topk_ratio)
kernel = torch.nn.AvgPool2d((self.window_size, self.window_size * d),
stride=(self.window_size, self.window_size * d), ceil_mode=True)
if self.in_features <= self.out_features:
value, ind = torch.topk(kernel(torch.abs(weight.t()[None, None])).view(-1), k=topk_blocks)
else:
value, ind = torch.topk(kernel(torch.abs(weight[None, None])).view(-1), k=topk_blocks)
base = torch.zeros([blocks, 1], device=weight.device)
base[ind] = 1.
base = torch.repeat_interleave(base.view(x, y), self.window_size * d).view(x, y * self.window_size * d)
tmp_mask = torch.repeat_interleave(base, self.window_size, dim=0)
return tmp_mask[:pseudo_mask_size[0], :pseudo_mask_size[1]]
    def reset_parameters(self) -> Tensor:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
tmp_mask = self.init_mask(self.weight)
self.weight.data = self.weight.data / math.sqrt(torch.sum(tmp_mask) / (self.in_features * self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
return tmp_mask
def forward(self, input: Tensor) -> Tensor:
tmp_mask = self.init_mask(self.weight)
if self.in_features <= self.out_features:
tmp_mask = tmp_mask.t()
        out = input @ ((tmp_mask * self.weight).t())
        return (out + self.bias) if self.bias is not None else out
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
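# Illustrative usage sketch (assumed sizes). init_mask is re-run inside forward(), so the top-k
# block pattern tracks the current weight magnitudes during training:
#   layer = TopkLinear(256, 512, window_size=4, topk_ratio=0.1)
#   y = layer(torch.randn(8, 256))   # (8, 512)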
class SLXLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, rank_ratio: float = 0.1,
window_size: int = 6, stripes: int = 3, step=1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.gate = Linear(in_features=in_features, out_features=1)
self.rank = int(rank_ratio*min(in_features, out_features))
self.weight1 = Parameter(torch.empty((out_features, min(in_features, out_features)), **factory_kwargs))
self.weight2 = Parameter(torch.empty((min(in_features, out_features), in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size, i * d: i * d + step * d * window_size] = 1.
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size, (i + patch_start) * d: (patch_start + i) * d + step * d * window_size] = 1.
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.saving = ((self.in_features+self.out_features)*self.rank)/(self.in_features*self.out_features) \
+ torch.sum(tmp_mask)/(in_features*out_features)
        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self) -> None:
        # Initialize the shared factors and the bias, following the same scheme as the other
        # layers in this file.
        init.kaiming_uniform_(self.weight1, a=math.sqrt(5))
        init.kaiming_uniform_(self.weight2, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight2)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
g = self.gate(input)
g = torch.sigmoid(g)
        # Contract the shared inner dimension d: (out, d) x (d, in) -> (out, in).
        attn = torch.einsum("od,di->oi", self.weight1, self.weight2)
        attn = attn * self.sparse_mask
        sparse_comp = input @ (attn.t())
        low_rank_comp = input @ (self.weight2.t()[:, :self.rank]) @ (self.weight1.t()[:self.rank, :])
        out = g * sparse_comp + (1. - g) * low_rank_comp
        return (out + self.bias) if self.bias is not None else out
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class SLLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = False, rank_ratio: float = 0.1,
window_size: int = 6, stripes: int = 3, step=1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.gate = Linear(in_features=in_features, out_features=1)
self.sparse = ButterflyLinear(in_features, out_features, window_size=window_size, stripes=stripes, step=step)
self.low_rank = FastLinear(in_features, out_features, rank_ratio=rank_ratio)
self.saving = self.sparse.saving + self.low_rank.saving
def forward(self, input: Tensor) -> Tensor:
g = self.gate(input)
g = torch.sigmoid(g)
sparse_comp = self.sparse(input)
low_rank_comp = self.low_rank(input)
return g * sparse_comp + (1.-g) * low_rank_comp
    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
def generate_mask(base, block_size=None):
if block_size is not None:
num_r, num_c = base.shape
b_r, b_c = block_size
mask = torch.zeros(base.shape)
for i in range(0, num_r, b_r):
for j in range(0, num_c, b_c):
lighten = torch.sum(base[i:(i+b_r), j:(j+b_c)])
if lighten > 0.0:
mask[i:(i+b_r), j:(j+b_c)] = 1.
return mask
else:
return base
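# Small worked example of the block coarsening above (illustrative):
#   fine = torch.zeros(8, 8); fine[0, 0] = 1.
#   generate_mask(fine, block_size=(4, 4))   # top-left 4x4 block becomes all ones, rest stays 0
#   generate_mask(fine)                      # block_size=None returns the mask unchanged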
class RandomLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, window_size: int = 1,
sparse_ratio: float = 0.1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
x = math.ceil(pseudo_mask_size[0] / window_size)
y = math.ceil(pseudo_mask_size[1] // d / window_size)
blocks = x * y
nnz_block = math.ceil(blocks * sparse_ratio)
ind = torch.randperm(blocks)[:nnz_block]
for k in range(nnz_block):
block_x = ind[k] // y
block_y = ind[k] % y
tmp_mask[block_x * window_size:(block_x + 1) * window_size,
block_y * window_size * d:(block_y + 1) * window_size * d] = 1.
tmp_mask = tmp_mask.view(pseudo_mask_size[0], pseudo_mask_size[1])
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.saving = torch.sum(tmp_mask) / (self.in_features * self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data / math.sqrt(
torch.sum(self.sparse_mask) / (self.in_features * self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
        out = input @ ((self.sparse_mask * self.weight).t())
        return (out + self.bias) if self.bias is not None else out
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class ButterflyLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, window_size: int = 6, stripes: int = 3, step = 1,
block_size=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
pseudo_mask_size = (min(in_features, out_features), max(in_features, out_features))
tmp_mask = torch.zeros(pseudo_mask_size)
stride = int(math.sqrt(pseudo_mask_size[0]))
d = math.ceil(pseudo_mask_size[1] / pseudo_mask_size[0])
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[patch_start + i:patch_start + i + window_size, i * d: i * d + step * d * window_size] = 1.
for k in range(stripes):
patch_start = stride * k
for i in range(0, pseudo_mask_size[0], window_size):
tmp_mask[i: i + window_size, (i + patch_start) * d: (patch_start + i) * d + step * d * window_size] = 1.
tmp_mask = generate_mask(tmp_mask, block_size)
if in_features <= out_features:
self.register_buffer('sparse_mask', tmp_mask.t())
else:
self.register_buffer('sparse_mask', tmp_mask)
self.saving = torch.sum(generate_mask(tmp_mask))/(self.in_features*self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.weight.data = self.weight.data/math.sqrt(torch.sum(self.sparse_mask)/(self.in_features*self.out_features))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
        out = input @ ((self.sparse_mask * self.weight).t())
        return (out + self.bias) if self.bias is not None else out
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
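# Illustrative usage sketch (assumed sizes):
#   layer = ButterflyLinear(384, 384, window_size=6, stripes=3)
#   y = layer(torch.randn(2, 197, 384))   # (2, 197, 384); the mask is fixed at construction time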
class FastLinear(nn.Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True, rank_ratio: float = 0.1,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.rank = int(rank_ratio*min(in_features, out_features))
self.low_rank1 = Parameter(torch.empty((in_features, self.rank), **factory_kwargs))
self.low_rank2 = Parameter(torch.empty((self.rank, out_features), **factory_kwargs))
self.saving = ((self.in_features+self.out_features)*self.rank)/(self.in_features*self.out_features)
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def init_low_rank(self, weight):
u, s, v = torch.svd(weight.t())
self.low_rank1.data = u[:, :self.rank] @ torch.diag(s[:self.rank])
self.low_rank2.data = v[:, :self.rank].t()
assert torch.norm(weight.t() - u @ torch.diag(s) @ v.t()) < 0.01
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.low_rank1, a=math.sqrt(5))
init.kaiming_uniform_(self.low_rank2, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.low_rank1)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return (input @ self.low_rank1 @ self.low_rank2 + self.bias) if self.bias is not None else input @ self.low_rank1 @ self.low_rank2
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
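# Illustrative usage sketch (assumed sizes): with rank_ratio=0.1 on a 512x512 layer the rank is 51,
# so the two thin factors hold 2 * 512 * 51 parameters instead of 512 * 512:
#   layer = FastLinear(512, 512, rank_ratio=0.1)
#   y = layer(torch.randn(8, 512))   # (8, 512)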
class LowRank(nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, rank: Union[int, float],
bias: bool=True, init='linear', weight_decay: bool = True,
device=None, dtype=None) -> None:
"""
weight_decay: whether to mark the low-rank weights as _no_weight_decay.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
if isinstance(rank, float):
rank = int(rank * min(in_features, out_features))
self.rank = rank
self.lr_weight1 = Parameter(torch.empty((self.rank, in_features), **factory_kwargs))
self.lr_weight2 = Parameter(torch.empty((out_features, self.rank), **factory_kwargs))
if init not in ['linear', 'svd']:
raise NotImplementedError(f'init {init} not supported')
self.init = init
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
if not weight_decay:
self.lr_weight1._no_weight_decay = True
self.lr_weight2._no_weight_decay = True
def reset_parameters(self) -> None:
with torch.no_grad():
if self.init == 'linear':
# Mimic torch.nn.Linear init
init.kaiming_uniform_(self.lr_weight1, a=math.sqrt(5))
init.kaiming_uniform_(self.lr_weight2, a=math.sqrt(5))
elif self.init == 'svd':
# Use spectral initialization as described in https://openreview.net/forum?id=KTlJT1nof6d
dense_init_fn_ = partial(init.kaiming_uniform_, a=math.sqrt(5))
self.set_weights_from_dense_init(dense_init_fn_)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.lr_weight1)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def set_weights_from_projection(self, weight):
U, Vt = low_rank_project(weight, rank=self.rank)
with torch.no_grad():
self.lr_weight1.copy_(Vt)
self.lr_weight2.copy_(U)
def set_weights_from_dense_init(self, dense_init_fn_):
dense_weight = torch.empty(self.out_features, self.in_features,
device=self.lr_weight1.device, dtype=self.lr_weight1.dtype)
dense_init_fn_(dense_weight)
self.set_weights_from_projection(dense_weight)
@property
def saving(self):
return ((self.in_features + self.out_features) * self.rank
/ (self.in_features * self.out_features))
def forward(self, input: Tensor) -> Tensor:
return F.linear(F.linear(input, self.lr_weight1), self.lr_weight2, self.bias)
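# Illustrative usage sketch (assumed sizes); a float rank is interpreted as a rank ratio:
#   layer = LowRank(768, 768, rank=0.1)     # rank = int(0.1 * 768) = 76
#   y = layer(torch.randn(4, 196, 768))     # (4, 196, 768)
#   layer.saving                            # (768 + 768) * 76 / (768 * 768) ~= 0.198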
class SparseLRLinear(nn.Module):
def __init__(self, in_features, out_features, sparse_cfg,
bias=True, rank: Union[int, float] = 0.1,
gating=True, checkpointing=False):
"""If rank is float (e.g., 0.1), treat it as rank ratio.
If rank is int (e.g., 32), treat it as rank.
gating: whether to use sigmoid gating, otherwise we simply average the sparse and low-rank
components.
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.sparse = hydra.utils.instantiate(sparse_cfg, in_features, out_features, bias=False,
_recursive_=False)
self.low_rank = LowRank(in_features, out_features, rank=rank, bias=False)
if gating:
self.gate = nn.Linear(in_features=in_features, out_features=1)
else:
self.register_parameter('gate', None)
self.checkpointing = checkpointing
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
logger.info(f'Linear class {self.__class__}: saving={self.saving}')
def reset_parameters(self) -> None:
if self.bias is not None:
fan_in = self.bias.shape[0]
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
@property
def saving(self):
return self.sparse.saving + self.low_rank.saving
def _multiply(self, x):
sparse_output = self.sparse(x)
low_rank_output = self.low_rank(x)
g = torch.sigmoid(self.gate(x)) if self.gate is not None else 0.5
# output = (1.0 - g) * sparse_output + g * low_rank_output
return torch.lerp(sparse_output, low_rank_output, g)
def forward(self, x):
if self.checkpointing:
output = torch.utils.checkpoint.checkpoint(self._multiply, x)
else:
output = self._multiply(x)
# Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32
return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output
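# Illustrative usage sketch. sparse_cfg is a hydra config whose _target_ points at one of the
# masked linear classes in this file (the target path and values below are assumptions):
#   sparse_cfg = {'_target_': 'src.models.layers.fastlinear.ButterflyLinear',
#                 'window_size': 6, 'stripes': 3}
#   layer = SparseLRLinear(768, 768, sparse_cfg, rank=0.1)
#   y = layer(torch.randn(2, 196, 768))   # sigmoid-gated mix of the sparse and low-rank outputs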
| fly-master | src/models/layers/fastlinear.py |
""" Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on the impl in https://github.com/google-research/vision_transformer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from itertools import repeat
import collections.abc
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
# Adapt from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/patch_embed.py
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
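# Illustrative shape walk-through with the default arguments: a (B, 3, 224, 224) image is
# projected by a 16x16 stride-16 conv to (B, 768, 14, 14), then flattened and transposed to
# (B, 196, 768) when flatten=True.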
| fly-master | src/models/layers/patch_embed.py |
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class StructuredLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
"""Subclasses should call reset_parameters
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
# Subclasses may override {in,out}_features_extended
if not hasattr(self, 'in_features_extended'):
self.in_features_extended = in_features
if not hasattr(self, 'out_features_extended'):
self.out_features_extended = out_features
if bias:
self.bias = nn.Parameter(torch.zeros(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
def reset_parameters(self) -> None:
self.set_weights_from_dense_init(dense_init_fn_=partial(init.kaiming_uniform_, a=math.sqrt(5)))
self.reset_parameters_bias()
def set_weights_from_dense_init(self, dense_init_fn_):
raise NotImplementedError
def reset_parameters_bias(self):
if self.bias is not None:
fan_in = self.bias.shape[-1]
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
@property
def saving(self):
raise NotImplementedError
def convert_to_dense_weight(self):
factory_kwargs = {'device': self.weight.device, 'dtype': self.weight.dtype}
dense_weight = self.forward_matmul(torch.eye(self.in_features, **factory_kwargs)).T
return dense_weight
def preprocess(self, x):
in_features = x.shape[-1]
if in_features < self.in_features_extended:
x = F.pad(x, (0, self.in_features_extended - in_features))
return x
def postprocess(self, output):
out_features_extended = output.shape[-1]
if out_features_extended > self.out_features:
output = output[..., :self.out_features]
return output
def forward_matmul(self, x):
raise NotImplementedError
def forward(self, x):
output = self.forward_matmul(x)
# Convert bias to output.dtype in case of AMP, otherwise bias and activation will be in FP32
return (output + self.bias.to(dtype=output.dtype)) if self.bias is not None else output
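# Minimal sketch of the subclass contract (illustrative; the class and attribute names below are
# hypothetical, not part of this file):
#   class DenseAsStructured(StructuredLinear):
#       def __init__(self, in_features, out_features, bias=True):
#           super().__init__(in_features, out_features, bias=bias)
#           self.weight = nn.Parameter(torch.empty(out_features, in_features))
#           self.reset_parameters()
#       def set_weights_from_dense_init(self, dense_init_fn_):
#           dense_init_fn_(self.weight)
#       def forward_matmul(self, x):
#           return F.linear(x, self.weight)   # bias is added by StructuredLinear.forward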
| fly-master | src/models/layers/structured_linear.py |
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py
# We split the input differently ((d 2) -> d 2 instead of (2 d) -> d 2), following the original
# paper's implementation. This should not matter.
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox
# NOTE: Almost the same right now, moving parts to Triton is the next step
from typing import Tuple
import torch
from einops import rearrange
def rotate_half(x):
# rearrange doesn't work with torch.jit
# x = rearrange(x, '... (d r) -> ... d r', r=2)
x = x.unflatten(dim=-1, sizes=(-1, 2))
x1, x2 = x.unbind(dim=-1)
rotated_x = torch.stack((-x2, x1), dim=-1)
# return rearrange(rotated_x, '... d r -> ... (d r)')
return rotated_x.flatten(start_dim=-2)
@torch.jit.script
def apply_rotary_pos_emb(x, cos, sin):
# NOTE: This could probably be moved to Triton
# Handle a possible sequence length mismatch in between q and k
cos = cos[:, :, : x.shape[-2], :]
sin = sin[:, :, : x.shape[-2], :]
return (x * cos) + (rotate_half(x) * sin)
class RotaryEmbedding(torch.nn.Module):
"""
The rotary position embeddings from RoFormer_ (Su et. al).
A crucial insight from the method is that the query and keys are
transformed by rotation matrices which depend on the relative positions.
Other implementations are available in the Rotary Transformer repo_ and in
GPT-NeoX_, GPT-NeoX was an inspiration
.. _RoFormer: https://arxiv.org/abs/2104.09864
.. _repo: https://github.com/ZhuiyiTechnology/roformer
.. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
.. warning: Please note that this embedding is not registered on purpose, as it is transformative
(it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis
"""
def __init__(self, dim_model: int, *_, **__):
super().__init__()
# Generate and save the inverse frequency buffer (non trainable)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
self.register_buffer("inv_freq", inv_freq)
self._seq_len_cached = None
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_tables(self, x, seq_dimension=1):
seq_len = x.shape[seq_dimension]
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
self._seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
self.inv_freq
)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
emb = rearrange(torch.stack([freqs, freqs], dim=-1), '... d r -> ... (d r)', r=2)
self._cos_cached = emb.cos()[None, None, :, :]
self._sin_cached = emb.sin()[None, None, :, :]
return self._cos_cached, self._sin_cached
def forward(
self, q: torch.Tensor, k: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
k, seq_dimension=-2
)
return (
apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
)
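# Illustrative usage sketch; q and k are assumed to be laid out as (batch, heads, seq_len, head_dim):
#   rot = RotaryEmbedding(dim_model=64)
#   q = torch.randn(2, 8, 128, 64)
#   k = torch.randn(2, 8, 128, 64)
#   q_rot, k_rot = rot(q, k)   # same shapes; cos/sin tables are cached until seq_len changes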
| fly-master | src/models/layers/rotary.py |
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
T2T-ViT
"""
import math
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from timm.models.helpers import load_pretrained
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
from src.models.modules.t2t import T2T_module
from src.models.modules.seq_common import PositionalEncoding
from src.models.modules.vision_common import Block
from src.models.layers.fastlinear import LowRank, SparseLRLinear
from src.models.layers.blocksparse_linear import BlockSparseLinear
from src.models.layers.blockdiag_linear import BlockdiagLinear
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
'classifier': 'head',
**kwargs
}
default_cfgs = {
'T2t_vit_7': _cfg(),
'T2t_vit_10': _cfg(),
'T2t_vit_12': _cfg(),
'T2t_vit_14': _cfg(),
'T2t_vit_19': _cfg(),
'T2t_vit_24': _cfg(),
'T2t_vit_t_14': _cfg(),
'T2t_vit_t_19': _cfg(),
'T2t_vit_t_24': _cfg(),
'T2t_vit_14_resnext': _cfg(),
'T2t_vit_14_wide': _cfg(),
}
class T2T_ViT(nn.Module):
def __init__(self, img_size=224, tokens_type='performer', in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, token_dim=64,
t2tattn1_cfg=None, t2tattn2_cfg=None,
attnlinear_cfg=None, mlp_cfg=None):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = T2T_module(img_size=img_size, tokens_type=tokens_type, in_chans=in_chans,
embed_dim=embed_dim, token_dim=token_dim,
attn1_cfg=t2tattn1_cfg, attn2_cfg=t2tattn2_cfg)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = PositionalEncoding(embed_dim, max_len=num_patches + 1, batch_first=True,
dropout=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
attnlinear_cfg=attnlinear_cfg, mlp_cfg=mlp_cfg)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Linear, BlockSparseLinear, BlockdiagLinear, LowRank, SparseLRLinear)):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
dense_init_fn_ = partial(trunc_normal_, std=.02)
if isinstance(m, nn.Linear):
dense_init_fn_(m.weight)
elif isinstance(m, (BlockSparseLinear, BlockdiagLinear, LowRank)):
m.set_weights_from_dense_init(dense_init_fn_)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_embed(x)
x = self.blocks(x)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def t2t_vit_7(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 256 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=7, num_heads=4, mlp_ratio=2., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_7']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
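# Illustrative usage sketch (randomly initialized unless pretrained=True):
#   model = t2t_vit_7(num_classes=10)
#   logits = model(torch.randn(2, 3, 224, 224))   # (2, 10)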
@register_model
def t2t_vit_10(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 256 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=10, num_heads=4, mlp_ratio=2., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_10']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_12(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 256 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=12, num_heads=4, mlp_ratio=2., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_12']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_14(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 384 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=384, depth=14, num_heads=6, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_14']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_19(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 448 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=448, depth=19, num_heads=7, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_19']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_24(pretrained=False, **kwargs): # adopt performer for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 512 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=512, depth=24, num_heads=8, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_24']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_t_14(pretrained=False, **kwargs): # adopt transformers for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 384 ** -0.5)
model = T2T_ViT(tokens_type='transformer', embed_dim=384, depth=14, num_heads=6, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_t_14']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_t_19(pretrained=False, **kwargs): # adopt transformers for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 448 ** -0.5)
model = T2T_ViT(tokens_type='transformer', embed_dim=448, depth=19, num_heads=7, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_t_19']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_t_24(pretrained=False, **kwargs): # adopt transformers for tokens to token
if pretrained:
kwargs.setdefault('qk_scale', 512 ** -0.5)
model = T2T_ViT(tokens_type='transformer', embed_dim=512, depth=24, num_heads=8, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_t_24']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
# rexnext and wide structure
@register_model
def t2t_vit_14_resnext(pretrained=False, **kwargs):
if pretrained:
kwargs.setdefault('qk_scale', 384 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=384, depth=14, num_heads=32, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_14_resnext']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
@register_model
def t2t_vit_14_wide(pretrained=False, **kwargs):
if pretrained:
kwargs.setdefault('qk_scale', 512 ** -0.5)
model = T2T_ViT(tokens_type='performer', embed_dim=768, depth=14, num_heads=12, mlp_ratio=3., **kwargs)
model.default_cfg = default_cfgs['T2t_vit_14_wide']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
| fly-master | src/models/vit/t2t_vit.py |
"""
The original Vision Transformer (ViT) from timm, copyright belongs to / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from timm.models.layers import PatchEmbed, Mlp, trunc_normal_, lecun_normal_
from timm.models.registry import register_model
from src.models.modules.vision_common import Block
from src.models.layers.fastlinear import LowRank, SparseLRLinear
from src.models.layers.blocksparse_linear import BlockSparseLinear
from src.models.layers.blockdiag_linear import BlockdiagLinear
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
'vit_small_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth',
),
# patch models (weights ported from official Google JAX impl)
'vit_base_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'vit_base_patch32_224': _cfg(
url='', # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_base_patch16_384': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth',
input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
'vit_base_patch32_384': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth',
input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
'vit_large_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_large_patch32_224': _cfg(
url='', # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_large_patch16_384': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth',
input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
'vit_large_patch32_384': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
# patch models, imagenet21k (weights ported from official Google JAX impl)
'vit_base_patch16_224_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth',
num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_base_patch32_224_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth',
num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_large_patch16_224_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth',
num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_large_patch32_224_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'vit_huge_patch14_224_in21k': _cfg(
hf_hub='timm/vit_huge_patch14_224_in21k',
num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
# deit models (FB weights)
'vit_deit_tiny_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'),
'vit_deit_small_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'),
'vit_deit_base_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth',),
'vit_deit_base_patch16_384': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_deit_tiny_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
classifier=('head', 'head_dist')),
'vit_deit_small_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
classifier=('head', 'head_dist')),
'vit_deit_base_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
classifier=('head', 'head_dist')),
'vit_deit_base_distilled_patch16_384': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')),
# ViT ImageNet-21K-P pretraining
'vit_base_patch16_224_miil_in21k': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
),
'vit_base_patch16_224_miil': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm'
'/vit_base_patch16_224_1k_miil_84_4.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
),
}
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
act_layer=None, weight_init='',
attnlinear_cfg=None, mlp_cfg=None):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
distilled (bool): model includes a distillation token and head as in DeiT models
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
weight_init: (str): weight init scheme
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 2 if distilled else 1
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.patch_embed = embed_layer(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,
attnlinear_cfg=attnlinear_cfg, mlp_cfg=mlp_cfg)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size and not distilled:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(embed_dim, representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head(s)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
# Weight init
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=.02)
if weight_init.startswith('jax'):
# leave cls token as zeros to match jax impl
for n, m in self.named_modules():
_init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
else:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def _init_weights(self, m):
# this fn left here for compat with downstream users
_init_vit_weights(m)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
def get_classifier(self):
if self.dist_token is None:
return self.head
else:
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.num_tokens == 2:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1)
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
if self.dist_token is None:
return self.pre_logits(x[:, 0])
else:
return x[:, 0], x[:, 1]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple
            if self.training and not torch.jit.is_scripting():
                # during training, return the predictions of both classifier heads
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
else:
x = self.head(x)
return x
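# Illustrative usage sketch (random init; the registered factories below wire up pretrained weights):
#   model = VisionTransformer(img_size=224, patch_size=16, embed_dim=384, depth=12, num_heads=6)
#   logits = model(torch.randn(2, 3, 224, 224))   # (2, 1000) with the default num_classes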
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(m, (nn.Linear, BlockSparseLinear, BlockdiagLinear, LowRank, SparseLRLinear)):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.constant_(m.bias, head_bias)
elif n.startswith('pre_logits'):
lecun_normal_(m.weight)
nn.init.zeros_(m.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
else:
if m.bias is not None:
nn.init.zeros_(m.bias)
dense_init_fn_ = partial(trunc_normal_, std=.02)
if isinstance(m, nn.Linear):
dense_init_fn_(m.weight)
elif isinstance(m, (BlockSparseLinear, BlockdiagLinear, LowRank)):
m.set_weights_from_dense_init(dense_init_fn_)
elif jax_impl and isinstance(m, nn.Conv2d):
# NOTE conv was left to pytorch default in my original init
lecun_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
_logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
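# Worked example (illustrative): a pos_embed of shape (1, 1 + 14*14, D) trained at 224/16 and
# loaded into a 384/16 model is split into the class token and the 14x14 grid; the grid is
# bilinearly resized to 24x24 and re-concatenated, giving (1, 1 + 24*24, D).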
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(v, model.pos_embed, getattr(model, 'num_tokens', 1),
model.patch_embed.grid_size)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
img_size = kwargs.pop('img_size', default_img_size)
repr_size = kwargs.pop('representation_size', None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
VisionTransformer, variant, pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" My custom 'small' ViT model. embed_dim=768, depth=8, num_heads=8, mlp_ratio=3.
NOTE:
* this differs from the DeiT based 'small' definitions with embed_dim=384, depth=12, num_heads=6
* this model does not have a bias for QKV (unlike the official ViT and DeiT models)
"""
model_kwargs = dict(
patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3.,
qkv_bias=False, norm_layer=nn.LayerNorm, **kwargs)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
model_kwargs.setdefault('qk_scale', 768 ** -0.5)
model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: converted weights not currently available, too large for github release hosting.
"""
model_kwargs = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer('vit_deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_deit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_deit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_deit_base_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
'vit_deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs)
return model
@register_model
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
'vit_deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs)
return model
@register_model
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
'vit_deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs)
return model
@register_model
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
'vit_deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs)
return model
@register_model
def vit_base_patch16_224_miil(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs)
return model
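# Usage sketch (not part of the original file; the exact module path below is an assumption):
# the @register_model functions above are normally consumed through timm's factory, which
# forwards extra kwargs such as num_classes to the model constructor.
#
#   import torch
#   import timm
#   import src.models.vit.vit  # noqa: F401 -- importing this module registers the variants above
#
#   model = timm.create_model('vit_deit_small_patch16_224', pretrained=False, num_classes=10)
#   logits = model(torch.randn(2, 3, 224, 224))  # (2, 10)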
| fly-master | src/models/vit/vit.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_normal_, xavier_uniform_
from torch.nn.init import constant_
from torch.nn.parameter import Parameter
from torch import Tensor
from typing import Tuple, List, Optional
from einops import rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.mask_utils import pad_mask
# Adapted from https://pytorch.org/docs/stable/_modules/torch/nn/modules/activation.html#MultiheadAttention
# https://github.com/pytorch/pytorch/blob/release/1.9/torch/nn/modules/activation.py
class MultiheadAttention(nn.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
"""TD: We have a different interpretation of kdim and vdim compared to Pytorch.
    To be fair, Pytorch's interpretation is very confusing and the docs are unclear as well.
https://github.com/pytorch/pytorch/issues/60831
https://github.com/pytorch/pytorch/pull/61977/files
Here we use the interpretation from the original "Attention is all you need" paper.
query, key, value all have last dimension embed_dim.
They are then projected to dimension kdim, kdim, vdim.
"""
__constants__ = ['batch_first']
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False, shared_qk=False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_dim = self.kdim == self.vdim
self.num_heads = num_heads
self.batch_first = batch_first
assert self.kdim % num_heads == 0, "self.kdim must be divisible by num_heads"
assert self.vdim % num_heads == 0, "self.vdim must be divisible by num_heads"
self.shared_qk = shared_qk
if self._qkv_same_dim is False:
self.q_proj_weight = Parameter(torch.empty((self.kdim, embed_dim), **factory_kwargs))
if not shared_qk:
self.k_proj_weight = Parameter(torch.empty((self.kdim, embed_dim), **factory_kwargs))
else:
self.register_parameter('k_proj_weight', None)
self.v_proj_weight = Parameter(torch.empty((self.vdim, embed_dim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(((3 if not shared_qk else 2) * self.kdim,
embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty((2 if not shared_qk else 1) * self.kdim
+ self.vdim, **factory_kwargs))
else:
self.register_parameter('in_proj_bias', None)
self.in_proj_container = InProjContainer(self.q_proj_weight, self.k_proj_weight,
self.v_proj_weight, self.in_proj_weight,
self.in_proj_bias, shared_qk=self.shared_qk)
self.out_proj = nn.Linear(self.vdim, embed_dim, bias=bias, **factory_kwargs)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, self.kdim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, self.vdim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
if self.k_proj_weight is not None:
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(self, attention_layer: nn.Module, query: Tensor, key: Tensor, value: Tensor,
key_padding_mask: Optional[Tensor] = None, need_weights: bool = False,
attn_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shapes for inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: if a 2D mask: :math:`(L, S)` where L is the target sequence length, S is the
source sequence length.
If a 3D mask: :math:`(N\cdot\text{num\_heads}, L, S)` where N is the batch size, L is the target sequence
            length, S is the source sequence length. ``attn_mask`` ensures that position i is allowed to attend
            the unmasked positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
            while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
            are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
Shapes for outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
use_separate_proj_weight = not self._qkv_same_dim
num_heads = self.num_heads
# set up shape vars
if self.batch_first:
bsz, tgt_len, embed_dim = query.shape
_, src_len, _ = key.shape
else:
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert embed_dim == self.embed_dim, \
f"was expecting embedding dimension of {self.embed_dim}, but got {embed_dim}"
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
q, k, v = self.in_proj_container(query, key, value)
# TD: We want to do this transposition after the in_proj_container, because that projection
# checks if q is k and k is v, and if so it can group some matmuls together for speed.
if self.batch_first:
q, k, v = [rearrange(x, 'b s ... -> s b ...') for x in (q, k, v)]
# prep attention mask
if attn_mask is not None:
assert isinstance(attn_mask, (FullMask, LengthMask))
if isinstance(attn_mask, FullMask):
correct_shape = (tgt_len, src_len)
if attn_mask.bool_matrix.shape != correct_shape:
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.bool_matrix.shape}, but should be {correct_shape}.")
elif isinstance(attn_mask, LengthMask):
correct_shape = (tgt_len,)
if attn_mask._lengths.shape != (tgt_len,):
raise RuntimeError(f"The length of the 2D attn_mask is {attn_mask._lengths.shape}, but should be {correct_shape}.")
# add bias along batch dimension (currently second)
if self.bias_k is not None and self.bias_v is not None:
# Pytorch's implementation puts k first and the bias after.
# We put the bias first because our key_padding_mask needs to be consecutive.
# We don't want True True False ... False True
k = torch.cat([self.bias_k.repeat(1, bsz, 1), k])
v = torch.cat([self.bias_v.repeat(1, bsz, 1), v])
if attn_mask is not None:
attn_mask = pad_mask(attn_mask, 1, left=True)
if key_padding_mask is not None:
key_padding_mask = pad_mask(key_padding_mask, 1, left=True)
else:
assert self.bias_k is None
assert self.bias_v is None
# add zero attention along batch dimension
if self.add_zero_attn:
zero_attn_shape_k = (1, bsz, self.kdim)
zero_attn_shape_v = (1, bsz, self.vdim)
# Pytorch's implementation puts k first and the zeros after.
# We put the zeros first because our key_padding_mask needs to be consecutive.
# We don't want True True False ... False True
k = torch.cat([torch.zeros(zero_attn_shape_k, dtype=k.dtype, device=k.device), k],
dim=0)
v = torch.cat([torch.zeros(zero_attn_shape_v, dtype=v.dtype, device=v.device), v],
dim=0)
if attn_mask is not None:
attn_mask = pad_mask(attn_mask, 1, left=True)
if key_padding_mask is not None:
key_padding_mask = pad_mask(key_padding_mask, 1, left=True)
#
# reshape q, k, v for multihead attention and make em batch first
#
q, k, v = [rearrange(x, 't b (n_head head_dim) -> b t n_head head_dim',
n_head=self.num_heads) for x in (q, k, v)]
#
# (deep breath) calculate attention and out projection
#
attn_output, attn_output_weights = attention_layer(q, k, v,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=need_weights)
attn_output = rearrange(attn_output, 'b t h d -> b t (h d)')
attn_output = self.out_proj(attn_output)
if not self.batch_first:
attn_output = rearrange(attn_output, 'b t e -> t b e')
return attn_output, attn_output_weights if need_weights else None
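# Usage sketch (illustrative, not part of the original file). Unlike nn.MultiheadAttention, the
# forward pass above takes the attention computation itself as its first argument: any module with
# the interface attention_layer(q, k, v, attn_mask=..., key_padding_mask=..., need_weights=...)
# operating on (batch, seq, num_heads, head_dim) tensors can be plugged in.
#
#   mha = MultiheadAttention(embed_dim=256, num_heads=8, batch_first=True)
#   attention_layer = ...  # e.g. a softmax attention module with the interface described above
#   x = torch.rand(4, 128, 256)             # (batch, seq, embed_dim)
#   out, _ = mha(attention_layer, x, x, x)  # self-attention; out is (4, 128, 256)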
class InProjContainer(torch.nn.Module):
def __init__(self, q_proj_weight, k_proj_weight, v_proj_weight, in_proj_weight, in_proj_bias,
shared_qk=False):
r"""A in-proj container to project query/key/value in MultiheadAttention. This module happens before reshaping
the projected query/key/value into multiple heads. See the linear layers (bottom) of Multi-head Attention in
Fig 2 of Attention Is All You Need paper. Also check the usage example
in torchtext.nn.MultiheadAttentionContainer.
Args:
q_proj_weight: a proj layer for query. A typical projection layer is torch.nn.Linear.
k_proj_weight: a proj layer for key. A typical projection layer is torch.nn.Linear.
v_proj_weight: a proj layer for value. A typical projection layer is torch.nn.Linear.
"""
super().__init__()
self.q_proj_weight = q_proj_weight
self.k_proj_weight = k_proj_weight
self.v_proj_weight = v_proj_weight
self.in_proj_weight = in_proj_weight
self.in_proj_bias = in_proj_bias
self.packed_weight = in_proj_weight is not None
self.shared_qk = shared_qk
def forward(self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Projects the input sequences using in-proj layers. q/k/v are simply passed to
the forward func of q/k/value_proj, respectively.
Args:
q (Tensor): The q to be projected.
k (Tensor): The keys to be projected.
v (Tensor): The values to be projected.
        Examples::
            >>> import torch
            >>> embed_dim, bsz = 10, 64
            >>> w_q, w_k, w_v = (torch.randn(embed_dim, embed_dim) for _ in range(3))
            >>> in_proj_container = InProjContainer(w_q, w_k, w_v, None, None)
            >>> q = torch.rand((5, bsz, embed_dim))
            >>> k = v = torch.rand((6, bsz, embed_dim))
            >>> q, k, v = in_proj_container(q, k, v)
"""
if self.packed_weight:
if not self.shared_qk:
return _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias)
else:
E = self.in_proj_weight.shape[0] // 2
w, b = self.in_proj_weight, self.in_proj_bias
if k is v:
if q is k:
# self-attention
qk_projected, v_projected = F.linear(q, w, b).chunk(2, dim=-1)
return qk_projected, qk_projected, v_projected
else:
# encoder-decoder attention
w_q, _ = w.chunk(2)
w_kv = w
if b is None:
b_q = b_kv = None
else:
b_q, _ = b.chunk(2)
b_kv = b
return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
else:
w_qk, w_v = w.chunk(2)
if b is None:
b_qk = b_v = None
else:
b_qk, b_v = b.chunk(2)
return F.linear(q, w_qk, b_qk), F.linear(k, w_qk, b_qk), F.linear(v, w_v, b_v)
else:
w_q = self.q_proj_weight
w_k = self.k_proj_weight if not self.shared_qk else self.q_proj_weight
w_v = self.v_proj_weight
            assert w_q is not None, "separate projection weights are in use but q_proj_weight is None"
            assert w_k is not None, "separate projection weights are in use but k_proj_weight is None"
            assert w_v is not None, "separate projection weights are in use but v_proj_weight is None"
if self.in_proj_bias is None:
b_q = b_k = b_v = None
else:
kdim, vdim = self.q_proj_weight.shape[0], self.v_proj_weight.shape[0]
if not self.shared_qk:
b_q, b_k, b_v = self.in_proj_bias.split([kdim, kdim, vdim])
else:
b_q, b_v = self.in_proj_bias.split([kdim, vdim])
b_k = b_q
return _in_projection(q, k, v, w_q, w_k, w_v, b_q, b_k, b_v)
# Copied from https://github.com/pytorch/pytorch/blob/release/1.9/torch/nn/functional.py#L4836
def _in_projection_packed(
q: Tensor,
k: Tensor,
v: Tensor,
w: Tensor,
b: Optional[Tensor] = None,
) -> List[Tensor]:
r"""
Performs the in-projection step of the attention operation, using packed weights.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected. For self-attention,
these are typically the same tensor; for encoder-decoder attention,
k and v are typically the same tensor. (We take advantage of these
identities for performance if they are present.) Regardless, q, k and v
must share a common embedding dimension; otherwise their shapes may vary.
w: projection weights for q, k and v, packed into a single tensor. Weights
are packed along dimension 0, in q, k, v order.
b: optional projection biases for q, k and v, packed into a single tensor
in q, k, v order.
Shape:
Inputs:
- q: :math:`(..., E)` where E is the embedding dimension
- k: :math:`(..., E)` where E is the embedding dimension
- v: :math:`(..., E)` where E is the embedding dimension
- w: :math:`(E * 3, E)` where E is the embedding dimension
- b: :math:`E * 3` where E is the embedding dimension
Output:
- in output list :math:`[q', k', v']`, each output tensor will have the
same shape as the corresponding input tensor.
"""
E = w.shape[0] // 3
if k is v:
if q is k:
# self-attention
return F.linear(q, w, b).chunk(3, dim=-1)
else:
# encoder-decoder attention
w_q, w_kv = w.split([E, E * 2])
if b is None:
b_q = b_kv = None
else:
b_q, b_kv = b.split([E, E * 2])
return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
else:
w_q, w_k, w_v = w.chunk(3)
if b is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = b.chunk(3)
return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)
def _in_projection(
q: Tensor,
k: Tensor,
v: Tensor,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""
Performs the in-projection step of the attention operation. This is simply
a triple of linear projections, with shape constraints on the weights which
ensure embedding dimension uniformity in the projected outputs.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected.
w_q, w_k, w_v: weights for q, k and v, respectively.
b_q, b_k, b_v: optional biases for q, k and v, respectively.
Shape:
Inputs:
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
number of leading dimensions.
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
number of leading dimensions.
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
number of leading dimensions.
- w_q: :math:`(Eq, Eq)`
- w_k: :math:`(Eq, Ek)`
- w_v: :math:`(Eq, Ev)`
- b_q: :math:`(Eq)`
- b_k: :math:`(Eq)`
- b_v: :math:`(Eq)`
Output: in output triple :math:`(q', k', v')`,
- q': :math:`[Qdims..., Eq]`
- k': :math:`[Kdims..., Eq]`
- v': :math:`[Vdims..., Eq]`
"""
Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
assert Eq == Ek == Ev, 'query, key, and value must have the same dimension'
qdim, kdim, vdim = w_q.shape[0], w_k.shape[0], w_v.shape[0]
assert qdim == kdim, 'query and key must be projected to the same dimension'
assert w_q.shape == (qdim, Eq), f"expecting query weights shape of {(qdim, Eq)}, but got {w_q.shape}"
assert w_k.shape == (kdim, Ek), f"expecting key weights shape of {(kdim, Ek)}, but got {w_k.shape}"
assert w_v.shape == (vdim, Ev), f"expecting value weights shape of {(vdim, Ev)}, but got {w_v.shape}"
assert b_q is None or b_q.shape == (qdim,), f"expecting query bias shape of {(qdim,)}, but got {b_q.shape}"
assert b_k is None or b_k.shape == (kdim,), f"expecting key bias shape of {(kdim,)}, but got {b_k.shape}"
assert b_v is None or b_v.shape == (vdim,), f"expecting value bias shape of {(vdim,)}, but got {b_v.shape}"
return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""
Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Returns a tensor pair containing attended values and attention weights.
Args:
q, k, v: query, key and value tensors. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,
and E is embedding dimension.
- key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of
shape :math:`(Nt, Ns)`.
- Output: attention values have shape :math:`(B, Nt, E)`; attention weights
have shape :math:`(B, Nt, Ns)`
"""
B, Nt, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
attn = torch.bmm(q, k.transpose(-2, -1))
if attn_mask is not None:
attn += attn_mask
attn = torch.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = F.dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
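# Shape sketch for the reference helper above (illustrative): with q of shape (B, Nt, E) and
# k, v of shape (B, Ns, E), the attention weights are (B, Nt, Ns) and each row sums to 1.
#
#   q, k, v = torch.randn(2, 5, 16), torch.randn(2, 7, 16), torch.randn(2, 7, 16)
#   out, attn = _scaled_dot_product_attention(q, k, v)
#   assert out.shape == (2, 5, 16) and attn.shape == (2, 5, 7)
#   assert torch.allclose(attn.sum(-1), torch.ones(2, 5))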
| fly-master | src/models/modules/multihead_attention.py |
""" Standalone version of Structured (Sequence) State Space (S4) model. """
import logging
from functools import partial
import math
import numpy as np
from scipy import special as ss
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as U
from pytorch_lightning.utilities import rank_zero_only
from einops import rearrange, repeat
from omegaconf import DictConfig
import opt_einsum as oe
contract = oe.contract
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
log = get_logger(__name__)
""" Cauchy kernel """
try: # Try CUDA extension
from csrc.cauchy.cauchy import cauchy_mult
has_cauchy_extension = True
except Exception:
    log.warning(
"CUDA extension for cauchy multiplication not found. Install by going to extensions/cauchy/ and running `python setup.py install`. This should speed up end-to-end training by 10-50%"
)
has_cauchy_extension = False
try: # Try pykeops
import pykeops
from pykeops.torch import Genred
except ImportError:
if not has_cauchy_extension:
log.error(
"Install at least one of pykeops or the cauchy_mult extension."
)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def cauchy_conj(v, z, w, num=2, denom=2):
""" Pykeops version """
if num == 1:
expr_num = 'z * ComplexReal(v) - Real2Complex(ComplexReal(v)*ComplexReal(w) + ComplexImag(v)*ComplexImag(w))'
elif num == 2:
expr_num = 'z * ComplexReal(v) - Real2Complex(Sum(v * w))'
else: raise NotImplementedError
if denom == 1:
expr_denom = 'ComplexMult(z-Real2Complex(ComplexReal(w)), z-Real2Complex(ComplexReal(w))) + Real2Complex(Square(ComplexImag(w)))'
elif denom == 2:
expr_denom = 'ComplexMult(z-w, z-Conj(w))'
else: raise NotImplementedError
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
# expr_num,
# expr_denom,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
dtype='float32' if v.dtype == torch.cfloat else 'float64',
)
v, z, w = _broadcast_dims(v, z, w)
v = torch.view_as_real(v)
z = torch.view_as_real(z)
w = torch.view_as_real(w)
r = 2*cauchy_mult(v, z, w, backend='GPU')
return torch.view_as_complex(r)
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
""" simple nn.Module components """
def Activation(activation=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear', 'modrelu' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
class TransposedLinear(nn.Module):
""" Linear module on the second-to-last dimension """
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # nn.Linear default init
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output, 1))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
else:
self.bias = 0.0
def forward(self, x):
return contract('... u l, v u -> ... v l', x, self.weight) + self.bias
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
""" Returns a linear nn.Module with control over axes order, initialization, and activation """
# Construct core module
linear_cls = TransposedLinear if transposed else nn.Linear
if activation == 'glu': d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, dim=-2 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
""" Misc functional utilities """
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
    x = x.contiguous()  # ensure a contiguous output layout before returning
if return_power:
return x, AL
else:
return x
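# Sanity-check sketch for the squaring trick above (illustrative, not part of the original file):
# krylov(L, A, b) should match the naive recurrence x_0 = b, x_{i+1} = A x_i stacked along the
# last dimension.
#
#   N, L_ = 4, 7
#   A_, b_ = torch.randn(N, N) / N, torch.randn(N)
#   cols, x_ = [], b_
#   for _ in range(L_):
#       cols.append(x_)
#       x_ = A_ @ x_
#   assert torch.allclose(krylov(L_, A_, b_), torch.stack(cols, dim=-1), atol=1e-5)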
def power(L, A, v=None):
""" Compute A^L and the scan sum_i A^i v_i
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
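# Sketch of what `power` computes (illustrative): for v of shape (..., N, L) it returns
# (A^L, sum_i A^i v[..., i]), the quantities needed by `HippoSSKernel.next_state` below to
# advance the recurrence x_{t+1} = A x_t + B u_t by L steps (that caller supplies the inputs
# in reverse time order via u.flip(-1)).
#
#   AL, s = power(3, A, v)   # AL == A @ A @ A
#                            # s  == v[..., 0] + A @ v[..., 1] + A @ A @ v[..., 2]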
""" HiPPO utilities """
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
elif measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
else:
raise NotImplementedError
return A, B
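# Worked example (illustrative): for measure='legs' and N=3 the code above gives, up to floating
# point,
#   A = [[-1.000,  0.000,  0.000],
#        [-1.732, -2.000,  0.000],
#        [-2.236, -3.873, -3.000]]
#   B = [[1.000], [1.732], [2.236]]
# i.e. A[n, k] = -sqrt((2n+1)(2k+1)) for n > k, -(n+1) on the diagonal, 0 above it, and
# B[n] = sqrt(2n+1).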
def rank_correction(measure, N, rank=1, dtype=torch.float):
""" Return low-rank matrix L such that A + L is normal """
if measure == 'legs':
assert rank >= 1
p = torch.sqrt(.5+torch.arange(N, dtype=dtype)).unsqueeze(0) # (1 N)
elif measure == 'legt':
assert rank >= 2
p = torch.sqrt(1+2*torch.arange(N, dtype=dtype)) # (N)
p0 = p.clone()
p0[0::2] = 0.
p1 = p.clone()
p1[1::2] = 0.
p = torch.stack([p0, p1], dim=0) # (2 N)
elif measure == 'lagt':
assert rank >= 1
p = .5**.5 * torch.ones(1, N, dtype=dtype)
else: raise NotImplementedError
d = p.size(0)
if rank > d:
        p = torch.cat([p, torch.zeros(rank - d, N, dtype=dtype)], dim=0) # (rank N)
return p
def nplr(measure, N, rank=1, dtype=torch.float):
""" Return w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
"""
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
p = rank_correction(measure, N, rank=rank, dtype=dtype)
Ap = A + torch.sum(p.unsqueeze(-2)*p.unsqueeze(-1), dim=-3)
w, V = torch.linalg.eig(Ap) # (..., N) (..., N, N)
# V w V^{-1} = A
# Only keep one of the conjugate pairs
w = w[..., 0::2].contiguous()
V = V[..., 0::2].contiguous()
V_inv = V.conj().transpose(-1, -2)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
p = contract('ij, ...j -> ...i', V_inv, p.to(V)) # V^* p
return w, p, p, B, V
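# Shape sketch (illustrative): nplr('legs', 64, rank=1) returns w of shape (32,), p and q of shape
# (1, 32), B of shape (32,), and V of shape (64, 32) -- only one element of each complex-conjugate
# pair is kept, so the last dimension of the returned tensors is N/2.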
""" Final S4 Module, and simplified but slower version for testing/exposition """
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, trainable=0, lr=None, wd=None, repeat=1):
"""Utility method: register a tensor as a buffer or trainable parameter"""
if trainable == 0:
self.register_buffer(name, tensor)
elif trainable == 1:
self.register_parameter(name, nn.Parameter(tensor))
elif trainable == 2:
tensor = tensor.repeat(repeat, *(1,) * len(tensor.shape))
self.register_parameter(name, nn.Parameter(tensor))
else:
raise NotImplementedError
optim = {}
if trainable and lr is not None:
optim["lr"] = lr
# setattr(getattr(self, name), '_lr', lr)
if trainable and wd is not None:
optim["weight_decay"] = wd
# setattr(getattr(self, name), '_wd', wd)
if len(optim) > 0:
setattr(getattr(self, name), "_optim", optim)
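# Sketch of how the `_optim` attribute set above can be consumed when constructing the optimizer
# (illustrative; the training code that actually does this is not part of this file):
#
#   def param_groups(model, **defaults):
#       plain = [p for p in model.parameters() if not hasattr(p, "_optim")]
#       special = [p for p in model.parameters() if hasattr(p, "_optim")]
#       groups = [{"params": plain, **defaults}]
#       groups += [{"params": [p], **{**defaults, **p._optim}} for p in special]
#       return groups
#
#   optimizer = torch.optim.AdamW(param_groups(model, lr=1e-3, weight_decay=0.01))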
class SSKernelNPLR(OptimModule):
"""Stores a representation of and computes the SSKernel function K_L(A^dt, B^dt, C) corresponding to a discretized state space, where A is Normal + Low Rank (NPLR)
The class name stands for 'State-Space SSKernel for Normal Plus Low-Rank'.
The parameters of this function are as follows.
A: (... N N) the state matrix
B: (... N) input matrix
C: (... N) output matrix
dt: (...) timescales / discretization step size
p, q: (... P N) low-rank correction to A, such that Ap=A+pq^T is a normal matrix
The forward pass of this Module returns:
    (... L) that represents the SSKernel function K_L(A^dt, B^dt, C), computed via FFT
"""
@torch.no_grad()
def _process_C(self, L, double_length=False):
C = torch.view_as_complex(self.C)
self._setup(setup_C=False)
dA = self.dA
dA_L = power(L, dA)
# I = torch.eye(dA.size(-1)).to(dA)
N = C.size(-1)
# Multiply C by I - dA_L
C_ = C[..., 0, :]
C_ = torch.cat([C_, C_.conj()], dim=-1)
prod = contract("... m n, ... n -> ... m", dA_L.conj().transpose(-1, -2), C_)
if double_length: # Multiply by I + dA_L instead
C_ = C_ + prod
else:
C_ = C_ - prod
C_ = C_[..., :N]
self.C[..., 0, :, :].copy_(torch.view_as_real(C_))
def _nodes(self, L, dtype, device):
        # Cache the FFT nodes and the corresponding "unprocessed" frequencies z obtained via the bilinear transform
# nodes = torch.tensor(np.exp(-2j * np.pi / (L)), dtype=torch.cfloat, device=Ap.device) # \omega_{2L}
nodes = torch.tensor(
np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
) # \omega_{2L}
nodes = nodes ** torch.arange(0, L // 2 + 1, device=device)
z = 2 * (1 - nodes) / (1 + nodes)
return nodes, z
def __init__(
self,
L,
w,
p,
q,
B,
C,
log_dt,
trainable=None,
lr=None,
setup_C=False,
keops=False,
):
"""Optim arguments into a representation. This occurs after init so that these operations can occur after moving model to device
L: Maximum length; this module computes SSKernel function of length L
A: (..., N, N) represented by diag(w) - pq^*
B: (..., N)
C: (..., N)
dt: (...)
p: (..., N) low-rank correction to A
q: (..., N)
"""
super().__init__()
self.keops = keops
# Rank of low-rank correction
assert p.shape[-2] == q.shape[-2]
self.rank = p.shape[-2]
self.L = L
# Augment B and C with low rank correction
B = B.unsqueeze(-2) # (..., 1, N)
C = C.unsqueeze(-2) # (..., 1, N)
if len(B.shape) > len(p.shape):
p = p.repeat(B.shape[:-2] + (1, 1))
B = torch.cat([B, p], dim=-2)
if len(C.shape) > len(q.shape):
q = q.repeat(C.shape[:-2] + (1, 1))
C = torch.cat([C, q], dim=-2)
if L is not None:
nodes, z = self._nodes(L, dtype=w.dtype, device=w.device)
self.register_buffer("nodes", torch.view_as_real(nodes))
self.register_buffer("z", torch.view_as_real(z))
# Register parameters
if trainable is None:
trainable = DictConfig({"A": 0, "B": 0, "C": 0, "dt": 0})
if lr is None:
lr = DictConfig({"A": None, "B": None, "C": None, "dt": None})
repeat = C.size(0)
self.register("log_dt", log_dt, trainable.dt, lr.dt, 0.0)
self.register("w", torch.view_as_real(w), trainable.A, lr.A, 0.0, repeat=repeat)
self.register("B", torch.view_as_real(B), trainable.B, lr.B, 0.0, repeat=repeat)
self.register("C", torch.view_as_real(C), trainable.C, lr.C)
if setup_C:
self._process_C(L)
def forward(self, state=None, rate=1.0, L=None):
"""
state: (..., s, N) extra tensor that augments B
rate: sampling rate factor
"""
        # if L is not None: raise NotImplementedError
        # TODO: handle potential length doubling logic so that max_len doesn't need to be passed in
        if L is None:
            L = self.L
        # Double the cached kernel length until it covers the requested length
        while rate == 1.0 and L > self.L:
            log.info(f"s4: Doubling length from L = {self.L} to {2*self.L}")
            self.double_length()
        if rate == 1.0:
            L = self.L
        else:
            rate = self.L / L
dt = torch.exp(self.log_dt) * rate
B = torch.view_as_complex(self.B)
C = torch.view_as_complex(self.C)
w = torch.view_as_complex(self.w) # (..., N)
# z = torch.view_as_complex(self.z) # (..., L)
# TODO adjust based on rate times normal max length
if L == self.L:
nodes = torch.view_as_complex(self.nodes)
z = torch.view_as_complex(self.z) # (..., L)
else:
nodes, z = self._nodes(L, dtype=w.dtype, device=w.device)
# Augment B
if state is not None: # TODO have not updated
# Have to "unbilinear" the state to put it into the same "type" as B
# Compute (I + dt/2 A) @ state
s = state.transpose(0, 1) # (H B N)
p = B[..., 1:, :] # (... r N)
q = C[..., 1:, :] # (... r N)
# Calculate contract('... s n, ... r n, ... r m -> ... s m', sV, qV.conj(), pV), but take care of conjugate symmetry
sA = (
s * w.unsqueeze(-2)
- (2 + 0j) * (s @ q.conj().transpose(-1, -2)).real @ p
)
s = s / dt.unsqueeze(-1).unsqueeze(-1) + sA / 2
B = torch.cat([s, B], dim=-2) # (..., 2+s, N)
# Incorporate dt into A
w = w * dt.unsqueeze(-1) # (... N)
# Incorporate B and C batch dimensions
v = B.unsqueeze(-3) * C.unsqueeze(-2).conj() # (..., 2, 2, N)
w = w[..., None, None, :] # (..., 1, 1, N)
z = z[..., None, None, :] # (..., 1, 1, L)
# Calculate resolvent at nodes
if not self.keops and has_cauchy_extension:
r = cauchy_mult(v, z, w)
else:
r = cauchy_conj(v, z, w)
r = r * dt[..., None, None, None] # (..., 1+r, 1+r, L)
# Low-rank Woodbury correction
if self.rank == 1:
k_f = r[..., :-1, :-1, :] - r[..., :-1, -1:, :] * r[..., -1:, :-1, :] / (
1 + r[..., -1:, -1:, :]
)
elif self.rank == 2:
r00 = r[..., : -self.rank, : -self.rank, :]
r01 = r[..., : -self.rank, -self.rank :, :]
r10 = r[..., -self.rank :, : -self.rank, :]
r11 = r[..., -self.rank :, -self.rank :, :]
det = (1 + r11[..., :1, :1, :]) * (1 + r11[..., 1:, 1:, :]) - r11[
..., :1, 1:, :
] * r11[..., 1:, :1, :]
s = (
r01[..., :, :1, :] * (1 + r11[..., 1:, 1:, :]) * r10[..., :1, :, :]
+ r01[..., :, 1:, :] * (1 + r11[..., :1, :1, :]) * r10[..., 1:, :, :]
- r01[..., :, :1, :] * (r11[..., :1, 1:, :]) * r10[..., 1:, :, :]
- r01[..., :, 1:, :] * (r11[..., 1:, :1, :]) * r10[..., :1, :, :]
)
s = s / det
k_f = r00 - s
else:
r00 = r[..., : -self.rank, : -self.rank, :]
r01 = r[..., : -self.rank, -self.rank :, :]
r10 = r[..., -self.rank :, : -self.rank, :]
r11 = r[..., -self.rank :, -self.rank :, :]
r11 = rearrange(r11, "... a b n -> ... n a b")
r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
r11 = rearrange(r11, "... n a b -> ... a b n")
k_f = r00 - torch.einsum(
"... i j n, ... j k n, ... k l n -> ... i l n", r01, r11, r10
)
# Final correction for the bilinear transform
k_f = k_f * 2 / (1 + nodes)
k = torch.fft.irfft(k_f) # (..., 1, 1+s, L)
if state is not None:
k_state = k[..., 0, :-1, :] # (..., s, L)
k_state = k_state.transpose(0, 1)
k_B = k[..., 0, -1, :] # (..., L)
return k_B.to(torch.float), k_state.to(torch.float)
else:
return k.squeeze(-2).squeeze(-2).to(torch.float)
@torch.no_grad()
def double_length(self):
self._process_C(self.L, double_length=True)
self.L *= 2
dtype = torch.view_as_complex(self.w).dtype
nodes, z = self._nodes(self.L, dtype=dtype, device=self.w.device)
self.register_buffer("nodes", torch.view_as_real(nodes))
self.register_buffer("z", torch.view_as_real(z))
@torch.no_grad()
def _check(self):
"""Check if A, B, C parameters and vanilla SSKernel construction can be recovered"""
self._setup(setup_C=True)
K = krylov(self.L, self.dA, self.dB, self.dC.conj())
diff = K - self.forward()
print("checking SSKernel construction", torch.sum(diff ** 2))
def _setup(self, setup_C=True):
w = _conj(torch.view_as_complex(self.w))
B = _conj(torch.view_as_complex(self.B))
C = _conj(torch.view_as_complex(self.C))
C = C.conj()
p = B[..., -1, :]
q = C[..., -1, :]
B = B[..., 0, :]
C = C[..., 0, :]
dt = torch.exp(self.log_dt)
d = (2.0 / dt.unsqueeze(-1) - w).reciprocal() # (H, N)
r = (1 + contract("... n, ... n, ... n -> ...", q, d, p)).reciprocal()
# A_f = torch.diag_embed(2./dt[:, None] + w) - contract('... n, ... m -> ... n m', p, q)
# A_b = torch.diag_embed(d) - contract('... p, ... p, ..., ... q, ... q -> ... p q', d, p, r, q, d)
# dA = A_b @ A_f
self.step_params = {
"d": d,
"r": r.unsqueeze(-1) * d * q,
# 'r': r,
"p": p,
"q": q,
"B": B,
"d1": 2.0 / dt.unsqueeze(-1) + w,
}
N = d.size(-1)
H = dt.size(-1)
state = torch.eye(N, dtype=w.dtype, device=w.device).unsqueeze(-2)
u = w.new_zeros(H)
dA = self.step_state_linear(u, state)
dA = rearrange(dA, "n h m -> h m n")
self.dA = dA
u = w.new_ones(H)
state = w.new_zeros(N // 2)
dB = self.step_state_linear(u, state)
dB = _conj(dB)
self.dB = dB
if setup_C:
dA_L = power(self.L, dA)
I = torch.eye(dA.size(-1)).to(dA)
dC = torch.linalg.solve(
I - dA_L.transpose(-1, -2).conj(), C.conj().unsqueeze(-1)
).squeeze(-1)
self.dC = dC
def step_state_linear(self, u=None, state=None):
"""Version of the step function that has time O(N) instead of O(N^2) per step. Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations. Perhaps a fused CUDA kernel implementation would be much faster"""
N = self.step_params["d"].size(-1)
H = self.log_dt.size(-1)
if u is None:
u = torch.zeros(H, dtype=torch.float, device=self.log_dt.device)
if state is None:
state = torch.zeros(H, N, dtype=torch.cfloat, device=self.log_dt.device)
conj = state.size(-1) != N
step_params = self.step_params.copy()
if conj:
assert state.size(-1) == N // 2
step_params = {k: v[..., : N // 2] for k, v in step_params.items()}
d1 = step_params["d1"] # (H N)
p = step_params["p"] # (H N)
q = step_params["q"] # (H N)
B = step_params["B"] # (H N)
r = step_params["r"]
d = step_params["d"] # (H N)
# dC = self.step_params['dC'] # (H N)
state = state.to(d1)
if conj:
new_state = (
2 * p * torch.sum(q * state, dim=-1, keepdim=True).real
) # conjugated version
else:
new_state = contract("... n, ... m, ... m -> ... n", p, q, state) # (B H N)
new_state = d1 * state - new_state
new_state = new_state + 2.0 * B * u.unsqueeze(-1) # (B H N)
if conj:
A_ = (
2 * p * torch.sum(r * new_state, dim=-1, keepdim=True).real
) # conj version
else:
A_ = contract("... p, ... q, ... q -> ... p", p, r, new_state) # (B H N)
new_state = d * (new_state - A_)
return new_state
def step_state(self, u, state):
state = state.to(self.dA)
conj = state.size(-1) != self.dA.size(-1)
if conj:
state = _conj(state)
next_state = contract("h m n, b h n -> b h m", self.dA, state) + contract(
"h n, b h -> b h n", self.dB, u
)
if conj:
next_state = next_state[..., : state.size(-1) // 2]
return next_state
def step(self, u, state, linear=False):
N = self.step_params["d"].size(-1)
conj = state.size(-1) != N
if linear:
new_state = self.step_state_linear(u, state)
else:
new_state = self.step_state(u, state)
if conj:
assert state.size(-1) == N // 2
# dC = self.dC[..., 0::2].conj()
dC = self.dC[..., : N // 2].conj()
out = 2 * torch.sum(dC * new_state, dim=-1).real # conj version
else:
out = contract("... n, ... n -> ...", self.dC.conj(), new_state)
return out.to(torch.float), new_state
class SSKernelSlow(OptimModule):
"""Slow version of SSKernel function for illustration and benchmarking.
- Caches discretized matrices A^(dt), B^(dt)
- Computes K_L(A^dt, B^dt, C)
Usage:
```
krylov = SSKernelSlow(L, A, B, C, log_dt)()
```
Result is expected to be equal to SSKernelNPLR(L, A, B, C, log_dt, p, q)() for p, q such that A+pq^T is normal
"""
def __init__(self, L, A, B, C, log_dt, trainable=None, lr=None):
super().__init__()
self.N = A.shape[-1]
self.L = L
dA, dB = SSKernelSlow.bilinear(torch.exp(log_dt), A, B)
# Register parameters
if trainable is None:
trainable = DictConfig({"A": 0, "B": 0, "C": 0, "dt": 0})
if lr is None:
lr = DictConfig({"A": None, "B": None, "C": None, "dt": None})
if trainable is not None and lr is not None:
repeat = C.size(0)
self.register("log_dt", log_dt, trainable.dt, lr.dt)
self.register("dA", dA, trainable.A, lr.A, repeat=repeat)
self.register("dB", dB, 1, lr.B)
self.register("C", C, trainable.C, lr.C)
def forward(self, rate=1.0, L=None, state=None):
if L is None:
L = self.L
if rate is None:
rate = self.L / L # TODO this class doesn't actually support rates
k = krylov(L, self.dA, self.dB, self.C.conj()) # (H L)
if state is not None:
if state.size(-1) != self.dA.size(-1):
state = _conj(state)
state = state.to(self.dA)
state = contract("... n m, ... m -> ... n", self.dA, state)
k_state = krylov(L, self.dA, state, self.C.conj())
return k.to(torch.float), k_state.to(torch.float)
return k.to(torch.float)
@classmethod
def bilinear(cls, dt, A, B=None, separate=False):
"""
dt: (...) timescales
A: (... N N)
B: (... N)
"""
N = A.shape[-1]
I = torch.eye(N).to(A)
A_backwards = I - dt[:, None, None] / 2 * A
A_forwards = I + dt[:, None, None] / 2 * A
if B is None:
dB = None
else:
dB = dt[..., None] * torch.linalg.solve(
A_backwards, B.unsqueeze(-1)
).squeeze(
-1
) # (... N)
if separate:
A_b = torch.linalg.solve(A_backwards, I) # (... N N)
return A_forwards, A_b, dB
else:
dA = torch.linalg.solve(A_backwards, A_forwards) # (... N N)
return dA, dB
def _setup(self, setup_C=True):
if setup_C:
self.dC = self.C
def step(self, u, state):
state = state.to(self.dA)
if state.size(-1) != self.dA.size(-1):
state = _conj(state)
next_state = contract("h m n, b h n -> b h m", self.dA, state) + contract(
"h n, b h -> b h n", self.dB, u
)
y = contract("... n, ... n -> ...", self.dC.conj(), next_state)
return y.to(torch.float), next_state
class HippoSSKernel(nn.Module):
"""Wrapper around SSKernelNPLR that generates A, B, C, dt according to HiPPO arguments."""
def __init__(
self,
N,
H,
L=None,
measure="legs",
rank=1,
dt_min=0.001,
dt_max=0.1,
trainable=None,
lr=None,
mode="nplr", # 'slow' for complex naive version, 'real' for real naive version
length_correction=False,
precision=1,
cache=False,
resample=False, # if given inputs of different lengths, adjust the sampling rate
keops=False,
):
super().__init__()
self.N = N
self.H = H
L = L or 1
self.precision = precision
dtype = torch.double if self.precision == 2 else torch.float
self.rate = None if resample else 1.0
# Set default trainable and lr parameters
self.trainable = DictConfig(
{
"A": 1,
"B": 2,
"C": 1,
"dt": 1,
}
)
if trainable is not None:
self.trainable.update(trainable)
self.lr = DictConfig(
{
"A": 1e-3,
"B": 1e-3,
"C": None,
"dt": 1e-3,
}
)
if lr is not None:
self.lr.update(lr)
# Generate dt
self.log_dt = torch.rand(self.H, dtype=dtype) * (
math.log(dt_max) - math.log(dt_min)
) + math.log(dt_min)
# Compute the preprocessed representation
if mode == "real": # Testing purposes only
# Generate A, B
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype)
B = torch.as_tensor(B, dtype=dtype)[:, 0]
# Generate C
C = torch.randn(self.H, self.N, dtype=dtype)
self.krylov = SSKernelSlow(
L, A, B, C, self.log_dt, trainable=self.trainable, lr=self.lr
)
else:
# Generate low rank correction p for the measure
w, p, q, B, _ = nplr(measure, N, rank, dtype=dtype)
cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
C = torch.randn(self.H, self.N // 2, dtype=cdtype)
if mode == "nplr":
self.krylov = SSKernelNPLR(
L,
w,
p,
q,
B,
C,
self.log_dt,
trainable=self.trainable,
lr=self.lr,
setup_C=length_correction,
keops=keops,
)
elif mode == "slow": # Testing only
A = torch.diag_embed(_conj(w)) - contract(
"... r p, ... r q -> ... p q", _conj(p), _conj(q).conj()
)
self.krylov = SSKernelSlow(
L,
A,
_conj(B),
_conj(C),
self.log_dt,
trainable=self.trainable,
lr=self.lr,
)
# Cached tensors
self.K = None
self.cache = cache
def forward(self, state=None, L=None):
"""
state: (B, H, N)
"""
if state is not None:
k, k_state = self.krylov(
state=state, rate=self.rate, L=L
) # (B, H, L) (B, H, N)
return k, k_state
else:
# Calculate K if needed
if not self.training and self.K is not None and self.K.size(-1) == L:
k = self.K
else:
k = self.krylov(rate=self.rate, L=L).to(torch.float)
# Store K if needed
if self.cache and not self.training:
self.K = k
else: # If training, parameter will change after backprop so make sure to recompute on next pass
self.K = None
return k
@torch.no_grad()
def next_state(self, state, u):
"""
state: (..., N)
u: (..., L)
Returns: (..., N)
"""
self.krylov._setup()
dA, dB = self.krylov.dA, self.krylov.dB
conj = state.size(-1) != dA.size(-1)
if conj:
state = _conj(state)
v = dB.unsqueeze(-1) * u.flip(-1).unsqueeze(-2) # (..., N, L)
AL, v = power(u.size(-1), dA, v)
next_state = contract("... m n, ... n -> ... m", AL, state)
next_state = next_state + v
if conj:
next_state = next_state[..., : next_state.size(-1) // 2]
return next_state
def step(self, u, state):
return self.krylov.step(u, state)
def double_length(self):
self.krylov.double_length()
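# Usage sketch for the kernel wrapper above (illustrative, not part of the original file):
#
#   kernel = HippoSSKernel(N=64, H=256, L=1024)
#   k = kernel(L=1024)   # (H, L) convolution kernel; the default 'nplr' mode needs the cauchy
#                        # CUDA extension or pykeops on a GPU, while mode='slow' is CPU-friendly
#                        # but intended only for testing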
class S4(nn.Module):
def __init__(
self,
H,
l_max=None,
# Arguments for SSM Kernel
d_state=64,
measure='legs',
dt_min=0.001,
dt_max=0.1,
rank=1,
trainable=None,
lr=None,
length_correction=False,
stride=1,
weight_decay=0.0, # weight decay on the SS Kernel
precision=1,
cache=False, # Cache the SS Kernel during evaluation
# Arguments for FF
activation='gelu', # activation in between SS and FF
postact=None, # activation after FF
weight_norm=False, # weight normalization on FF
initializer=None, # initializer on FF
input_linear=False,
hyper_act=None,
dropout=0.0,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
resample=False,
use_state=False,
verbose=False,
mode='nplr',
keops=False,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum sequence length, also denoted by L
if this is not known at model creation, or inconvenient to pass in,
set l_max=None and length_correction=True
dropout: standard dropout argument
transposed: choose backbone axis ordering of (B, L, D) or (B, D, L) [B=batch size, L=sequence length, D=feature dimension]
Other options are all experimental and should not need to be configured
"""
super().__init__()
if verbose:
import src.utils.train
log = src.utils.train.get_logger(__name__)
log.info(f"Constructing s4 (H, N, L) = ({H}, {d_state}, {l_max})")
self.h = H
self.n = d_state if d_state > 0 else H
self.stride = stride
if l_max is not None and stride > 1:
assert l_max % stride == 0
l_max = l_max // self.stride
self.cache = cache
self.weight_decay = weight_decay
self.transposed = transposed
self.resample = resample
self.D = nn.Parameter(torch.randn(self.h))
# Optional (position-wise) input transform
if input_linear:
self.input_linear = LinearActivation(
self.h,
self.h,
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
else:
self.input_linear = nn.Identity()
# SSM Kernel
self.kernel = HippoSSKernel(self.n, self.h, l_max, dt_min=dt_min, dt_max=dt_max, measure=measure, rank=rank, trainable=trainable, lr=lr, length_correction=length_correction, precision=precision, cache=cache, mode=mode, resample=resample, keops=keops)
self.K = None # Cache the computed convolution filter if possible (during evaluation)
# optional multiplicative modulation
self.hyper = hyper_act is not None
if self.hyper:
self.hyper_linear = LinearActivation(
self.h,
self.h,
transposed=True,
initializer=initializer,
activation=hyper_act,
activate=True,
weight_norm=weight_norm,
)
self.activation = Activation(activation)
dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
self.output_linear = LinearActivation(
self.h,
self.h,
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
if use_state:
self._initial_state = nn.Parameter(torch.zeros(self.h, self.n))
def forward(self, u, state=None, cache=None, **kwargs): # absorbs return_output and transformer src mask
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as u
"""
u = self.input_linear(u)
if not self.transposed: u = u.transpose(-1, -2)
L = u.size(-1)
# Compute SS Kernel
if state is not None:
assert self.stride == 1, "Striding not supported with states"
k, k_state = self.kernel(state=state, L=L)
else:
k = self.kernel(L=L)
# Stride the filter if needed
if self.stride > 1:
k = k[..., :L // self.stride] # (H, L/S)
k = F.pad(k.unsqueeze(-1), (0, self.stride-1)) # (H, L/S, S)
k = rearrange(k, '... h s -> ... (h s)') # (H, L)
else:
k = k[..., :L]
# Convolution
k_f = torch.fft.rfft(k, n=2*L) # (H L)
u_f = torch.fft.rfft(u.to(k.dtype), n=2*L) # (B H L)
y_f = k_f * u_f
y = torch.fft.irfft(y_f, n=2*L)[..., :L] # (B H L)
# Compute D term in state space equation - essentially a skip connection
y = y + u * self.D.unsqueeze(-1)
# Compute state update
if state is not None:
y = y + k_state[..., :L]
next_state = self.kernel.next_state(state, u)
else:
next_state = None
# Optional hyper-network multiplication
if self.hyper:
hyper = self.hyper_linear(u)
y = hyper * y
y = self.dropout(self.activation(y))
if not self.transposed: y = y.transpose(-1, -2)
y = self.output_linear(y)
return y, next_state
def step(self, u, state):
""" Step one time step as a recurrent model. Intended to be used during validation.
u: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
assert not self.training
y, next_state = self.kernel.step(u, state)
y = y + u * self.D
y = self.output_linear(self.activation(y).unsqueeze(-1)).squeeze(-1)
return y, next_state
def default_state(self, *batch_shape, device=None):
return self._initial_state.repeat(*batch_shape, 1, 1)
@property
def d_state(self):
return self.h * self.n
@property
def d_output(self):
return self.h
@property
def state_to_tensor(self):
        return lambda state: rearrange(state, '... h n -> ... (h n)')
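# Hedged usage sketch (not part of the original file). It assumes the enclosing
# module is exported as `S4`, that `H`, `d_state`, and `l_max` can be passed as
# keywords (as suggested by the constructor and docstring above), and that the
# SSM kernel falls back to a pure-PyTorch path when no custom CUDA/keops
# extension is installed.
if __name__ == '__main__':
    layer = S4(H=64, d_state=64, l_max=256)
    u = torch.randn(2, 64, 256)   # default transposed layout (B, H, L)
    y, _ = layer(u)               # forward returns (output, next_state)
    print(y.shape)                # expected: torch.Size([2, 64, 256])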
| fly-master | src/models/modules/s4.py |
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
Take the standard Transformer as T2T Transformer
"""
import torch.nn as nn
from torchvision.ops import StochasticDepth
from einops import rearrange
import hydra
from src.models.modules.seq_common import Mlp
class T2TAttention(nn.Module):
def __init__(self, dim, num_heads=8, in_dim=None, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., attn_cfg=None):
super().__init__()
self.num_heads = num_heads
self.in_dim = in_dim if in_dim is not None else dim
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, in_dim * 3, bias=qkv_bias)
self.proj = nn.Linear(in_dim, in_dim)
self.proj_drop = nn.Dropout(proj_drop)
if attn_cfg is None:
self.attention_layer = None
self.attn_drop = nn.Dropout(attn_drop)
else:
self.attention_layer = hydra.utils.instantiate(attn_cfg, softmax_temp=self.scale,
_recursive_=False)
def forward(self, x):
B, N, C = x.shape
q, k, v = self.qkv(x).chunk(3, dim=-1) # (B, N, D)
v_og = v
q, k, v = [rearrange(x, 'b n (n_head head_dim) -> b n n_head head_dim',
n_head=self.num_heads) for x in (q, k, v)]
if self.attention_layer is None: # Full attention
q, k, v = [rearrange(x, 'b n n_head head_dim -> b n_head n head_dim') for x in (q, k, v)]
attn = (q * self.scale) @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
attn_output = (attn @ v).transpose(1, 2)
else:
attn_output, _ = self.attention_layer(q, k, v)
x = rearrange(attn_output, 'b n h d -> b n (h d)')
x = self.proj(x)
x = self.proj_drop(x)
# skip connection
        # because the original x has a different size from the current x, use v for the skip connection
x = v_og + x
return x
class Token_transformer(nn.Module):
def __init__(self, dim, in_dim, num_heads, mlp_ratio=1., qkv_bias=False, qk_scale=None, drop=0.,
attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
attn_cfg=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = T2TAttention(
dim, in_dim=in_dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop,
attn_cfg=attn_cfg,
)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(in_dim)
self.mlp = Mlp(in_features=in_dim, hidden_features=int(in_dim * mlp_ratio),
out_features=in_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = self.attn(self.norm1(x))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
| fly-master | src/models/modules/token_transformer.py |
import math
import torch
import torch.nn as nn
import hydra
from einops import reduce, rearrange
from src.utils.tuples import to_2tuple
def pooling(x, pooling_mode='CLS', key_padding_mask=None, batch_first=True):
if pooling_mode not in ['MEAN', 'SUM', 'CLS', 'FLATTEN']:
        raise NotImplementedError(f'pooling_mode must be MEAN, SUM, CLS, or FLATTEN, got {pooling_mode}')
if pooling_mode in ['MEAN', 'SUM']:
if key_padding_mask is not None:
mask = rearrange(~key_padding_mask.bool_matrix,
'b s -> b s 1' if batch_first else 'b s -> s b 1')
x = x.masked_fill(mask, 0)
return reduce(x, 'b s ... -> b ...' if batch_first else 's b ... -> b ...',
pooling_mode.lower())
elif pooling_mode == 'CLS':
return x[:, 0] if batch_first else x[0]
elif pooling_mode == 'FLATTEN':
return rearrange(x, 'b ... -> b (...)' if batch_first else 's b ... -> b (s ...)')
class ClassificationHeadLinear(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, d_model, num_classes, pooling_mode='MEAN',
batch_first=False, **kwargs):
super().__init__()
assert pooling_mode in ['MEAN', 'SUM', 'CLS', 'FLATTEN'], 'pooling_mode not supported'
self.pooling_mode = pooling_mode
self.batch_first = batch_first
self.out_proj = nn.Linear(d_model, num_classes)
def forward(self, hidden_states, key_padding_mask=None, **kwargs):
"""
hidden_states: (B, S, D) if batch_first else (S, B, D)
"""
hidden_states = pooling(hidden_states, pooling_mode=self.pooling_mode,
key_padding_mask=key_padding_mask, batch_first=self.batch_first)
hidden_states = self.out_proj(hidden_states)
return hidden_states
# Adapted from https://github.com/huggingface/transformers/blob/master/src/transformers/models/reformer/modeling_reformer.py
class ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, d_model, d_inner, num_classes, dropout=0.0, pooling_mode='MEAN',
batch_first=False):
super().__init__()
assert pooling_mode in ['MEAN', 'SUM', 'CLS', 'FLATTEN'], 'pooling_mode not supported'
self.pooling_mode = pooling_mode
self.batch_first = batch_first
self.dense = nn.Linear(d_model, d_inner)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(d_inner, num_classes)
def forward(self, hidden_states, key_padding_mask=None, **kwargs):
"""
hidden_states: (B, S, D) if batch_first else (S, B, D)
"""
hidden_states = pooling(hidden_states, pooling_mode=self.pooling_mode,
key_padding_mask=key_padding_mask, batch_first=self.batch_first)
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
# Huggingface uses tanh instead of relu
hidden_states = torch.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class ClassificationHeadDual(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, d_model, d_inner, num_classes, dropout=0.0, pooling_mode='MEAN',
batch_first=False, interaction='NLI'):
super().__init__()
assert pooling_mode in ['MEAN', 'SUM', 'CLS'], 'pooling_mode not supported'
assert interaction in [None, 'NLI'], 'interaction not supported'
self.pooling_mode = pooling_mode
self.batch_first = batch_first
self.interaction = interaction
self.dense = nn.Linear(d_model * (4 if self.interaction == 'NLI' else 2), d_inner)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(d_inner, num_classes)
def forward(self, hidden_states1, hidden_states2,
key_padding_mask1=None, key_padding_mask2=None, **kwargs):
"""
hidden_states: (B, S, D) if batch_first else (S, B, D)
"""
x1 = pooling(hidden_states1, pooling_mode=self.pooling_mode,
key_padding_mask=key_padding_mask1, batch_first=self.batch_first)
x2 = pooling(hidden_states2, pooling_mode=self.pooling_mode,
key_padding_mask=key_padding_mask2, batch_first=self.batch_first)
hidden_states = (torch.cat([x1, x2, x1 * x2, x1 - x2], dim=-1) if self.interaction == 'NLI'
else torch.cat([x1, x2], dim=-1))
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
# Huggingface uses tanh instead of relu
hidden_states = torch.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
def sinusoidal_init_(tensor):
"""
tensor: (max_len, d_model)
"""
max_len, d_model = tensor.shape
position = rearrange(torch.arange(0.0, max_len), 's -> s 1')
div_term = torch.exp(-math.log(10000.0) * torch.arange(0.0, d_model, 2.0) / d_model)
tensor[:, 0::2] = torch.sin(position * div_term)
tensor[:, 1::2] = torch.cos(position * div_term)
return tensor
# Adapted from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
        \text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000, batch_first=False, initializer=None):
super().__init__()
self.batch_first = batch_first
self.dropout = nn.Dropout(p=dropout)
pe = torch.empty(max_len, d_model)
if initializer is None:
sinusoidal_init_(pe)
pe = rearrange(pe, 's d -> 1 s d' if self.batch_first else 's d -> s 1 d')
self.register_buffer('pe', pe)
else:
hydra.utils.call(initializer, pe)
pe = rearrange(pe, 's d -> 1 s d' if self.batch_first else 's d -> s 1 d')
self.pe = nn.Parameter(pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim] if not batch_first else [B, S, D]
output: [sequence length, batch size, embed dim] if not batch_first else [B, S, D]
Examples:
>>> output = pos_encoder(x)
"""
x = x + (self.pe[:, :x.size(1)] if self.batch_first else self.pe[:x.size(0)])
return self.dropout(x)
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/mlp.py
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
act_fn=None, drop=0., device=None, dtype=None):
"""TD [2021-10-27] act_fn takes precedence over act_layer if set.
        This is to support the Pytorch 1.10 Transformer interface that constructs the activation
*function*, not the activation *layer*.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.act = act_layer() if act_fn is None else act_fn
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class GluMlp(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
# override init of fc1 w/ gate portion set to weight near zero, bias=1
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)
def forward(self, x):
x = self.fc1(x)
x, gates = x.chunk(2, dim=-1)
x = x * self.act(gates)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GatedMlp(nn.Module):
""" MLP as used in gMLP
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
gate_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
if gate_layer is not None:
assert hidden_features % 2 == 0
self.gate = gate_layer(hidden_features)
hidden_features = hidden_features // 2 # FIXME base reduction on gate property?
else:
self.gate = nn.Identity()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.gate(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
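# Hedged usage sketch (not part of the original file): exercises `pooling` and
# `Mlp` on random inputs with the shapes documented in their docstrings above.
if __name__ == '__main__':
    x = torch.randn(4, 10, 32)                    # (batch, seq, dim)
    pooled = pooling(x, pooling_mode='MEAN')      # (batch, dim)
    mlp = Mlp(in_features=32, hidden_features=64)
    print(pooled.shape, mlp(x).shape)             # expected: (4, 32) and (4, 10, 32)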
| fly-master | src/models/modules/seq_common.py |
from torch import nn
class SimpleDenseNet(nn.Module):
def __init__(self, hparams: dict):
super().__init__()
self.model = nn.Sequential(
nn.Linear(hparams["input_size"], hparams["lin1_size"]),
nn.BatchNorm1d(hparams["lin1_size"]),
nn.ReLU(),
nn.Linear(hparams["lin1_size"], hparams["lin2_size"]),
nn.BatchNorm1d(hparams["lin2_size"]),
nn.ReLU(),
nn.Linear(hparams["lin2_size"], hparams["lin3_size"]),
nn.BatchNorm1d(hparams["lin3_size"]),
nn.ReLU(),
nn.Linear(hparams["lin3_size"], hparams["output_size"]),
)
def forward(self, x):
batch_size, channels, width, height = x.size()
# (batch, 1, width, height) -> (batch, 1*width*height)
x = x.view(batch_size, -1)
return self.model(x)
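# Hedged usage sketch (not part of the original file): the hyperparameter keys
# are taken from the constructor above; the sizes are illustrative only.
if __name__ == '__main__':
    import torch
    hparams = {"input_size": 28 * 28, "lin1_size": 256, "lin2_size": 256,
               "lin3_size": 128, "output_size": 10}
    net = SimpleDenseNet(hparams)
    print(net(torch.randn(8, 1, 28, 28)).shape)   # expected: torch.Size([8, 10])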
| fly-master | src/models/modules/simple_dense_net.py |
| fly-master | src/models/modules/__init__.py |
# Copied from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/masking.py
# so that users can run most of the code without having to compile pytorch-fast-transformers.
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>,
# Apoorv Vyas <[email protected]>
#
"""Create types of masks to be used in various places in transformers.
- Full mask (any key masked for any query)
- Length mask (masking out everything after a length)
- Triangular causal mask (mask any key succeeding the query)
All mask implementations should provide a single interface to be used by the
transformer layers and the attention layers.
NOTE: In all cases the value 1 or True signifies what should be kept and not
what should be deleted/masked.
"""
import torch
class BaseMask(object):
@property
def bool_matrix(self):
"""Return a bool (uint8) matrix with 1s to all places that should be
kept."""
raise NotImplementedError()
@property
def float_matrix(self):
"""Return the bool matrix as a float to be used as a multiplicative
mask for non softmax attentions."""
if not hasattr(self, "_float_matrix"):
with torch.no_grad():
self._float_matrix = self.bool_matrix.float()
return self._float_matrix
@property
def lengths(self):
"""If the matrix is of the following form
1 1 1 0 0 0 0
1 0 0 0 0 0 0
1 1 0 0 0 0 0
then return it as a vector of integers
3 1 2.
"""
if not hasattr(self, "_lengths"):
with torch.no_grad():
lengths = self.bool_matrix.long().sum(dim=-1)
# make sure that the mask starts with 1s and continues with 0s
# this should be changed to something more efficient, however,
# I chose simplicity over efficiency since the LengthMask class
# will be used anyway (and the result is cached)
m = self.bool_matrix.view(-1, self.shape[-1])
for i, l in enumerate(lengths.view(-1)):
if not torch.all(m[i, :l]):
raise ValueError("The mask is not a length mask")
self._lengths = lengths
return self._lengths
@property
def shape(self):
"""Return the shape of the boolean mask."""
return self.bool_matrix.shape
@property
def additive_matrix(self):
"""Return a float matrix to be added to an attention matrix before
softmax."""
if not hasattr(self, "_additive_matrix"):
with torch.no_grad():
self._additive_matrix = torch.log(self.bool_matrix.float())
return self._additive_matrix
@property
def additive_matrix_finite(self):
"""Same as additive_matrix but with -1e24 instead of infinity."""
if not hasattr(self, "_additive_matrix_finite"):
with torch.no_grad():
self._additive_matrix_finite = (
(~self.bool_matrix).float() * (-1e24)
)
return self._additive_matrix_finite
@property
def all_ones(self):
"""Return true if the mask is all ones."""
if not hasattr(self, "_all_ones"):
with torch.no_grad():
self._all_ones = torch.all(self.bool_matrix)
return self._all_ones
@property
def lower_triangular(self):
"""Return true if the attention is a triangular causal mask."""
if not hasattr(self, "_lower_triangular"):
self._lower_triangular = False
with torch.no_grad():
try:
lengths = self.lengths
if len(lengths.shape) == 1:
target = torch.arange(
1,
len(lengths)+1,
device=lengths.device
)
self._lower_triangular = torch.all(lengths == target)
except ValueError:
pass
return self._lower_triangular
class FullMask(BaseMask):
"""Thin wrapper over a pytorch tensor that provides the BaseMask
interface.
The arguments can be given both by keyword arguments and positional
arguments. To imitate function overloading, the constructor checks the type
    of the first argument; if it is a tensor it treats it as the mask,
    otherwise it assumes that it was the N argument.
Arguments
---------
mask: The mask as a PyTorch tensor.
N: The rows of the all True mask to be created if the mask argument is
not provided.
M: The columns of the all True mask to be created if the mask argument
is not provided. If N is given M defaults to N.
device: The device to create the mask in (defaults to cpu)
"""
def __init__(self, mask=None, N=None, M=None, device="cpu"):
# mask is a tensor so we ignore N and M
if mask is not None and isinstance(mask, torch.Tensor):
if mask.dtype != torch.bool:
raise ValueError("FullMask expects the mask to be bool")
with torch.no_grad():
self._mask = mask.clone()
return
# mask is an integer, N is an integer and M is None so assume they were
# passed as N, M
if mask is not None and M is None and isinstance(mask, int):
M = N
N = mask
if N is not None:
M = M or N
with torch.no_grad():
self._mask = torch.ones(N, M, dtype=torch.bool, device=device)
self._all_ones = True
return
raise ValueError("Either mask or N should be provided")
@property
def bool_matrix(self):
return self._mask
class LengthMask(BaseMask):
"""Provide a BaseMask interface for lengths. Mostly to be used with
sequences of different lengths.
Arguments
---------
lengths: The lengths as a PyTorch long tensor
max_len: The maximum length for the mask (defaults to lengths.max())
device: The device to be used for creating the masks (defaults to
lengths.device)
"""
def __init__(self, lengths, max_len=None, device=None):
self._device = device or lengths.device
with torch.no_grad():
self._lengths = lengths.clone().to(self._device)
self._max_len = max_len or self._lengths.max()
self._bool_matrix = None
self._all_ones = torch.all(self._lengths == self._max_len).item()
@property
def bool_matrix(self):
if self._bool_matrix is None:
with torch.no_grad():
indices = torch.arange(self._max_len, device=self._device)
self._bool_matrix = (
indices.view(1, -1) < self._lengths.view(-1, 1)
)
return self._bool_matrix
class TriangularCausalMask(LengthMask):
"""A square matrix with everything masked out above the diagonal.
Arguments
---------
N: The size of the matrix
device: The device to create the mask in (defaults to cpu)
"""
def __init__(self, N, device="cpu"):
lengths = torch.arange(1, N+1, device=device)
super(TriangularCausalMask, self).__init__(lengths, N, device)
self._lower_triangular = True
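# Hedged usage sketch (not part of the original file): builds the three mask
# types defined above and reads back a few of their cached properties.
if __name__ == '__main__':
    full = FullMask(N=4)
    lengths = LengthMask(torch.tensor([3, 1, 2]), max_len=4)
    causal = TriangularCausalMask(5)
    print(full.all_ones)             # expected: True
    print(lengths.bool_matrix)       # (3, 4) boolean matrix
    print(causal.lower_triangular)   # expected: True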
| fly-master | src/models/modules/masking.py |
# Adapted from https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
T2T-ViT
"""
import math
import torch
import torch.nn as nn
from .token_transformer import Token_transformer
from .token_performer import Token_performer
class T2T_module(nn.Module):
"""
Tokens-to-Token encoding module
"""
def __init__(self, img_size=224, tokens_type='performer', in_chans=3, embed_dim=768,
token_dim=64, attn1_cfg=None, attn2_cfg=None):
super().__init__()
if tokens_type == 'transformer':
print('adopt transformer encoder for tokens-to-token')
self.soft_split0 = nn.Unfold(kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
self.soft_split1 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.soft_split2 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.attention1 = Token_transformer(dim=in_chans * 7 * 7, in_dim=token_dim, num_heads=1,
mlp_ratio=1.0, attn_cfg=attn1_cfg)
self.attention2 = Token_transformer(dim=token_dim * 3 * 3, in_dim=token_dim, num_heads=1,
mlp_ratio=1.0, attn_cfg=attn2_cfg)
self.project = nn.Linear(token_dim * 3 * 3, embed_dim)
elif tokens_type == 'performer':
print('adopt performer encoder for tokens-to-token')
self.soft_split0 = nn.Unfold(kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
self.soft_split1 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.soft_split2 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
#self.attention1 = Token_performer(dim=token_dim, in_dim=in_chans*7*7, kernel_ratio=0.5)
#self.attention2 = Token_performer(dim=token_dim, in_dim=token_dim*3*3, kernel_ratio=0.5)
self.attention1 = Token_performer(dim=in_chans*7*7, in_dim=token_dim, kernel_ratio=0.5)
self.attention2 = Token_performer(dim=token_dim*3*3, in_dim=token_dim, kernel_ratio=0.5)
self.project = nn.Linear(token_dim * 3 * 3, embed_dim)
        elif tokens_type == 'convolution':  # just for comparison with convolution, not our model
# for this tokens type, you need change forward as three convolution operation
print('adopt convolution layers for tokens-to-token')
self.soft_split0 = nn.Conv2d(3, token_dim, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2)) # the 1st convolution
self.soft_split1 = nn.Conv2d(token_dim, token_dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) # the 2nd convolution
self.project = nn.Conv2d(token_dim, embed_dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) # the 3rd convolution
        self.num_patches = (img_size // (4 * 2 * 2)) * (img_size // (4 * 2 * 2))  # there are 3 soft splits with strides 4, 2, 2 respectively
def forward(self, x):
# step0: soft split
x = self.soft_split0(x).transpose(1, 2)
# iteration1: re-structurization/reconstruction
x = self.attention1(x)
B, new_HW, C = x.shape
x = x.transpose(1,2).reshape(B, C, int(math.sqrt(new_HW)), int(math.sqrt(new_HW)))
# iteration1: soft split
x = self.soft_split1(x).transpose(1, 2)
# iteration2: re-structurization/reconstruction
x = self.attention2(x)
B, new_HW, C = x.shape
x = x.transpose(1, 2).reshape(B, C, int(math.sqrt(new_HW)), int(math.sqrt(new_HW)))
# iteration2: soft split
x = self.soft_split2(x).transpose(1, 2)
# final tokens
x = self.project(x)
return x
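# Hedged usage sketch (not part of the original file): tokenizes a batch of
# 224x224 RGB images with the 'transformer' variant; the 'performer' variant
# follows the same call pattern.
if __name__ == '__main__':
    t2t = T2T_module(img_size=224, tokens_type='transformer', embed_dim=768)
    tokens = t2t(torch.randn(2, 3, 224, 224))
    print(tokens.shape)   # expected: torch.Size([2, 196, 768])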
| fly-master | src/models/modules/t2t.py |
"""
Take Performer as T2T Transformer
"""
import math
import torch
import torch.nn as nn
class Token_performer(nn.Module):
def __init__(self, dim, in_dim, head_cnt=1, kernel_ratio=0.5, dp1=0.1, dp2 = 0.1):
super().__init__()
        self.emb = in_dim * head_cnt  # we use 1, so it is not needed here
self.kqv = nn.Linear(dim, 3 * self.emb)
self.dp = nn.Dropout(dp1)
self.proj = nn.Linear(self.emb, self.emb)
self.head_cnt = head_cnt
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(self.emb)
        self.epsilon = 1e-8  # for numerical stability in division
self.mlp = nn.Sequential(
nn.Linear(self.emb, 1 * self.emb),
nn.GELU(),
nn.Linear(1 * self.emb, self.emb),
nn.Dropout(dp2),
)
self.m = int(self.emb * kernel_ratio)
self.w = torch.randn(self.m, self.emb)
self.w = nn.Parameter(nn.init.orthogonal_(self.w) * math.sqrt(self.m), requires_grad=False)
def prm_exp(self, x):
        # part of the function is borrowed from https://github.com/lucidrains/performer-pytorch
        # and Simo Ryu (https://github.com/cloneofsimo)
# ==== positive random features for gaussian kernels ====
# x = (B, T, hs)
# w = (m, hs)
# return : x : B, T, m
# SM(x, y) = E_w[exp(w^T x - |x|/2) exp(w^T y - |y|/2)]
# therefore return exp(w^Tx - |x|/2)/sqrt(m)
xd = ((x * x).sum(dim=-1, keepdim=True)).repeat(1, 1, self.m) / 2
wtx = torch.einsum('bti,mi->btm', x.float(), self.w)
return torch.exp(wtx - xd) / math.sqrt(self.m)
def single_attn(self, x):
k, q, v = torch.split(self.kqv(x), self.emb, dim=-1)
kp, qp = self.prm_exp(k), self.prm_exp(q) # (B, T, m), (B, T, m)
D = torch.einsum('bti,bi->bt', qp, kp.sum(dim=1)).unsqueeze(dim=2) # (B, T, m) * (B, m) -> (B, T, 1)
kptv = torch.einsum('bin,bim->bnm', v.float(), kp) # (B, emb, m)
y = torch.einsum('bti,bni->btn', qp, kptv) / (D.repeat(1, 1, self.emb) + self.epsilon) # (B, T, emb)/Diag
# skip connection
y = v + self.dp(self.proj(y)) # same as token_transformer in T2T layer, use v as skip connection
return y
def forward(self, x):
x = self.single_attn(self.norm1(x))
x = x + self.mlp(self.norm2(x))
return x
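# Hedged usage sketch (not part of the original file): the (dim, in_dim) values
# mirror the first tokens-to-token stage, i.e. 7x7 RGB patches projected to 64
# dimensions through the Performer kernel approximation above.
if __name__ == '__main__':
    tp = Token_performer(dim=3 * 7 * 7, in_dim=64, kernel_ratio=0.5)
    x = torch.randn(2, 56 * 56, 3 * 7 * 7)
    print(tp(x).shape)   # expected: torch.Size([2, 3136, 64])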
| fly-master | src/models/modules/token_performer.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# and https://github.com/yitu-opensource/T2T-ViT/blob/main/models/transformer_block.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import hydra
from torchvision.ops import StochasticDepth
from src.models.modules.seq_common import Mlp
class AttentionSimple(nn.Module):
"""This attention class makes several simplifying assumptions (commonly satisfied in vision
applications):
1. q = k = v
2. No masks: no attention mask, no key padding mask
3. Embed dimension = Input dimension, i.e. projection matrices are square.
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
packed_linear=True, linear_cfg=None):
"""packed_linear: whether to pack all 3 q_proj, k_proj, v_proj into 2 matrix.
This option is to be compatible with T2T-ViT pretrained weights, where there's only one
projection weight matrix.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if linear_cfg is not None:
packed_linear = False
self.packed_linear = packed_linear
if packed_linear:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
else:
if linear_cfg is None:
linear_cfg = {'_target_': 'torch.nn.Linear'}
self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
if self.packed_linear:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
else:
q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
q, k, v = [rearrange(x, 'b n (h d) -> b h n d', h=self.num_heads) for x in (q, k, v)]
# attn = (q @ k.transpose(-2, -1) * self.scale)
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = q.size()
_, _, k_seq_len, _ = k.size()
q = rearrange(q, 'b h t d -> (b h) t d')
k = rearrange(k, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
'(b h) t s -> b h t s', h = self.num_heads)
attn = F.softmax(attn, dim=-1, dtype=v.dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
attnlinear_cfg=None, mlp_cfg=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = AttentionSimple(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
linear_cfg=attnlinear_cfg)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
if mlp_cfg is None:
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
else:
self.mlp = hydra.utils.instantiate(mlp_cfg, in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop, _recursive_=False)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
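# Hedged usage sketch (not part of the original file): one ViT-style block over
# 197 tokens (a CLS token plus 14x14 patches) of width 384 with 6 heads.
if __name__ == '__main__':
    block = Block(dim=384, num_heads=6, mlp_ratio=4.0)
    x = torch.randn(2, 197, 384)
    print(block(x).shape)   # expected: torch.Size([2, 197, 384])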
| fly-master | src/models/modules/vision_common.py |
import torch
from einops import rearrange
def low_rank_project(M, rank):
"""Supports batches of matrices as well.
"""
U, S, Vt = torch.linalg.svd(M)
S_sqrt = S[..., :rank].sqrt()
U = U[..., :rank] * rearrange(S_sqrt, '... rank -> ... 1 rank')
Vt = rearrange(S_sqrt, '... rank -> ... rank 1') * Vt[..., :rank, :]
return U, Vt
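# Hedged sanity check (not part of the original file): a rank-2 matrix should be
# reconstructed (up to numerical error) by a rank-2 projection.
if __name__ == '__main__':
    A = torch.randn(8, 2) @ torch.randn(2, 6)
    U, Vt = low_rank_project(A, rank=2)
    print(torch.allclose(U @ Vt, A, atol=1e-4))   # expected: True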
| fly-master | src/ops/low_rank.py |
# Adapt from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py
import torch
import torch.nn as nn
# import fmhalib
# import fmhalibmine as fmhalib
import fmhalibtd as fmhalib
from einops import rearrange
def _fmha_forward(qkv, cu_seqlens, p_dropout, max_s, is_training, return_softmax):
context, softmax_lse, *rest = fmhalib.fwd(qkv, cu_seqlens, p_dropout, max_s, is_training,
False, return_softmax, None)
S_dmask = rest[0] if return_softmax else None
return context, softmax_lse, S_dmask
def _fmha_backward(dout, qkv, out, S_dmask, softmax_lse, cu_seqlens, p_dropout, max_s):
dqkv, dp, softmax_d = fmhalib.bwd(dout, qkv, out, S_dmask, softmax_lse, cu_seqlens, p_dropout, max_s, False)
return dqkv
class FMHAFun(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, p_dropout, max_s, is_training):
context, softmax_lse, S_dmask = _fmha_forward(qkv, cu_seqlens, p_dropout, max_s, is_training,
return_softmax=False)
ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens)
ctx.p_dropout = p_dropout
ctx.max_s = max_s
return context
@staticmethod
def backward(ctx, dout):
qkv, context, S_dmask, softmax_lse, cu_seqlens = ctx.saved_tensors
# S_dmask is None, temporarily use another tensor just to get it running
dqkv = _fmha_backward(dout, qkv, context, context, softmax_lse, cu_seqlens, ctx.p_dropout, ctx.max_s)
return dqkv, None, None, None, None, None
# We duplicate code to return both the output and the softmax for testing
# Returning both makes backward a bit slower, so we want to keep using the other version for speed.
class FMHAFunWithS(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, p_dropout, max_s, is_training):
context, softmax_lse, S_dmask = _fmha_forward(qkv, cu_seqlens, p_dropout, max_s, is_training,
return_softmax=True)
ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens)
ctx.p_dropout = p_dropout
ctx.max_s = max_s
return context, S_dmask, softmax_lse
@staticmethod
def backward(ctx, dout, _dS_dmask_ignored, _dsoftmax_sum_ignored):
qkv, context, S_dmask, softmax_lse, cu_seqlens = ctx.saved_tensors
dqkv = _fmha_backward(dout, qkv, context, S_dmask, softmax_lse, cu_seqlens, ctx.p_dropout, ctx.max_s)
return dqkv, None, None, None, None, None
def fmha_func(qkv, cu_seqlens, p_dropout, max_s, is_training, return_attn_probs=False):
func = FMHAFun if not return_attn_probs else FMHAFunWithS
return func.apply(qkv, cu_seqlens, p_dropout, max_s, is_training)
| fly-master | src/ops/bert_fmha.py |
import math
import torch
from einops import rearrange
def butterfly_factor_to_matrix(twiddle: torch.Tensor, factor_index: int) -> torch.Tensor:
"""
Let b be the base (most commonly 2).
Parameters:
twiddle: (n // b, b, b)
factor_index: an int from 0 to log_b(n) - 1
"""
n_div_b, b, _ = twiddle.shape
n = b * n_div_b
log_b_n = int(math.log(n) / math.log(b))
assert n == b ** log_b_n, f'n must be a power of {b}'
assert twiddle.shape == (n // b, b, b)
assert 0 <= factor_index <= log_b_n
stride = b ** factor_index
x = rearrange(torch.eye(n), 'bs (diagblk j stride) -> bs diagblk j stride', stride=stride, j=b)
t = rearrange(twiddle, '(diagblk stride) i j -> diagblk stride i j', stride=stride)
out = torch.einsum('d s i j, b d j s -> b d i s', t, x)
out = rearrange(out, 'b diagblk i stride -> b (diagblk i stride)')
return out.t() # Transpose because we assume the 1st dimension of x is the batch dimension
if __name__ == '__main__':
b = 2
log_b_n = 3
n = b ** log_b_n
twiddle = torch.arange(1, n * b + 1, dtype=torch.float).reshape(n // b, b, b)
for factor_index in range(log_b_n):
print(butterfly_factor_to_matrix(twiddle, factor_index))
b = 3
log_b_n = 2
n = b ** log_b_n
twiddle = torch.arange(1, n * b + 1, dtype=torch.float).reshape(n // b, b, b)
for factor_index in range(log_b_n):
print(butterfly_factor_to_matrix(twiddle, factor_index))
| fly-master | src/ops/butterfly_factor.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
ctx.save_for_backward(indices)
ctx.first_axis_dim = input.shape[0]
assert input.ndim == 2
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# return input[indices]
return torch.gather(input, 0, repeat(indices, 'z -> z d', d=input.shape[1]))
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
grad_input = torch.zeros([ctx.first_axis_dim, *grad_output.shape[1:]],
device=grad_output.device, dtype=grad_output.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
grad_input.scatter_(0, repeat(indices, 'z -> z d', d=grad_output.shape[1]), grad_output)
return grad_input, None
index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values, indices, first_axis_dim):
ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim == 2
output = torch.zeros(first_axis_dim, values.shape[1], device=values.device,
dtype=values.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
output[indices] = values
# output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
return output
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
grad_values = grad_output[indices]
# grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
def unpad_input(hidden_states, attention_mask):
"""
Arguments:
hidden_states: (batch, seqlen, dim)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Return:
        hidden_states: (total_nnz, dim), where total_nnz = number of tokens selected in attention_mask.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
"""
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices are
    # `dim` times larger than they need to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
return (index_first_axis(rearrange(hidden_states, 'b s d -> (b s) d'), indices), indices,
cu_seqlens, max_seqlen_in_batch)
def pad_input(hidden_states, indices, batch, seqlen):
"""
Arguments:
        hidden_states: (total_nnz, dim), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz)
Return:
hidden_states: (batch, seqlen, dim)
"""
dim = hidden_states.shape[-1]
# output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
# output[indices] = hidden_states
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
return rearrange(output, '(b s) d -> b s d', b=batch)
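# Hedged usage sketch (not part of the original file): round-trips a padded
# batch through unpad_input / pad_input; padded positions come back as zeros.
if __name__ == '__main__':
    batch, seqlen, dim = 2, 5, 4
    hidden = torch.randn(batch, seqlen, dim)
    mask = torch.tensor([[1, 1, 1, 0, 0],
                         [1, 1, 1, 1, 1]])
    packed, indices, cu_seqlens, max_len = unpad_input(hidden, mask)
    repadded = pad_input(packed, indices, batch, seqlen)
    print(packed.shape, cu_seqlens.tolist(), max_len)              # (8, 4), [0, 3, 8], 5
    print(torch.allclose(repadded, hidden * mask.unsqueeze(-1)))   # expected: True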
| fly-master | src/ops/bert_padding.py |
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_weight_to_dense_weight(weight):
"""
    Arguments:
        weight: (nblocks, out / nblocks, in / nblocks)
    Return:
        dense_weight: (out, in)
"""
return torch.block_diag(*torch.unbind(weight, dim=0))
def blockdiag_multiply_reference(x, weight):
"""
This implementation is slow but more likely to be correct.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
n = x.shape[-1]
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = rearrange(x, '... (nblocks p) -> ... nblocks p', nblocks=nblocks)
return rearrange(torch.einsum('...kp, kqp -> ...kq', x_reshaped, weight),
'... nblocks q -> ... (nblocks q)')
class BlockdiagMultiply(torch.autograd.Function):
"""This is a faster implementation, with careful memory copies for the fastest
bmm performance.
The backward pass is also written manually with careful memory copies.
Arguments:
x: (..., n)
weight: (nblocks, q, n / nblocks)
Outputs:
out: (..., nblocks * q)
"""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, weight):
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
out = torch.empty(batch_dim, nblocks, q, device=x.device, dtype=x.dtype).transpose(0, 1)
out = torch.bmm(x_reshaped, weight.transpose(-1, -2), out=out).transpose(0, 1)
return out.reshape(*batch_shape, nblocks * q)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, dout):
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
nblocks, q, p = weight.shape
assert nblocks * p == n
dx, dweight = None, None
dout_reshaped = dout.reshape(batch_dim, nblocks, q).transpose(0, 1)
if ctx.needs_input_grad[0]:
dx = torch.empty(batch_dim, nblocks, p, device=x.device, dtype=x.dtype)
dx = torch.bmm(dout_reshaped, weight.conj(),
out=dx.transpose(0, 1)).transpose(0, 1).reshape(*batch_shape, n)
if ctx.needs_input_grad[1]:
x_reshaped = x.reshape(batch_dim, nblocks, p).transpose(0, 1)
dweight = torch.bmm(dout_reshaped.transpose(-1, -2), x_reshaped.conj())
return dx, dweight
blockdiag_multiply = BlockdiagMultiply.apply
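# Hedged sanity check (not part of the original file): the bmm-based fast path
# should match both the einsum reference and a dense block-diagonal matmul.
if __name__ == '__main__':
    x = torch.randn(3, 8)
    weight = torch.randn(4, 5, 2)   # nblocks=4, q=5, p=2, so nblocks * p = 8
    out = blockdiag_multiply(x, weight)
    ref = blockdiag_multiply_reference(x, weight)
    dense = x @ blockdiag_weight_to_dense_weight(weight).t()
    print(out.shape)                                  # expected: torch.Size([3, 20])
    print(torch.allclose(out, ref, atol=1e-5), torch.allclose(out, dense, atol=1e-5))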
| fly-master | src/ops/blockdiag_multiply.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
# from src.ops.low_rank import low_rank_project
# Copied here so it's more self-contained
def low_rank_project(M, rank):
"""Supports batches of matrices as well.
"""
U, S, Vt = torch.linalg.svd(M)
S_sqrt = S[..., :rank].sqrt()
U = U[..., :rank] * rearrange(S_sqrt, '... rank -> ... 1 rank')
Vt = rearrange(S_sqrt, '... rank -> ... rank 1') * Vt[..., :rank, :]
return U, Vt
def factors(n):
return [(i, n // i) for i in range(1, math.floor(math.sqrt(n)) + 1) if n % i == 0]
def blockdiag_butterfly_project(M, sizes=None):
"""Only works for square matrices for now
"""
m, n = M.shape
if m != n:
raise NotImplementedError('Only support square matrices')
if sizes is None:
# Find the factors that are closest to sqrt(n)
sizes = factors(n)[-1]
# Larger factor first is probably more efficient, idk
sizes = (sizes[1], sizes[0])
assert n == sizes[0] * sizes[1]
M_permuted_batched = rearrange(M, '(p k) (r s) -> k r p s', k=sizes[1], r=sizes[0])
U, Vt = low_rank_project(M_permuted_batched, rank=1)
w1_bfly = rearrange(Vt, 'k r 1 s -> r k s')
w2_bfly = rearrange(U, 'k r s 1 -> k s r')
return w1_bfly, w2_bfly
class ButterflyFFT(nn.Module):
def __init__(self, n, direction='fft', norm='ortho', sizes=None):
super().__init__()
eye = torch.eye(n, dtype=torch.complex128)
assert direction in ['fft', 'ifft']
transform = torch.fft.fft if direction == 'fft' else torch.fft.ifft
dft = transform(eye, norm=norm).t()
        if sizes is None:
            # Find the factors that are closest to sqrt(n)
            sizes = factors(n)[-1]
            # Larger factor first is probably more efficient, idk
            sizes = (sizes[1], sizes[0])
self.register_buffer('perm', rearrange(torch.arange(n), '(i j) -> (j i)', j=sizes[0]))
w1, w2 = blockdiag_butterfly_project(dft[:, self.perm], sizes=sizes)
# Store parameters as real instead of complex to avoid issues with Adam / AdamW
self.w1_bfly = nn.Parameter(torch.view_as_real(w1.cfloat()))
self.w2_bfly = nn.Parameter(torch.view_as_real(w2.cfloat()))
def forward(self, x):
w1_bfly, w2_bfly = torch.view_as_complex(self.w1_bfly), torch.view_as_complex(self.w2_bfly)
return blockdiag_butterfly_multiply(rearrange(x[..., self.perm], '... n -> (...) n'),
w1_bfly, w2_bfly).reshape_as(x)
class ButterflyFFT2(nn.Module):
def __init__(self, n1, n2, direction='fft', norm='ortho'):
"""Input will have shape (..., n1, n2)
"""
super().__init__()
self.fft1 = ButterflyFFT(n1, direction=direction, norm=norm)
self.fft2 = ButterflyFFT(n2, direction=direction, norm=norm)
def forward(self, x):
out = rearrange(self.fft1(rearrange(x, '... n1 n2 -> ... n2 n1')), '... n2 n1 -> ... n1 n2')
return self.fft2(out)
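# Hedged sanity check (not part of the original file): the factored FFT should
# agree with torch.fft.fft on random complex input (both use norm='ortho').
if __name__ == '__main__':
    n = 64
    fft_layer = ButterflyFFT(n, direction='fft', norm='ortho')
    x = torch.randn(4, n, dtype=torch.complex64)
    print(torch.allclose(fft_layer(x), torch.fft.fft(x, norm='ortho'), atol=1e-4))   # expected: True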
| fly-master | src/ops/blockdiag_butterfly_projection.py |
import torch
@torch.jit.script
def jit_dropout_add(x, residual, prob):
# type: (Tensor, Tensor, float) -> Tensor
return torch.nn.functional.dropout(x, p=prob, training=True) + residual
def fused_dropout_add(x, residual, prob, is_training) :
# type: (Tensor, Tensor, float, bool) -> Tensor
if is_training:
out = jit_dropout_add(x, residual, prob)
else:
out = torch.nn.functional.dropout(x, p=prob, training=is_training) + residual
return out
@torch.jit.script
def jit_bias_dropout_add(x, bias, residual, prob) :
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return torch.nn.functional.dropout(x + bias, p=prob, training=True) + residual
def fused_bias_dropout_add(x, bias, residual, prob, is_training) :
# type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
if is_training:
out = jit_bias_dropout_add(x, bias, residual, prob)
else:
out = torch.nn.functional.dropout(x + bias, p=prob, training=is_training) + residual
return out
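# Hedged usage sketch (not part of the original file): with is_training=False
# dropout is a no-op, so the fused op reduces to a plain residual addition.
if __name__ == '__main__':
    x, residual = torch.randn(2, 4, 8), torch.randn(2, 4, 8)
    out_eval = fused_dropout_add(x, residual, prob=0.1, is_training=False)
    print(torch.allclose(out_eval, x + residual))   # expected: True
    bias = torch.randn(8)
    out_train = fused_bias_dropout_add(x, bias, residual, 0.1, True)
    print(out_train.shape)                          # expected: torch.Size([2, 4, 8])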
| fly-master | src/ops/fused_dropout_add.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
# On the backward pass, we don't use the fused kernel from cublasLt since that's a bit slower.
# Instead we use the regular backward from F.linear.
# We also make it work with pytorch amp.
# TD [2022-02-27] The fused backward is also less accurate, and it might silently fail to compute
# grad_bias (when it takes the cublas gemm code path instead of the cublasLt code path)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
import fused_dense_cuda # from apex
# import fused_dense_lib as fused_dense_cuda
# implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFuncMine(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, weight, bias):
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
output = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight, bias)
return output.reshape(*batch_shape, output.shape[-1])
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = np.prod(batch_shape)
grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight, grad_output.reshape(batch_dim, grad_output.shape[-1])
)
# print((grad_bias - grad_output.view(-1, grad_output.shape[-1]).sum(dim=0)).abs().max())
return grad_input.reshape_as(x), grad_weight, grad_bias
# grad_input, grad_weight = None, None
# grad_output_reshaped = grad_output.reshape(batch_dim, grad_output.shape[-1])
# if ctx.needs_input_grad[0]:
# grad_input = (grad_output_reshaped @ weight.conj()).reshape(*batch_shape, n)
# if ctx.needs_input_grad[1]:
# grad_weight = grad_output_reshaped.t() @ x.conj().reshape(batch_dim, n)
# # We don't need to compute grad_bias explicitly, when we return grad_out Pytorch
# # will sum over the batch dimension to get grad_bias.
# return grad_input, grad_weight, grad_output
fused_dense_function_mine = FusedDenseFuncMine.apply
class FusedDenseMine(nn.Linear):
def forward(self, x):
if x.is_cuda and self.bias is not None:
return fused_dense_function_mine(x, self.weight, self.bias)
else:
return F.linear(x, self.weight, self.bias)
| fly-master | src/ops/fused_dense.py |
import math
import torch
def bitreversal_permutation(n, device=None, dtype=None):
"""Return the bit reversal permutation used in FFT.
    The permutation is returned as a pytorch tensor.
Parameter:
n: integer, must be a power of 2.
Return:
perm: bit reversal permutation, pytorch tensor of size n
"""
log_n = int(math.log2(n))
assert n == 1 << log_n, 'n must be a power of 2'
perm = torch.arange(n, device=device, dtype=dtype).reshape(1, n)
for i in range(log_n):
perm = torch.vstack(perm.chunk(2, dim=-1))
perm = perm.squeeze(-1)
return perm
def invert_permutation(perm: torch.Tensor) -> torch.Tensor:
"""
Params:
perm: (..., n)
Return:
inverse_perm: (..., n)
"""
# This is simpler but has complexity O(n log n)
# return torch.argsort(perm, dim=-1)
# This is more complicated but has complexity O(n)
arange = torch.arange(perm.shape[-1], device=perm.device).expand_as(perm)
return torch.empty_like(perm).scatter_(-1, perm, arange)
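# Hedged sanity check (not part of the original file): inverting the bit
# reversal permutation recovers the identity ordering.
if __name__ == '__main__':
    perm = bitreversal_permutation(8)
    inv = invert_permutation(perm)
    print(perm.tolist())                              # expected: [0, 4, 2, 6, 1, 5, 3, 7]
    print(torch.equal(perm[inv], torch.arange(8)))    # expected: True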
| fly-master | src/ops/permutation.py |
import torch
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_butterfly_multiply_einsum_simple(x, w1_bfly, w2_bfly):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, j, i), where k = n / i
w2_bfly: (j, l, k)
Outputs:
out: (batch, m), where m = l * j
"""
batch, n = x.shape
k, j, i = w1_bfly.shape
j1, l, k1 = w2_bfly.shape
assert j1 == j
assert k1 == k
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
out = torch.einsum('b k i, k j i, j l k -> b l j', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j -> b (l j)')
def blockdiag_butterfly_project_einsum_simple(M, nblocks1, nblocks2):
"""
Arguments:
M: (m, n)
Outputs:
w1_bfly: (nblocks1, nblocks2, i)
w2_bfly: (nblocks2, l, nblocks1)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j) (k i) -> k j l i', k=nblocks1, j=nblocks2)
U, Vt = low_rank_project(M_permuted_batched, rank=1)
w1_bfly = rearrange(Vt, 'k j 1 i -> k j i')
w2_bfly = rearrange(U, 'k j l 1 -> j l k')
return w1_bfly, w2_bfly
def blockdiag_butterfly_multiply_einsum(x, w1_bfly, w2_bfly, b2):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, (j * b1), i), where k = n / i
w2_bfly: (j, (l * b2), (k b1))
Outputs:
out: (batch, m), where m = l * j * b2
"""
batch, n = x.shape
k, jb1, i = w1_bfly.shape
j, lb2, kb1 = w2_bfly.shape
b1 = jb1 // j
assert jb1 == j * b1
assert kb1 == k * b1
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
w1_bfly = rearrange(w1_bfly, 'k (j b1) i -> k j b1 i', b1=b1)
w2_bfly = rearrange(w2_bfly, 'j (l b2) (k b1) -> j l b2 k b1', b1=b1, b2=b2)
# torch.einsum doesn't support indices named b1 or b2, so we map b1 -> y, b2 -> z
out = torch.einsum('b k i, k j y i, j l z k y -> b l j z', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j b2 -> b (l j b2)')
def blockdiag_butterfly_project_einsum(M, nblocks1, nblocks2, b1, b2):
"""
Arguments:
M: (m, n)
Outputs:
        w1_bfly: (nblocks1, nblocks2 * b1, i)
        w2_bfly: (nblocks2, l * b2, nblocks1 * b1)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j b2) (k i) -> k j (l b2) i', k=nblocks1, j=nblocks2,
b2=b2)
U, Vt = low_rank_project(M_permuted_batched, rank=b1)
w1_bfly = rearrange(Vt, 'k j b1 i -> k (j b1) i')
w2_bfly = rearrange(U, 'k j lb2 b1 -> j lb2 (k b1)')
return w1_bfly, w2_bfly
def blockdiag_butterfly_multiply_einsum_rank(x, w1_bfly, w2_bfly):
"""
Arguments:
x: (batch, n)
w1_bfly: (k, (r * j), i), where k = n / i
w2_bfly: (j, l, (k r))
Outputs:
out: (batch, m), where m = l * j
"""
batch, n = x.shape
k, jb1, i = w1_bfly.shape
j, l, kb1 = w2_bfly.shape
r = jb1 // j
assert jb1 == j * r
assert kb1 == k * r
assert k * i == n
x_reshaped = rearrange(x, 'b (k i) -> b k i', k=k)
w1_bfly = rearrange(w1_bfly, 'k (r j) i -> k r j i', r=r)
w2_bfly = rearrange(w2_bfly, 'j l (k r) -> j l k r', r=r)
out = torch.einsum('b k i, k r j i, j l k r -> b l j', x_reshaped, w1_bfly, w2_bfly)
return rearrange(out, 'b l j -> b (l j)')
def blockdiag_butterfly_project_einsum_rank(M, nblocks1, nblocks2, rank):
"""
Arguments:
M: (m, n)
Outputs:
w1_bfly: (nblocks1, r * nblocks2, i)
w2_bfly: (nblocks2, l, nblocks1 * r)
"""
m, n = M.shape
k, j = nblocks1, nblocks2
M_permuted_batched = rearrange(M, '(l j) (k i) -> k j l i', k=nblocks1, j=nblocks2)
U, Vt = low_rank_project(M_permuted_batched, rank=rank)
w1_bfly = rearrange(Vt, 'k j r i -> k (r j) i')
w2_bfly = rearrange(U, 'k j l r -> j l (k r)')
return w1_bfly, w2_bfly
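# Hedged sanity check (not part of the original file): build a dense matrix from
# random butterfly factors, re-project it, and confirm the recovered factors
# realize the same linear map (each permuted block is exactly rank 1 here).
if __name__ == '__main__':
    k, j, i, l = 4, 3, 5, 6          # w1_bfly: (k, j, i), w2_bfly: (j, l, k)
    w1, w2 = torch.randn(k, j, i), torch.randn(j, l, k)
    n = k * i
    M = blockdiag_butterfly_multiply_einsum_simple(torch.eye(n), w1, w2).t()   # (l * j, n)
    w1_hat, w2_hat = blockdiag_butterfly_project_einsum_simple(M, nblocks1=k, nblocks2=j)
    x = torch.randn(2, n)
    out = blockdiag_butterfly_multiply_einsum_simple(x, w1_hat, w2_hat)
    ref = blockdiag_butterfly_multiply_einsum_simple(x, w1, w2)
    print(torch.allclose(out, ref, atol=1e-4))   # expected: True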
| fly-master | src/ops/blockdiag_butterfly_einsum.py |
import torch
from softmaxlib import additive_masked_softmax_dropout_forward
from softmaxlib import masked_scale_softmax_backward_recompute
from src.ops.triton.softmax_dropout import softmax_dropout
class _fused_softmax_dropout(torch.autograd.Function):
@staticmethod
def forward(ctx, x, p, mask, return_dropout_mask=False):
"""
x: (batch_size, nheads, q_seqlen, k_seqlen)
p: float
mask: (batch_size, 1, 1, k_seqlen)
"""
assert x.dtype == torch.float16
assert x.ndim == 4
assert mask is not None
x = x.contiguous()
dropout_results, dropout_mask = additive_masked_softmax_dropout_forward(x, mask, p)
ctx.save_for_backward(x, mask, dropout_mask)
ctx.dropout_prob = p
return dropout_results, (None if not return_dropout_mask else dropout_mask)
@staticmethod
def backward(ctx, grad_out, grad_dropout_mask):
x, mask, dropout_mask = ctx.saved_tensors
p = ctx.dropout_prob
grad_in = masked_scale_softmax_backward_recompute(grad_out, x, mask, dropout_mask, p)
return grad_in, None, None, None
def fused_softmax_dropout(x, p, mask):
if x.is_cuda and x.dtype == torch.float16 and mask is not None and p != 0.0:
return _fused_softmax_dropout.apply(x, p, mask)[0]
else:
return softmax_dropout(x, p, mask, mask_type='bk')
| fly-master | src/ops/fused_softmax_dropout.py |
import torch
from einops import rearrange, repeat
def sparse_project(M, density):
"""Return a sparse mask of the largest entries of M in magnitude.
"""
nparams = int(density * M.numel())
# Implementation 1
# sorted_idx = torch.argsort(M.abs().flatten(), descending=True)
# threashold = M.abs().flatten()[sorted_idx[nparams]]
# Implementation 2
# threashold = M.abs().flatten().kthvalue(M.numel() - nparams).values
# sparse_mask = M.abs() > threashold
# Implementation 3
_, topk_idx = torch.topk(M.abs().flatten(), nparams, sorted=False)
sparse_mask = torch.zeros_like(M, dtype=torch.bool).flatten()
# scatter_ is faster than index assignment for some reason
sparse_mask.scatter_(dim=0, index=topk_idx, src=torch.ones_like(sparse_mask))
# sparse_mask[topk_idx] = True
sparse_mask = sparse_mask.reshape(M.shape)
return sparse_mask
def blocksparse_project(M, density, blocksize=1):
"""Return a sparse mask of the largest entries of M in magnitude.
Assume that M is a matrix.
"""
M_block = rearrange(M, '(m b) (n b1) -> m n b b1', b=blocksize, b1=blocksize)
nnz_blocks = int(density * M_block.shape[0] * M_block.shape[1])
_, topk_idx = torch.topk(torch.linalg.norm(M_block, ord='fro', dim=(-1, -2)).flatten(), nnz_blocks,
sorted=False)
sparse_mask = torch.zeros(*M_block.shape[:2], device=M_block.device, dtype=torch.bool).flatten()
# scatter_ is faster than index assignment for some reason
sparse_mask.scatter_(dim=0, index=topk_idx, src=torch.ones_like(sparse_mask))
# sparse_mask[topk_idx] = True
sparse_mask = sparse_mask.reshape(*M_block.shape[:2])
return repeat(sparse_mask, 'm n -> (m b) (n b1)', b=blocksize, b1=blocksize)
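# Hedged usage sketch (not part of the original file): keep the largest 25% of
# entries (or of 2x2 blocks) of a random matrix; both masks keep 16 of 64 entries.
if __name__ == '__main__':
    M = torch.randn(8, 8)
    mask = sparse_project(M, density=0.25)
    block_mask = blocksparse_project(M, density=0.25, blocksize=2)
    print(mask.sum().item(), block_mask.sum().item())   # expected: 16 16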
| fly-master | src/ops/sparse.py |
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
import math
import torch
from torch import nn
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return ff*g
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(bias, input)
@staticmethod
def backward(ctx, grad_output):
input, bias = ctx.saved_tensors
tmp = bias_gelu_back(grad_output, bias, input)
return tmp, tmp
bias_gelu_impl = GeLUFunction.apply
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def gelu_fwd(x):
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def gelu_bwd(g, x):
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return ff*g
class FastGeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input):
ctx.save_for_backward(input)
return gelu_fwd(input)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
tmp = gelu_bwd(grad_output, input)
return tmp
fast_gelu_impl = FastGeLUFunction.apply
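# Hedged sanity check (not part of the original file): the fused tanh
# approximations should match PyTorch's tanh-approximate GELU (the `approximate`
# keyword needs PyTorch >= 1.12).
if __name__ == '__main__':
    x, bias = torch.randn(4, 16), torch.randn(16)
    ref = torch.nn.functional.gelu(x + bias, approximate='tanh')
    print(torch.allclose(bias_gelu_impl(x, bias), ref, atol=1e-5))   # expected: True
    print(torch.allclose(fast_gelu_impl(x),
                         torch.nn.functional.gelu(x, approximate='tanh'), atol=1e-5))   # expected: True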
| fly-master | src/ops/gelu_activation.py |
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
from einops import rearrange, repeat
import triton
import triton.language as tl
from src.ops.triton.k_softmax import _softmax, _softmax_backward
from src.ops.triton.k_softmax_dropout import _softmax_dropout_backward
from src.ops.triton.softmax import softmax
FAST_MHA_AVAILABLE = True
try:
from fast_multihead_attn import additive_mask_softmax_dropout_backward
except ImportError:
from src.utils.utils import get_logger
logger = get_logger()
logger.info('fast_multihead_attn from apex is not installed.')
FAST_MHA_AVAILABLE = False
_triton_registered_overflow = False
_triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32
# Helper to handle the SPMD launch grid and error cases
class _softmax_dropout_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)
def forward(ctx, x, p, mask, causal, mask_type):
"""
Fused softmax implementation, using the Triton programming model.
This only supports a reduction over the last dimension for now
Argument:
x: (bs, nheads, q_seqlen, k_seqlen)
mask: (bs, 1, 1, k_seqlen)
"""
assert x.ndim == 4
# Handle 2D/3D tensors
x_ = x.unsqueeze(0) if x.ndim == 2 else x
x_ = x_.flatten(0, -3)
if not x_.is_contiguous():
x_ = x_.contiguous()
y = torch.empty_like(x_)
assert (
y.stride(2) == 1 and x_.stride(2) == 1
), f"{x.shape} - {x_.shape} - {x_.stride()}"
# SPMD launch grid
grid_2d = (
x_.shape[0],
x_.shape[1],
)
# enqueue GPU kernel
if mask is None:
# placeholder, will not be used
mask = x_
mask_type = None
else:
assert mask.dtype == x.dtype, "An additive mask is requested"
if mask_type == 'bk':
mask = repeat(mask, 'b 1 1 s -> b h 1 s', h=x_.shape[0] // mask.shape[0])
mask = mask.flatten(0, -2).contiguous()
_softmax[grid_2d](
y,
x_,
mask,
y.stride(0),
y.stride(1),
x_.stride(0),
x_.stride(1),
mask.stride(0),
x_.shape[2],
LOG=False,
MASK_TYPE=mask_type,
CAUSAL=causal,
)
# torch._fused_dropout takes 1 - p
dropout_results, dropout_mask = torch._fused_dropout(y, p=1.0 - p)
ctx.save_for_backward(y, dropout_mask)
ctx.dropout_prob = p
ctx.causal = causal
ctx.mask_type = mask_type
return dropout_results.reshape_as(x)
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_out):
# (y, dropout_mask) = ctx.saved_tensors
    # # triton can't read from bool, uint8, or int8. Converting to int16 negates the speed
# # benefits.
# # dropout_mask_triton = triton.code_gen.reinterpret(dropout_mask, tl.uint8)
# dropout_mask_triton = dropout_mask.to(dtype=torch.int16)
# # Handle 2D/3D tensors
# grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
# grad_out_ = grad_out_.flatten(0, -3)
# # SPMD launch grid
# grid_2d = (
# grad_out_.shape[0],
# grad_out_.shape[1],
# )
# grad_in = torch.empty_like(
# y
# ) # torch.zeros is measurably slower, we'll zero y in the kernel
    # # Make sure that the tensors are contiguous
# grad_in, grad_out, y = map(lambda x: x.contiguous(), [grad_in, grad_out, y])
# # fmt: off
# _softmax_dropout_backward[grid_2d](
# grad_in, grad_out_, y, dropout_mask_triton, ctx.dropout_prob,
# grad_in.stride(0), grad_in.stride(1),
# grad_out_.stride(0), grad_out_.stride(1),
# y.stride(0), y.stride(1),
# dropout_mask.stride(0), dropout_mask.stride(1),
# y.shape[2],
# CAUSAL=ctx.causal
# )
# # fmt: on
# return grad_in.reshape_as(grad_out), None, None, None, None
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
(y, dropout_mask) = ctx.saved_tensors
# Handle 2D/3D tensors
grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
grad_out_ = grad_out_.flatten(0, -3)
# SPMD launch grid
grid_2d = (
grad_out_.shape[0],
grad_out_.shape[1],
)
        # Make sure that the tensors are contiguous
# grad_in, grad_out_, y = map(lambda x: x.contiguous(), [grad_in, grad_out_, y])
grad_out_, y = map(lambda x: x.contiguous(), [grad_out_, y])
if (FAST_MHA_AVAILABLE and grad_out.dtype == torch.float16 and not ctx.causal
and ctx.mask_type == 'bk'):
# fast_multihead_attn from apex only works for fp16 for now.
# Apex overwrites grad_output, i.e. in-place.
# The first two flags (use_mask, heads) aren't used at all, can be set to whatever.
grad_in = additive_mask_softmax_dropout_backward(True, 1, grad_out_, y, dropout_mask,
ctx.dropout_prob)
else:
dropout_grads = torch._masked_scale(grad_out_, dropout_mask,
1.0 / (1.0 - ctx.dropout_prob))
grad_in = torch.empty_like(
y
) # torch.zeros is measurably slower, we'll zero y in the kernel
# fmt: off
_softmax_backward[grid_2d](
grad_in, dropout_grads, y,
grad_in.stride(0), grad_in.stride(1),
grad_out_.stride(0), grad_out_.stride(1),
y.stride(0), y.stride(1),
y.shape[2],
LOG=False,
CAUSAL=ctx.causal
)
# fmt: on
return grad_in.reshape_as(grad_out), None, None, None, None
def softmax_dropout(
x: torch.Tensor, p: float, mask: Optional[torch.Tensor] = None, causal: bool = False,
mask_type: str = 'qk'
) -> torch.Tensor:
if p == 0.0:
        return softmax(x, mask=mask, causal=causal, mask_type=mask_type)
else:
return _softmax_dropout_dispatch(x, p, mask, causal, mask_type=mask_type)
def _softmax_dropout_dispatch(
x: torch.Tensor, p: float, mask: Optional[torch.Tensor], causal: bool = False,
mask_type: str = 'qk'
) -> torch.Tensor:
# Triton is used if
# - CUDA
# - there's enough data to make it faster than pytorch. This could change over time, Triton is improving
# - there was no previous failure
global _triton_registered_overflow
try:
if torch.cuda.is_available() and x.is_cuda and not _triton_registered_overflow:
return _softmax_dropout_triton.apply(x, p, mask, causal, mask_type)
except (triton.code_gen.OutOfResources, RuntimeError) as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_overflow = True
logging.warning(
"Triton softmax kernel register spillover or invalid image caught."
"Deactivating this kernel, please file an issue int the xFormers repository"
)
logging.warning(e)
if mask is not None:
mask = mask.to(dtype=x.dtype)
if mask_type == 'qk':
x = x + mask
elif mask_type == 'bk':
x = x + rearrange(mask, '... k -> ... 1 k')
if causal:
x = x + torch.triu(torch.full_like(x, float("-inf")), diagonal=1)
return F.dropout(F.softmax(x, dim=-1, dtype=x.dtype), p)
class SoftmaxDropout(nn.Module):
def __init__(self, p: float) -> None:
super().__init__()
self.p = p
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False,
mask_type: str = 'qk') -> torch.Tensor:
p = self.p if self.training else 0.0
if not x.is_cuda:
if mask is not None:
mask = mask.to(dtype=x.dtype)
if mask_type == 'qk':
x = x + mask
elif mask_type == 'bk':
x = x + rearrange(mask, '... k -> ... 1 k')
if causal:
x = x + torch.triu(torch.full_like(x, float("-inf")), diagonal=1)
            return F.dropout(F.softmax(x, dim=-1, dtype=x.dtype), p)
else:
return softmax_dropout(x, p, mask=mask, causal=causal, mask_type=mask_type)
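# Example usage (illustrative sketch, assuming a CUDA device so the Triton path
# is taken; tensor shapes follow the _softmax_dropout_triton.forward docstring,
# with an additive key-padding mask of shape (bs, 1, 1, k_seqlen) and
# mask_type='bk'):
#
#     attn_drop = SoftmaxDropout(p=0.1)
#     scores = torch.randn(2, 8, 128, 128, device='cuda')
#     mask = torch.zeros(2, 1, 1, 128, device='cuda')
#     mask[:, :, :, 100:] = float('-inf')   # drop the last 28 keys
#     probs = attn_drop(scores, mask=mask, mask_type='bk')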
| fly-master | src/ops/triton/softmax_dropout.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
def get_depth(K):
return triton.next_power_of_2(K)
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["K"],
)
@triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])})
@triton.heuristics({'IS_FP16': lambda nargs: nargs['GradIn'].dtype == torch.float16})
@triton.jit
def _softmax_dropout_backward(
GradIn, GradOut, Out, DropoutMask, dropout_prob,
stride_bm, stride_bn,
stride_gm, stride_gn,
stride_om, stride_on,
stride_mm, stride_mn,
K,
CAUSAL: tl.constexpr,
DEPTH: tl.constexpr,
IS_FP16: tl.constexpr,
):
# fmt: on
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, DEPTH)
# the memory address of all the elements that we want to load can be computed as follows
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
dropout_mask_ptrs = DropoutMask + m * stride_mm + n * stride_mn + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if CAUSAL:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0))
o = tl.load(out_ptrs, mask=io_mask, other=float(0))
zero = float(0)
zero = zero.to(g.dtype)
# Causal - 2: enforce correctness over a couple of misloaded values
if CAUSAL:
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
dropout_mask = tl.load(dropout_mask_ptrs, mask=io_mask, other=float(0))
g = tl.where(dropout_mask != 0, g / (1 - dropout_prob), zero)
# Step 1: Compute the intermediate sum used for the gradient
s = tl.sum(g * o, 0)
# Step 2: Compute the gradients
grad_in = o * (g - s)
# write back to the input gradients
# technically we could write only the lower triangular matrix in the causal case
    # but this is deemed too error prone
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K)
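# Hypothetical pure-PyTorch reference for the fused backward above (an
# illustrative sketch, not used by the kernel): `out` is the saved softmax
# output, `grad_out` the incoming gradient, `dropout_mask` the 0/1 mask from
# the forward pass and `p` the dropout probability.
def _softmax_dropout_backward_reference(grad_out, out, dropout_mask, p):
    # undo the dropout scaling that the forward pass applied
    g = grad_out * dropout_mask.to(grad_out.dtype) / (1.0 - p)
    # softmax backward: out * (g - sum(g * out)) along the softmax dimension
    s = (g * out).sum(dim=-1, keepdim=True)
    return out * (g - s)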
| fly-master | src/ops/triton/k_softmax_dropout.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from typing import Optional
import torch
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from einops import rearrange, repeat
from src.ops.triton.k_softmax import _softmax, _softmax_backward
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
_triton_registered_overflow = False
_triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32
class MaskType(str, Enum):
ADD = "add"
MUL = "mul"
# Helper to handle the SPMD launch grid and error cases
class _softmax_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)
def forward(ctx, x, mask, log_outputs, causal, mask_type):
"""
Fused softmax implementation, using the Triton programming model.
This only supports a reduction over the last dimension for now
"""
# Handle 2D/3D tensors
x_ = x.unsqueeze(0) if x.ndim == 2 else x
x_ = x_.flatten(0, -3)
if not x_.is_contiguous():
x_ = x_.contiguous()
y = torch.empty_like(x_)
assert (
y.stride(2) == 1 and x_.stride(2) == 1
), f"{x.shape} - {x_.shape} - {x_.stride()}"
# SPMD launch grid
grid_2d = (
x_.shape[0],
x_.shape[1],
)
# enqueue GPU kernel
if mask is None:
# placeholder, will not be used
mask = x_
mask_type = None
else:
            # The mask is additive, so it must have the same dtype as the input
assert mask.dtype == x.dtype, "An additive mask is requested"
if mask_type == 'bk':
mask = repeat(mask, 'b 1 1 s -> b h 1 s', h=x_.shape[0] // mask.shape[0])
mask = mask.flatten(0, -2).contiguous()
_softmax[grid_2d](
y,
x_,
mask,
y.stride(0),
y.stride(1),
x_.stride(0),
x_.stride(1),
mask.stride(0),
x_.shape[2],
LOG=log_outputs,
MASK_TYPE=mask_type,
CAUSAL=causal,
)
ctx.save_for_backward(y)
ctx.log_outputs = log_outputs
ctx.causal = causal
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
(out,) = ctx.saved_tensors
# Handle 2D/3D tensors
grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
grad_out_ = grad_out_.flatten(0, -3)
# SPMD launch grid
grid_2d = (
grad_out_.shape[0],
grad_out_.shape[1],
)
grad_in = torch.empty_like(
out
) # torch.zeros is measurably slower, we'll zero out in the kernel
        # Make sure that the tensors are contiguous
grad_in, grad_out_, out = map(lambda x: x.contiguous(), [grad_in, grad_out_, out])
# fmt: off
_softmax_backward[grid_2d](
grad_in, grad_out_, out,
grad_in.stride(0), grad_in.stride(1),
grad_out_.stride(0), grad_out_.stride(1),
out.stride(0), out.stride(1),
out.shape[2],
LOG=ctx.log_outputs,
CAUSAL=ctx.causal
)
# fmt: on
return grad_in.reshape_as(grad_out), None, None, None, None
def softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False,
mask_type: str = 'qk'
) -> torch.Tensor:
r"""Applies the Softmax function to an 3-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
.. warning: softmax is computed on the last dimension of the input tensor.
Args:
x: input tensor.
mask: optional mask, its application will be fused to the softmax computation if triton is used
causal: optional performance optimization, if triton is used and the attention is causal
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1] and sum to 1
"""
return _softmax_dispatch(x, log=False, mask=mask, causal=causal, mask_type=mask_type)
def log_softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False,
mask_type: str = 'qk'
) -> torch.Tensor:
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an 3-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
x: input tensor.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
"""
return _softmax_dispatch(x, log=True, mask=mask, causal=causal, mask_type=mask_type)
def _softmax_dispatch(
x: torch.Tensor, log: bool, mask: Optional[torch.Tensor], causal: bool = False,
mask_type: str = 'qk'
) -> torch.Tensor:
# Triton is used if
# - CUDA
# - there's enough data to make it faster than pytorch. This could change over time, Triton is improving
# - there was no previous failure
global _triton_registered_overflow
try:
if torch.cuda.is_available() and x.is_cuda and not _triton_registered_overflow:
return _softmax_triton.apply(x, mask, log, causal, mask_type)
except (triton.code_gen.OutOfResources, RuntimeError) as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_overflow = True
logging.warning(
"Triton softmax kernel register spillover or invalid image caught."
"Deactivating this kernel, please file an issue int the xFormers repository"
)
logging.warning(e)
if mask is not None:
if mask_type == 'qk':
x = x + mask
elif mask_type == 'bk':
x = x + rearrange(mask, '... k -> ... 1 k')
if causal:
x = x + torch.triu(torch.full_like(x, float("-inf")), diagonal=1)
if log:
return torch.log_softmax(x, dim=-1)
else:
return torch.softmax(x, dim=-1)
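# Example usage (illustrative sketch; on CPU the dispatch above simply falls
# back to torch.softmax / torch.log_softmax):
#
#     x = torch.randn(8, 128, 128, device='cuda')
#     y = softmax(x, causal=True)    # row i only attends to positions <= i
#     y_log = log_softmax(x)         # same kernel with LOG=True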
| fly-master | src/ops/triton/softmax.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
def get_depth(K):
return triton.next_power_of_2(K)
# autotune: Triton will test out these configurations, and automatically pick the fastest one.
# heuristic: add arguments to the kernel call automatically given some heuristics.
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["K"],
)
@triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])})
@triton.heuristics({'IS_FP16': lambda nargs: nargs['Y'].dtype == torch.float16})
@triton.jit
def _softmax(
Y, X, M,
stride_ym, stride_yn,
stride_xm, stride_xn,
stride_m,
K,
LOG: tl.constexpr,
MASK_TYPE: tl.constexpr,
CAUSAL: tl.constexpr,
DEPTH: tl.constexpr,
IS_FP16: tl.constexpr,
):
    # fmt: on
"""
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
    Note, if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
the kernel is run for the first time.
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, DEPTH)
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m * stride_xm + n * stride_xn + k
    # load input data; pad out-of-bounds elements with -inf
io_mask = k < K
# Causal - 1: skip on the loads directly
if CAUSAL:
io_mask = io_mask & (k <= n)
x = tl.load(x_ptrs, mask=io_mask, other=float("-inf"))
# Causal - 2: enforce correctness over a couple of misloaded values
if CAUSAL:
off = float("-inf")
off = off.to(x.dtype)
x = tl.where(k > n, off, x)
if MASK_TYPE is not None:
if MASK_TYPE == 'qk':
mask_ptrs = M + n * stride_m + k
elif MASK_TYPE == 'bk':
mask_ptrs = M + m * stride_m + k
add_mask = tl.load(mask_ptrs, io_mask, other=float("-inf"))
x += add_mask
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
if IS_FP16:
# tl.exp() crashes on fp16 values
# See https://github.com/openai/triton/issues/241
z = z.to(tl.float32)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
if LOG:
y = z - tl.log(denom)
else:
y = num / denom
# write back to Y.
# we only write once, hence the "fused" softmax naming
y_ptrs = Y + m * stride_ym + n * stride_yn + k
# technically we could write only the lower triangular matrix in the causal case
    # but this is deemed too error prone
tl.store(y_ptrs, y, mask=k < K)
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["K"],
)
@triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])})
@triton.heuristics({'IS_FP16': lambda nargs: nargs['GradIn'].dtype == torch.float16})
@triton.jit
def _softmax_backward(
GradIn, GradOut, Out,
stride_bm, stride_bn,
stride_gm, stride_gn,
stride_om, stride_on,
K,
LOG: tl.constexpr,
CAUSAL: tl.constexpr,
DEPTH: tl.constexpr,
IS_FP16: tl.constexpr,
):
# fmt: on
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, DEPTH)
# the memory address of all the elements that we want to load can be computed as follows
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if CAUSAL:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0))
o = tl.load(out_ptrs, mask=io_mask, other=float(0))
# Causal - 2: enforce correctness over a couple of misloaded values
if CAUSAL:
zero = float(0)
zero = zero.to(g.dtype)
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
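    # Gradient identities used below (o is the saved forward output):
    #   log-softmax (LOG=True): dL/dx_i = g_i - exp(o_i) * sum_j g_j
    #   softmax:                dL/dx_i = o_i * (g_i - sum_j g_j * o_j)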
if LOG:
s = tl.sum(g, 0)
if IS_FP16:
o = o.to(tl.float32)
grad_in = g - tl.exp(o) * s
else:
# Step 1: Compute the intermediate sum used for the gradient
s = tl.sum(g * o, 0)
# Step 2: Compute the gradients
grad_in = o * (g - s)
# write back to the input gradients
# technically we could write only the lower triangular matrix in the causal case
    # but this is deemed too error prone
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K)
| fly-master | src/ops/triton/k_softmax.py |
'''
File: generate_mapping.py
Description: Maps each concept (CUI) in UMLS to a structured entity (QID) in WikiData.
'''
from tqdm import tqdm
from pathlib import Path
import pandas as pd
import sys
import logging
from importlib import reload
import json
import os
import argparse
from bootleg.end2end.extract_mentions import extract_mentions
from bootleg.utils.parser.parser_utils import parse_boot_and_emm_args
from bootleg.utils.utils import load_yaml_file
from bootleg.run import run_model
from bootleg.end2end.bootleg_annotator import BootlegAnnotator
from utils import load_concepts, load_types, load_descriptions, save_mapping
def load_UMLS_data(in_file, semantic_network_in_file, out_file):
'''
Description: Load UMLS data and store in Pandas DataFrame.
Input:
in_file (Path): path to UMLS data store. Download and unzip from
https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
semantic_network_in_file (Path): path to UMLS semantic network. Download and unzip from
https://lhncbc.nlm.nih.gov/semanticnetwork/
out_file (Path): path to store processed dataframe
Returns:
umls_df (pd.DataFrame): Pandas DataFrame with UMLS data
'''
if(os.path.exists(out_file)):
print(f"Loading UMLS data from {out_file}")
return pd.read_feather(out_file)
umls_df = load_concepts(in_file)
load_types(in_file, semantic_network_in_file, umls_df)
load_descriptions(in_file, umls_df)
print(f"Saving UMLS data to {out_file}")
umls_df.to_feather(out_file)
return umls_df
def generateMapping(umls_df):
'''
Description: We use Bootleg, an off-the-shelf entity linker, to map each concept (CUI) in
UMLS to a structured entity (QID) in WikiData. Please run "bash download_models_and_data.sh"
prior to executing this function.
Input:
umls_df (pd.DataFrame): UMLS data stored in a Pandas dataframe.
Generated using load_UMLS_data()
Returns:
'''
# Set logger
reload(logging)
logging.basicConfig(stream=sys.stdout, format="%(asctime)s %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
# Set file paths for data and model
root_dir = Path(".")
data_dir = root_dir / "data"
entity_dir = data_dir / "entity_db"
cand_map = entity_dir / "entity_mappings/alias2qids.json"
model_dir = root_dir / "models"
device = 0 #Set this to -1 if a GPU with at least 12Gb of memory is not available
# Save UMLS data in jsonlines format and extract mentions from UMLS titles
out_file = data_dir / "umls_data_bootleg.jsonl"
if(os.path.exists(out_file)==False):
umls_sents = [{'sentence': s['umls_title'], 'cui': s['umls_cui']} for i, s in umls_df.iterrows()]
with open(data_dir / "umls_data.jsonl", 'w') as f:
f.write('\n'.join(map(json.dumps, umls_sents)))
extract_mentions(data_dir / "umls_data.jsonl", out_file, cand_map, verbose=True)
# Set config arguments for entity linker
config_in_path = model_dir / "bootleg_uncased/bootleg_config.yaml"
config_args = load_yaml_file(config_in_path)
config_args["run_config"]["dataset_threads"] = 8
config_args["run_config"]["log_level"] = "info"
config_args["emmental"]["model_path"] = str(model_dir / "bootleg_uncased/bootleg_wiki.pth")
config_args["data_config"]["entity_dir"] = str(entity_dir)
config_args["data_config"]["alias_cand_map"] = "alias2qids.json"
config_args["data_config"]["data_dir"] = str(data_dir)
config_args["data_config"]["test_dataset"]["file"] = out_file.name
config_args["emmental"]["device"] = device
config_args = parse_boot_and_emm_args(config_args)
# Run entity linker
bootleg_label_file = "./bootleg-logs/bootleg_wiki/umls_data_bootleg/bootleg_wiki/bootleg_labels.jsonl"
if(os.path.exists(bootleg_label_file) == False):
bootleg_label_file, _ = run_model(mode="dump_preds", config=config_args)
# Save mapping
out_file = Path('./mapping.feather')
save_mapping(bootleg_label_file, umls_df, out_file)
print(f"Saved mapping to {out_file}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='generate_mapping.py')
parser.add_argument("--umls_data_dir", help="Path to UMLS data.", default="./umls_data/2017AA")
parser.add_argument("--umls_sem_net", help="Path to UMLS semantic network.", default="./umls_sem_net/2017AA")
args = parser.parse_args()
df = load_UMLS_data(Path(args.umls_data_dir), Path(args.umls_sem_net), Path('./umls_data.feather'))
generateMapping(df)
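# Example follow-up (illustrative sketch): once generateMapping() has run, the
# resulting CUI -> QID table can be inspected with pandas; column names follow
# save_mapping() in utils.py.
#
#     mapping = pd.read_feather('./mapping.feather')
#     print(mapping[['umls_cui', 'umls_title', 'wikidata_qid']].head())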
| medical-ned-integration-main | generate_mapping.py |
'''
File: utils.py
Description: Helper functions for generating mappings
'''
import gzip
from tqdm import tqdm
import pandas as pd
import os
import json
from rich import print
VALID_VOCABULARIES = ['CPT', 'FMA', 'GO', 'HGNC', 'HPO', 'ICD10', \
'ICD10CM', 'ICD9CM', 'MDR', 'MSH', 'MTH', 'NCBI', \
'NCI', 'NDDF', 'NDFRT', 'OMIM', 'RXNORM', 'SNOMEDCT_US']
def load_concepts(in_file):
'''
Description: Load UMLS concepts and titles and store in Pandas DataFrame.
Input:
in_file (Path): path to UMLS data store. Downloadable from
https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
Returns:
umls_df (pd.DataFrame): Pandas DataFrame with UMLS concept identifiers and titles.
'''
print(f"Loading concepts from UMLS")
cuiToTitle = {}
for part in ['aa', 'ab']:
with gzip.open(in_file / "META" / f"MRCONSO.RRF.{part}.gz") as f:
for line in tqdm(f):
fields = line.decode().strip().split('|')
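                # MRCONSO.RRF is pipe-delimited; the indices used below are
                # 0=CUI, 1=LAT (language), 2=TS ('P' = preferred term),
                # 4=STT ('PF' = preferred form), 11=SAB (source vocabulary),
                # 14=STR (the concept string).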
if(fields[1]!='ENG' or fields[11] not in VALID_VOCABULARIES): continue
cui = fields[0]
title = fields[14]
preferredForm = (fields[2]=='P' and fields[4]=='PF')
if(cui not in cuiToTitle or cuiToTitle[cui][1]==0):
cuiToTitle[cui] = (title, preferredForm)
df = pd.DataFrame({'umls_cui': list(cuiToTitle.keys()),
'umls_title': [cuiToTitle[c][0] for c in cuiToTitle]
})
print(f"Loaded {df.shape[0]} concepts from UMLS\n")
return df
def load_types(in_file, semantic_network_in_file, df):
'''
Description: Load UMLS types and store in Pandas DataFrame.
Input:
in_file (Path): path to UMLS data store. Downloadable from
https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
semantic_network_in_file (Path): path to UMLS semantic network. Downloadable from
https://lhncbc.nlm.nih.gov/semanticnetwork/
umls_df (pd.DataFrame): Pandas DataFrame with UMLS concept identifiers and titles.
Will be updated to include types.
Returns:
None
'''
print(f"Loading types from UMLS")
typeToName = {}
with open(semantic_network_in_file / "SRDEF", 'r') as f:
for line in tqdm(f):
fields = line.strip().split('|')
typeToName[fields[1]] = fields[2]
cuiToType = {cui: [] for cui in df['umls_cui'].to_list()}
all_types = set()
with gzip.open(in_file / "META" / "MRSTY.RRF.gz") as f:
for line in tqdm(f):
fields = line.decode().strip().split('|')
cui = fields[0]
type_id = fields[1]
if cui not in cuiToType or type_id=='UnknownType' or typeToName[type_id] in cuiToType[cui]:
continue
cuiToType[cui].append(typeToName[type_id])
all_types.add(typeToName[type_id])
df['umls_types'] = df['umls_cui'].map(cuiToType)
print(f"Loaded {len(all_types)} types from UMLS\n")
def load_descriptions(in_file, df):
'''
Description: Load UMLS descriptions and store in Pandas DataFrame.
Input:
in_file (Path): path to UMLS data store. Downloadable from
https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
umls_df (pd.DataFrame): Pandas DataFrame with UMLS concept identifiers, titles, and types.
Will be updated to include descriptions.
Returns:
None
'''
print(f"Loading definitions from UMLS")
cuiToDef = {cui: '' for cui in df['umls_cui'].to_list()}
with gzip.open(in_file / "META" / "MRDEF.RRF.gz") as f:
for line in f:
fields = line.decode().strip().split('|')
vocab = fields[4]
cui = fields[0]
desc = fields[5]
if(cui not in cuiToDef): continue
cuiToDef[cui] = desc
df['umls_defs'] = df['umls_cui'].map(cuiToDef)
print(f"Loaded {len(df[df['umls_defs'] != ''])} definitions from UMLS\n")
def save_mapping(bootleg_label_file, umls_df, out_file):
'''
Description: Save mapping as Pandas DataFrame.
Input:
bootleg_label_file (str): path to bootleg output file
umls_df (pd.DataFrame): Pandas DataFrame with UMLS concept identifiers, titles, and types.
out_file (Path): output filepath where mapping is saved
Returns:
None
'''
with open(bootleg_label_file, 'r') as f:
lines = f.read().splitlines()
bootleg_df = pd.json_normalize(pd.DataFrame(lines)[0].apply(json.loads))
bootleg_df['qids'] = bootleg_df['qids'].apply(lambda x: x[0] if len(x)>0 else None)
cuiToQid = dict(zip(bootleg_df.cui, bootleg_df.qids))
umls_df['wikidata_qid'] = umls_df['umls_cui'].map(cuiToQid)
umls_df.to_feather(out_file)
| medical-ned-integration-main | utils.py |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.1"
__author__ = "Aaron Swartz ([email protected])"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not UNICODE_SNOB and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(c):
if not UNICODE_SNOB and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(s):
s = s.group(1)
if s[0] == "#":
return charref(s[1:])
else: return entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
return r_unescape.sub(replaceEntities, s)
### End Entity Nonsense ###
def onlywhite(line):
"""Return true if the line does only consist of whitespace characters."""
for c in line:
        if c is not ' ' and c is not '\t':
return c is ' '
return line
def optwrap(text):
"""Wrap all paragraphs in the provided text."""
if not BODY_WIDTH:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if para[0] != ' ' and para[0] != '-' and para[0] != '*':
for line in wrap(para, BODY_WIDTH):
result += line + "\n"
result += "\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
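# Illustrative example:
#   dumb_property_dict("margin-left:36pt; font-style:italic")
#   -> {'margin-left': '36pt', 'font-style': 'italic'}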
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
return elements
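# Illustrative example:
#   dumb_css_parser(".c0{font-weight:bold;color:red}")
#   -> {'.c0': {'font-weight': 'bold', 'color': 'red'}}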
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_nest_count(style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / GOOGLE_LIST_INDENT
return nest_count
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class _html2text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
if out is None: self.out = self.outtextf
else: self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
if options.google_doc:
del unifiable_n[name2cp('nbsp')]
unifiable['nbsp'] = ' _place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if options.google_doc:
self.outtext = self.outtext.replace(' _place_holder;', ' ');
return self.outtext
def handle_charref(self, c):
self.o(charref(c), 1)
def handle_entityref(self, c):
self.o(entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and options.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o("_")
self.drop_white_space += 1
if bold:
self.o("**")
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o("**")
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o("_")
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if options.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if options.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.o(" \n\n<p>\n\n")
self.p()
self.o(" \n\n</p>\n\n")
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u']: self.o("_")
if tag in ['strong', 'b']: self.o("**")
if tag in ['del', 'strike']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if options.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not IGNORE_ANCHORS:
if start:
if has_key(attrs, 'href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.o("[")
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if a:
if INLINE_LINKS:
self.o("](" + a['href'] + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not IGNORE_IMAGES:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
if INLINE_LINKS:
self.o("")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("![")
self.o(alt)
self.o("]["+ str(attrs['count']) +"]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if options.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if options.google_doc:
nest_count = google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(options.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0: self.p_p = 1
def p(self): self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None: self.abbr_data += data
if not self.quiet:
if options.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
data = re.sub('\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
self.startpre = 0
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
bq += " "
data = data.replace("\n", "\n"+bq)
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
self.o(data, 1)
def unknown_decl(self, data): pass
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text_file(html, out=wrapwrite, baseurl=''):
h = _html2text(out, baseurl)
h.feed(html)
h.feed("")
return h.close()
def html2text(html, baseurl=''):
return optwrap(html2text_file(html, None, baseurl))
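# Illustrative example (exact whitespace depends on BODY_WIDTH and the options
# above):
#   html2text('<h1>Hello</h1>world in <b>bold</b>')
#   yields Markdown along the lines of '# Hello\n\nworld in **bold**\n\n'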
class Storage: pass
options = Storage()
options.google_doc = False
options.ul_item_mark = '*'
if __name__ == "__main__":
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=78, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevent when -g is specified as well")
(options, args) = p.parse_args()
# handle options
if options.ul_style_dash:
options.ul_item_mark = '-'
else:
options.ul_item_mark = '*'
BODY_WIDTH = options.body_width
GOOGLE_LIST_INDENT = options.list_indent
# process input
if len(args) > 0:
file_ = args[0]
encoding = None
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
text = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, text)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
data = text.decode(encoding, "ignore")
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
data = data.decode(encoding, "ignore")
else:
data = sys.stdin.read()
wrapwrite(html2text(data, baseurl))
| bazaar-master | condor/shared/html2text.py |
#!/usr/bin/env python
import botocore.session
import errno
import getopt
import os
import pprint
import shutil
import socket
import subprocess
import sys
import time
import urlparse
import urltools
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.rstrip().partition("=")
os.environ[key] = value
proc.communicate()
source_env_local()
EC2_INSTANCE_TYPE = os.environ.get('EC2_INSTANCE_TYPE')
if not EC2_INSTANCE_TYPE:
print('EC2_INSTANCE_TYPE is not set.')
exit(1)
AMI = 'ami-d05e75b8'
USERNAME = 'ubuntu'
PUBLIC_KEY = 'ssh/bazaar.key.pub'
REGION = 'us-east-1'
class EC2Client:
def __init__(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('ec2', region_name=REGION)
def import_key_pair(self):
with open(PUBLIC_KEY, "rb") as pubKeyFile:
f = pubKeyFile.read()
bytes = bytearray(f)
response = self.client.import_key_pair(
KeyName='bazaar',
PublicKeyMaterial=bytes
)
def delete_key_pair(self):
response = self.client.delete_key_pair(
KeyName='bazaar'
)
def create_security_group(self):
# check if security group exists already
try:
response = self.client.describe_security_groups(
GroupNames=['bazaar-group'],
)
except:
#if not response['SecurityGroups']:
print("Creating security group bazaar-group")
response = self.client.create_security_group(
GroupName='bazaar-group',
Description='Security Group enabling SSH for DeepDive\'s Bazaar',
)
#response.authorize('tcp', 22, 22, '0.0.0.0/0')
response = self.client.authorize_security_group_ingress(
GroupName='bazaar-group',
IpProtocol='tcp',
FromPort=22,
ToPort=22,
CidrIp='0.0.0.0/0')
print(response)
def run_instances(self, num=1):
response = self.client.run_instances(
ImageId=AMI, #'ami-d05e75b8',
MinCount=int(num),
MaxCount=int(num),
KeyName='bazaar',
SecurityGroups=[ 'bazaar-group' ],
InstanceType=EC2_INSTANCE_TYPE, #'m3.large',
BlockDeviceMappings=[
{
'VirtualName': 'ephemeral0',
'DeviceName': '/dev/xvdh',
},
],
Monitoring={
'Enabled': False
},
)
with open('.state/INSTANCE_IDS', 'w') as f:
for inst in response['Instances']:
f.write(inst['InstanceId'] + '\n')
with open('.state/CLOUD', 'w') as f:
f.write('ec-2')
def read_instance_ids(self):
ids = []
with open('.state/INSTANCE_IDS', 'r') as f:
for line in f:
ids.append(line.rstrip())
return ids
def read_hosts(self):
hs = []
with open('.state/HOSTS', 'r') as f:
for line in f:
hs.append(line.rstrip())
return hs
def wait_for_public_dns(self):
sys.stdout.write('Waiting for dns')
sys.stdout.flush()
ids = self.read_instance_ids()
response = None
while True:
response = self.client.describe_instances(
InstanceIds=ids
)
num_pending = 0
for inst in response['Reservations'][0]['Instances']:
if inst['State']['Name'] == 'pending':
num_pending = num_pending + 1
if num_pending == 0:
break
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(2)
print('')
with open('.state/HOSTS', 'w') as f:
for inst in response['Reservations'][0]['Instances']:
f.write(USERNAME + '@' + inst['PublicDnsName'] + ':22\n')
with open('.state/DIRS', 'w') as f:
for inst in response['Reservations'][0]['Instances']:
f.write('/home/' + USERNAME + '\n')
def wait_for_ssh(self, hname, port):
count = 0
while not is_port_reachable(hname, port):
count = count + 1
if count > 120:
                print('Timed out waiting for SSH.')
exit(1)
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(2)
def wait_for_ssh_all(self):
sys.stdout.write('Waiting for ssh')
sys.stdout.flush()
hnames = self.read_hosts()
for h in hnames:
ph = urltools.split_netloc(h)
hn = ph[2]
if ph[3] =='':
port = 22
else:
port = int(ph[3])
self.wait_for_ssh(hn, port)
def terminate_instances(self):
ids = self.read_instance_ids()
response = self.client.terminate_instances(
InstanceIds=ids
)
shutil.rmtree(".state")
def create_state_dir(self):
try:
os.makedirs('.state')
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir('.state'):
print("Found existing .state dir. Please terminate instances first.")
exit(1)
else: raise
def is_port_reachable(hname, port):
reachable = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((hname, port))
reachable = True
except socket.error as e:
reachable = False
s.close()
return reachable
def launch(argv):
num_instances = 1
try:
opts, args = getopt.getopt(argv,"n:",[])
except getopt.GetoptError:
#print " -n <numinstances>"
sys.exit(2)
for opt, arg in opts:
if opt == '-n':
num_instances = arg
print('Launching ' + str(num_instances) + ' instances on ec2')
client = EC2Client()
client.delete_key_pair()
client.import_key_pair()
client.create_state_dir()
client.create_security_group()
client.run_instances(num_instances)
client.wait_for_public_dns()
client.wait_for_ssh_all()
#print('Note: it might still take a few minutes until instances become accessible.')
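# Note on state files (as written by the calls above): run_instances() records
# .state/INSTANCE_IDS and .state/CLOUD, and wait_for_public_dns() records
# .state/HOSTS and .state/DIRS; terminate() later reads INSTANCE_IDS and
# removes the whole .state directory.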
def terminate():
client = EC2Client()
client.delete_key_pair()
client.import_key_pair()
client.terminate_instances()
def usage():
print("Usage: ec2-client.py launch|terminate [OPTIONS]")
exit(1)
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.partition("=")
os.environ[key] = value
proc.communicate()
def main(argv):
#source_env_local()
if len(argv) < 1:
usage()
cmd = argv[0]
if cmd == 'launch':
launch(argv[1:])
elif cmd == 'terminate':
terminate()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:])
| bazaar-master | distribute/ec2-client.py |
#!/usr/bin/env python
from azure import *
from azure.servicemanagement import *
import errno
import getopt
import os
import shutil
import subprocess
import sys
import time
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
(key, _, value) = line.rstrip().partition("=")
os.environ[key] = value
proc.communicate()
source_env_local()
# make sure we have required parameters
AZURE_SUBSCRIPTION_ID = os.environ.get('AZURE_SUBSCRIPTION_ID')
if not AZURE_SUBSCRIPTION_ID:
print('AZURE_SUBSCRIPTION_ID is not set.')
exit(1)
AZURE_SERVICE_NAME = os.environ.get('AZURE_SERVICE_NAME')
if not AZURE_SERVICE_NAME:
print('AZURE_SERVICE_NAME is not set.')
exit(1)
AZURE_ROLE_SIZE = os.environ.get('AZURE_ROLE_SIZE')
if not AZURE_ROLE_SIZE:
print('AZURE_ROLE_SIZE is not set.')
exit(1)
AZURE_STORAGE_ACCOUNT = os.environ.get('AZURE_STORAGE_ACCOUNT')
if not AZURE_STORAGE_ACCOUNT:
print('AZURE_STORAGE_ACCOUNT is not set.')
exit(1)
# management certificate
AZURE_MGMT_CERT = 'ssh/mycert.pem'
# service certificate
AZURE_SERVICE_PEM = 'ssh/bazaar.pem'
# vm settings
linux_image_name = 'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB'
container_name = 'bazaarctr'
location = 'West US'
class AzureClient:
def __init__(self):
self.sms = ServiceManagementService(AZURE_SUBSCRIPTION_ID, AZURE_MGMT_CERT)
def service_exists(self):
try:
props = self.sms.get_hosted_service_properties(AZURE_SERVICE_NAME)
return props is not None
except:
return False
def create_hosted_service(self):
if not self.service_exists():
print('Creating service ' + AZURE_SERVICE_NAME)
result = self.sms.create_hosted_service(
AZURE_SERVICE_NAME,
AZURE_SERVICE_NAME + 'label',
AZURE_SERVICE_NAME + 'description',
location)
self._wait_for_async(result.request_id)
self.create_service_certificate()
def list_services(self):
result = self.sms.list_hosted_services()
for hosted_service in result:
print('- Service name: ' + hosted_service.service_name)
print(' Management URL: ' + hosted_service.url)
print(' Location: ' + hosted_service.hosted_service_properties.location)
    def delete_service(self):
self.sms.delete_hosted_service(AZURE_SERVICE_NAME)
    def delete_deployment(self):
self.sms.delete_deployment('myhostedservice', 'v1')
def _linux_role(self, role_name, subnet_name=None, port='22'):
container_name = 'bazaarctr' + role_name
host_name = 'hn' + role_name
system = self._linux_config(host_name)
os_hd = self._os_hd(linux_image_name,
container_name,
role_name + '.vhd')
network = self._network_config(subnet_name, port)
return (system, os_hd, network)
def get_fingerprint(self):
import hashlib
with open (AZURE_SERVICE_PEM, "r") as myfile:
data = myfile.readlines()
lines = data[1:-1]
all = ''.join([x.rstrip() for x in lines])
key = base64.b64decode(all.encode('ascii'))
fp = hashlib.sha1(key).hexdigest()
return fp.upper()
def _linux_config(self, hostname):
SERVICE_CERT_THUMBPRINT = self.get_fingerprint()
pk = PublicKey(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/authorized_keys')
pair = KeyPair(SERVICE_CERT_THUMBPRINT, '/home/bazaar/.ssh/id_rsa')
system = LinuxConfigurationSet(hostname, 'bazaar', 'u7;9jbp!', True)
system.ssh.public_keys.public_keys.append(pk)
system.ssh.key_pairs.key_pairs.append(pair)
system.disable_ssh_password_authentication = True
return system
def _network_config(self, subnet_name=None, port='22'):
network = ConfigurationSet()
network.configuration_set_type = 'NetworkConfiguration'
network.input_endpoints.input_endpoints.append(
ConfigurationSetInputEndpoint('SSH', 'tcp', port, '22'))
if subnet_name:
network.subnet_names.append(subnet_name)
return network
def _os_hd(self, image_name, target_container_name, target_blob_name):
media_link = self._make_blob_url(
AZURE_STORAGE_ACCOUNT,
target_container_name, target_blob_name)
os_hd = OSVirtualHardDisk(image_name, media_link,
disk_label=target_blob_name)
return os_hd
def _make_blob_url(self, storage_account_name, container_name, blob_name):
return 'http://{0}.blob.core.windows.net/{1}/{2}'.format(
storage_account_name, container_name, blob_name)
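    # Illustrative example:
    #   self._make_blob_url('mystorage', 'bazaarctr0', 'role0.vhd')
    #   -> 'http://mystorage.blob.core.windows.net/bazaarctr0/role0.vhd'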
def create_storage(self):
name = AZURE_STORAGE_ACCOUNT
label = 'mystorageaccount'
location = 'West US'
desc = 'My storage account description.'
result = self.sms.create_storage_account(name, desc, label, location=location)
self._wait_for_async(result.request_id)
def storage_account_exists(self, name):
try:
props = self.sms.get_storage_account_properties(name)
return props is not None
except:
return False
def list_storage(self):
result = self.sms.list_storage_accounts()
for account in result:
print('Service name: ' + account.service_name)
print('Location: ' + account.storage_service_properties.location)
print('')
def delete_storage(self):
self.sms.delete_storage_account(AZURE_STORAGE_ACCOUNT)
def list_role_sizes(self):
result = self.sms.list_role_sizes()
for rs in result:
print('Name: ' + rs.name)
def _wait_for_async(self, request_id):
try:
self.sms.wait_for_operation_status(request_id, timeout=600)
except azure.WindowsAzureAsyncOperationError as e:
from pprint import pprint
pprint (vars(e.result.error))
def _wait_for_deployment(self, service_name, deployment_name,
status='Running'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while props.status != status:
count = count + 1
if count > 120:
                raise RuntimeError('Timed out waiting for deployment status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _wait_for_role(self, service_name, deployment_name, role_instance_name,
status='ReadyRole'):
count = 0
props = self.sms.get_deployment_by_name(service_name, deployment_name)
while self._get_role_instance_status(props, role_instance_name) != status:
count = count + 1
if count > 120:
                raise RuntimeError('Timed out waiting for role instance status.')
time.sleep(5)
props = self.sms.get_deployment_by_name(
service_name, deployment_name)
def _get_role_instance_status(self, deployment, role_instance_name):
for role_instance in deployment.role_instance_list:
if role_instance.instance_name == role_instance_name:
return role_instance.instance_status
return None
def delete_hosted_service(self):
print('Terminating service')
try:
self.sms.delete_hosted_service(AZURE_SERVICE_NAME, complete=True)
except:
pass
if os.path.exists('.state'):
shutil.rmtree('.state')
def create_state_dir(self):
try:
os.makedirs('.state')
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir('.state'):
print("Found existing .state dir. Please terminate instances first.")
exit(1)
else: raise
def list_os_images_public(self):
result = self.sms.list_os_images()
for img in result:
print(img.name)
def create_service_certificate(self):
with open(AZURE_SERVICE_PEM, "rb") as bfile:
cert_data = base64.b64encode(bfile.read()).decode()
cert_format = 'pfx'
cert_password = ''
cert_res = self.sms.add_service_certificate(service_name=AZURE_SERVICE_NAME,
data=cert_data,
certificate_format=cert_format,
password=cert_password)
self._wait_for_async(cert_res.request_id)
def create_deployment_and_roles(self, num_machines = 1):
deployment_name = AZURE_SERVICE_NAME
# one role for each machine
roles = []
for i in range(0, num_machines):
roles.append(AZURE_SERVICE_NAME + str(i))
system, os_hd, network = self._linux_role(roles[0], port='2000')
result = self.sms.create_virtual_machine_deployment(
AZURE_SERVICE_NAME, deployment_name, 'production',
deployment_name + 'label', roles[0], system, os_hd,
network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_deployment(AZURE_SERVICE_NAME, deployment_name)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[0])
for i in range(1, len(roles)):
system, os_hd, network = self._linux_role(roles[i], port=str(2000+i))
result = self.sms.add_role(AZURE_SERVICE_NAME, deployment_name, roles[i],
system, os_hd, network, role_size=AZURE_ROLE_SIZE)
self._wait_for_async(result.request_id)
self._wait_for_role(AZURE_SERVICE_NAME, deployment_name, roles[i])
# write to .state
with open('.state/HOSTS', 'w') as f:
for i in range(0, len(roles)):
f.write('bazaar@' + AZURE_SERVICE_NAME + '.cloudapp.net:' + str(2000+i) + '\n')
with open('.state/DIRS', 'w') as f:
for i in range(0, len(roles)):
f.write('/mnt\n')
with open('.state/CLOUD', 'w') as f:
f.write('azure')
def launch(argv):
num_instances = 1
try:
opts, args = getopt.getopt(argv,"n:",[])
except getopt.GetoptError:
#print " -n <numinstances>"
sys.exit(2)
for opt, arg in opts:
if opt == '-n':
num_instances = int(arg)
print('Launching ' + str(num_instances) + ' instances on Azure')
client = AzureClient()
client.create_state_dir()
client.create_hosted_service()
if not client.storage_account_exists(AZURE_STORAGE_ACCOUNT):
client.create_storage()
client.create_deployment_and_roles(num_instances)
def terminate():
client = AzureClient()
client.delete_hosted_service()
# We don't delete storage account, because it takes a long time to re-create.
#client.delete_storage()
def usage():
print("Usage: azure-client.py launch|terminate|role_sizes [OPTIONS]")
exit(1)
def main(argv):
if len(argv) < 1:
usage()
cmd = argv[0]
if cmd == 'launch':
launch(argv[1:])
elif cmd == 'terminate':
terminate()
elif cmd == 'role_sizes':
client = AzureClient()
client.list_role_sizes()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:])
| bazaar-master | distribute/azure-client.py |
from fabric.api import *
from fabric.tasks import execute
import os
import re
import sys
def get_platform():
with hide('everything'):
return run("uname -s")
def is_installed(cmd):
with settings(warn_only=True):
with hide('everything'):
result = run('command -v ' + cmd)
return result.return_code == 0
@task
@hosts('localhost')
def launch(cloud, num):
if cloud == "azure":
local('./azure-client.py launch -n ' + num)
if cloud == "ec-2" or cloud == "ec2":
local('./ec2-client.py launch -n ' + num)
@task
@parallel
def install():
ensure_hosts()
platform = get_platform()
put(local_path='installer/install-parser', remote_path='~/install-parser')
r = run('cd ~; chmod +x ~/install-parser; ./install-parser')
if not r.return_code == 0:
print('ERROR. Aborting')
sys.exit()
@task
@parallel
def restage():
ensure_hosts()
with prefix('export PATH=~/jdk1.8.0_45/bin:$PATH'):
r = run('cd ~/parser && sbt/sbt stage')
if not r.return_code == 0:
print('ERROR. Aborting')
sys.exit()
@task
@hosts('localhost')
def split(input='test/input.json',batch_size=1000):
local('rm -rf segments')
local('mkdir -p segments')
local('cat ' + input + ' | shuf | split -a 5 -l ' + str(batch_size) + ' - segments/')
def get_remote_write_dir():
if read_cloud() == 'ec-2':
directory = '/mnt'
else:
directory = env.directories[env.host_string]
return directory
@task
@parallel
def copy_to_servers():
ensure_hosts()
directory = get_remote_write_dir()
user = run('whoami')
run('sudo chown ' + user + ' ' + directory)
run('rm -rf ' + directory + '/segments')
run('mkdir -p ' + directory + '/segments')
num_machines = len(env.all_hosts)
machine = env.all_hosts.index(env.host_string)
output = local('find segments -type f', capture=True)
files = output.split('\n')
for f in files:
file_num = hash(f)
file_machine = file_num % num_machines
if file_machine == machine:
            print("put %s on machine %d" % (f, file_machine))
put(local_path=f, remote_path=directory + '/segments')
@task
@runs_once
def copy(input='test/input.json',batch_size=1000):
execute(split, input=input, batch_size=batch_size)
execute(copy_to_servers)
@task
@parallel
def echo():
ensure_hosts()
run('echo "$HOSTNAME"')
@task
@parallel
def parse(parallelism=2, key_id='item_id', content_id='content'):
ensure_hosts()
directory = get_remote_write_dir()
with prefix('export PATH=~/jdk1.8.0_45/bin:$PATH'):
run('find ' + directory + '/segments -name "*" -type f 2>/dev/null -print0 | ' +
'(cd ~/parser && xargs -0 -P ' + str(parallelism) + ' -L1 bash -c \'./run.sh -i json -k ' +
key_id + ' -v ' + content_id + ' -f \"$0\"\')')
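# For illustration, with the default arguments above the remote command expands to roughly:
#   find <remote_dir>/segments -name "*" -type f 2>/dev/null -print0 | \
#     (cd ~/parser && xargs -0 -P 2 -L1 bash -c './run.sh -i json -k item_id -v content -f "$0"')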
@task
@parallel
def get_registers():
directory = get_remote_write_dir()
registers = run('find ' + directory + '/segments -name "*.reg" -type f 2>/dev/null -print0 | xargs -0 -L1 head', quiet=True)
return [reg.strip() for reg in registers.split('\n')]
@task
@hosts('localhost')
def get_status():
# Get total segments count
total = len(filter(lambda f : re.search(r'\..*$', f) is None, os.listdir('segments')))
# Get registers
results = execute(get_registers)
# Parse registers & save status report
with open('parse_status.txt', 'wb') as f:
completed = 0
pending = 0
for server_name, registers in results.iteritems():
for reg in registers:
if len(reg.strip()) == 0:
continue
seg, status_code = reg.split(':')
status_code = int(status_code)
completed += status_code
pending += 1 - status_code
status = "Completed" if status_code == 1 else "Pending"
f.write("%s : %s [node=%s]\n" % (seg, status, server_name))
percent_done = 100*(float(completed)/total) if total > 0 else 0.0
    print("\nStatus:")
    print("Completed segments: %s / %s (%d%%)" % (completed, total, percent_done))
    print("Currently processing: %s" % pending)
    print("Detailed report written to parse_status.txt")
@task
@parallel
def clear_for_reparse():
ensure_hosts()
directory = get_remote_write_dir()
run('rm -rf ' + directory + '/segments/*.parsed')
run('rm -rf ' + directory + '/segments/*.failed')
@task
@parallel
def collect_from_nodes():
ensure_hosts()
directory = get_remote_write_dir()
# collect all files ending in .parsed and .failed
output = run('find ' + directory + '/segments/ -name "*.*" -type f')
if output == '':
print('Warning: No result segments on node')
else:
files = output.rstrip().split('\r\n')
for f in files:
path = f
get(local_path='segments', remote_path=path)
@task
@hosts('localhost')
def cat_result():
local('rm -f result')
local('find ./segments -name "*.parsed" -type f -print0 | xargs -0 cat >>result')
print('Done. You can now load the result into your database.')
@task
@runs_once
def collect():
execute(collect_from_nodes)
execute(cat_result)
def num_lines(filepath):
with open(filepath, 'rb') as f:
for i,l in enumerate(f):
pass
return i+1
# Large batch sizes seem to cause memory errors...
MAX_BATCH_SIZE=5000
@task
@hosts('localhost')
def copy_parse_collect(input=None, batch_size=None, parallelism=2, key_id='item_id', content_id='content'):
"""
Wrapper function to split and copy file to servers, parse, and collect
If batch_size is None, it will be automatically calculated based on number of machines
and specified parallelism
"""
if input is None:
print('Please specify input file to parse.')
exit(0)
ensure_hosts()
num_machines = num_lines('.state/HOSTS')
print('Preparing to run on %s machines with PARALLELISM=%s' % (num_machines, parallelism))
if batch_size is None:
batch_size = min(num_lines(input) / (num_machines * int(parallelism)), MAX_BATCH_SIZE)
# Copy the files to the remote machines
execute(copy, input=input, batch_size=batch_size)
# Sometimes coreNLP doesn't download- restage here to ensure compiled
execute(restage)
# Parse
execute(parse, parallelism=parallelism, key_id=key_id, content_id=content_id)
# Collect
execute(collect)
execute(get_status)
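# Example invocation of the wrapper above (illustrative values; Fabric 1.x
# task-argument syntax), run from this directory:
#   fab copy_parse_collect:input=test/input.json,parallelism=4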
@task
@hosts('localhost')
def terminate():
ensure_hosts()
cloud = read_cloud()
if cloud == 'azure':
local('./azure-client.py terminate')
elif cloud == 'ec-2':
local('./ec2-client.py terminate')
else:
print('Unknown cloud: ' + cloud)
exit(1)
def read_cloud():
if not os.path.isfile('.state/CLOUD'):
print('Could not find .state/CLOUD. Did you launch your machines already?')
exit(1)
return open('.state/CLOUD', 'r').readlines()[0].rstrip()
def read_hosts():
if os.path.isfile('.state/HOSTS'):
env.hosts = open('.state/HOSTS', 'r').readlines()
env.user = "ubuntu"
env.key_filename = "./ssh/bazaar.key"
dirs = open('.state/DIRS', 'r').readlines()
env.directories = {}
for i in range(0, len(dirs)):
env.directories[env.hosts[i].rstrip()] = dirs[i].rstrip()
else:
env.hosts = []
def ensure_hosts():
if not os.path.isfile('.state/HOSTS'):
print('Could not find .state/HOSTS. Did you launch your machines already?')
exit(1)
read_hosts()
| bazaar-master | distribute/fabfile.py |
#!/usr/bin/env python
from pyhocon import ConfigFactory
import json
import psycopg2
import psycopg2.extras
import sys
conf = ConfigFactory.parse_file('../view.conf')
conf_annotations = conf.get_list('view.annotations')
def write_annotations():
# write extractions to json file
dbconf = conf.get('view.db.default')
conn_string = "host='%s' dbname='%s' user='%s' password='%s'" % (
dbconf.get('host'),
dbconf.get('dbname'),
dbconf.get('user'),
dbconf.get('password'))
conn = psycopg2.connect(conn_string)
for ann in conf_annotations:
with open('../' + ann.get('input'), 'w') as w:
cursor = conn.cursor('ann_cursor', cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(ann.get('sql.query'))
for row in cursor:
#print(row)
# TODO: must write into the following format
# each row:
# {"range":{"type":"sentenceTokenSpan","doc_id":"doc123","sentNum":0,"f":3,"t":4},"target":{"entity":"something"}}
# save in file using w.write
obj = {"id":row[0], "range":{"type":"sentenceTokenSpan","doc_id":row[1],"sentNum":0,"f":row[2],"t":int(row[3])},"target":{"entity":row[4]}}
w.write(json.dumps(obj))
w.write('\n')
write_annotations()
| bazaar-master | view/util/fetch-annotations.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
INPUT = "../data/sentences.json"
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'docs'
N = 1000
es = Elasticsearch(hosts = [ES_HOST])
es.delete_by_query(index = INDEX_NAME, body = {
"query": {
"match_all": {}
}
})
with open(INPUT, 'r') as f:
bulk_data = []
for line in f:
src = json.loads(line)
id = src['doc_id'] + '__' + src['sent_id']
content = ' '.join(src['words'])
op_dict = {
"index": {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_id": id
}
}
data_dict = {
"id": id,
"content": content
}
bulk_data.append(op_dict)
bulk_data.append(data_dict)
if len(bulk_data) > N:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
bulk_data = []
if len(bulk_data) > 0:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
es.indices.refresh(index = INDEX_NAME)
#if es.indices.exists(INDEX_NAME):
# res = es.indices.delete(index = INDEX_NAME)
#
#request_body = {
# "settings" : {
# "number_of_shards": 1,
# "number_of_replicas": 0
# }
#}
#
#print("creating '%s' index..." % (INDEX_NAME))
#res = es.indices.create(index = INDEX_NAME, body = request_body, ignore=400)
#print("bulk indexing...")
#res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = True)
# sanity check
#res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
#print(" response: '%s'" % (res))
#print("results:")
#for hit in res['hits']['hits']:
# print(hit["_source"])
| bazaar-master | view/util/index_docs.py |
#!/usr/bin/env python
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_ANNOTATORS_NAME = 'annotators'
TYPE_ANNOTATIONS_NAME = 'annotations'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')
conf_annotations = conf.get_list('view.annotations')
es = Elasticsearch(hosts = [ES_HOST])
# create a small table that only contains the names of all available extractors
def index_annotators():
es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_ANNOTATORS_NAME, body = {
"query": {
"match_all": {}
}
})
for ann in conf_annotations:
es.index(index = INDEX_NAME, doc_type = TYPE_ANNOTATORS_NAME, body = {
"name" : ann.get('name')
}, refresh = False)
es.indices.refresh(index = INDEX_NAME)
# create a large table that contains all extractions
def index_annotations():
es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_ANNOTATIONS_NAME, body = {
"query": {
"match_all": {}
}
})
for ann in conf_annotations:
# read from file
# bulk index docs
bulk_data = []
for l in open('../' + ann.get('input')):
o = json.loads(l)
# {"id": "12", "range":{"type":"sentenceTokenSpan","doc_id":"doc123","sentNum":0,"f":3,"t":4},"target":{"entity":"something"}}
o['attribute'] = ann.get('name')
op_dict = {
"index": {
"_index": INDEX_NAME,
"_type": TYPE_ANNOTATIONS_NAME,
"_id": o['id'],
"_parent": o['range']['doc_id']
}
}
#data_dict = {
# "id": id,
# "content": content,
# "tokenOffsets": tokenOffsets
#}
#o['content'] = o[u'text']
data_dict = o
#print(op_dict)
#print(data_dict)
bulk_data.append(op_dict)
bulk_data.append(data_dict)
if len(bulk_data) > N:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
bulk_data = []
if len(bulk_data) > 0:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
es.indices.refresh(index = INDEX_NAME)
index_annotators()
index_annotations()
| bazaar-master | view/util/refresh-annotations.py |
#!/usr/bin/env python
# Author: Zifei Shan ([email protected])
''' This file constructs a sentence table from ann.* files generated by the Pipe project.
Example usage:
python generate_sentence_table.py DIRECTORY/OF/ANN/ > output_sentences.tsv
The generated sentence table follows the format below:
CREATE TABLE sentences (
doc_id text,
sent_id integer,
wordidxs integer[],
words text[],
poses text[],
ners text[],
lemmas text[],
dep_tuples text[], -- Triplet format. e.g.: "1 dep 0"
sentence_id text
);
'''
import sys, json
# This file can accept an argument: the folder that contains ann.*
# If not specified, use the current directory.
if len(sys.argv) == 1:
basedir = ''
else:
basedir = sys.argv[1] + '/'
# Helper functions
def list2TSVarray(a_list, quote=True):
    '''Convert a list to a string that can be used in a TSV column and interpreted as
    an array by the PostgreSQL COPY FROM command.
If 'quote' is True, then double quote the string representation of the
elements of the list, and escape double quotes and backslashes.
'''
if a_list is None:
return '\\N'
if quote:
for index in range(len(a_list)):
if "\\" in unicode(a_list[index]):
# Replace '\' with '\\\\"' to be accepted by COPY FROM
a_list[index] = unicode(a_list[index]).replace("\\", "\\\\\\\\")
            # This must happen after the previous substitution
if "\"" in unicode(a_list[index]):
# Replace '"' with '\\"' to be accepted by COPY FROM
a_list[index] = unicode(a_list[index]).replace("\"", "\\\\\"")
string = ",".join(list(map(lambda x: "\"" + unicode(x) + "\"", a_list)))
else:
string = ",".join(list(map(lambda x: unicode(x), a_list)))
return "{" + string + "}"
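# For example (illustrative): list2TSVarray([1, 2, 3], quote=False) returns
# '{1,2,3}', and list2TSVarray(['a', 'b']) returns '{"a","b"}'.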
def open_file(fname):
'''
    Opens a file; returns None if the file is not found.
'''
try:
return open(fname)
except:
return None
def read_js_line(fp):
'''
Return None if file is not open. Otherwise read a line from file.
If '' returned, EOF is found.
'''
if fp == None:
return None
s = fp.readline()
if s == '':
return ''
else:
return json.loads(s)
def escape_none(s):
'''
Just escaping a None into psql-friendly format
'''
if s is None:
return '\\N'
return unicode(s).encode('utf-8')
def findTokenOffset(token_offsets, sent_offset):
'''
Construct sent_token_offsets
'''
start = min(i for i in range(len(token_offsets)) if token_offsets[i][0] == sent_offset[0])
end = max(i for i in range(len(token_offsets)) if token_offsets[i][1] == sent_offset[1]) + 1
return start, end
# ----------- Main function -------------
# Assume fixed filenames
fdoc_id = open_file(basedir + 'ann.id')
flemma = open_file(basedir + 'ann.lemmas')
fpos = open_file(basedir + 'ann.poss')
fner = open_file(basedir + 'ann.nerTags')
fsent_offset = open_file(basedir + 'ann.sentenceOffsets')
fsent_token_offset = open_file(basedir + 'ann.sentenceTokenOffsets')
ftext = open_file(basedir + 'ann.text')
ftoken_offset = open_file(basedir + 'ann.tokenOffsets')
fsent_deps = open_file(basedir + 'ann.sentenceDependencies')
while True:
doc_id = read_js_line(fdoc_id)
lemmas = read_js_line(flemma)
poss = read_js_line(fpos)
ners = read_js_line(fner)
sent_offsets = read_js_line(fsent_offset)
# sent_token_offsets = read_js_line(fsent_token_offset)
text = read_js_line(ftext)
token_offsets = read_js_line(ftoken_offset)
sent_deps = read_js_line(fsent_deps)
if any(x == '' for x in [doc_id, lemmas, poss, sent_offsets, \
text, token_offsets]):
break
sent_token_offsets = [ findTokenOffset(token_offsets, x) for x in sent_offsets]
# loop through each sentence
sent_words = [text[o[0] : o[1]] for o in token_offsets]
# print 'WORDS:', sent_words
for sent_id in range(len(sent_token_offsets)):
sent_from, sent_to = sent_token_offsets[sent_id]
sentence_id = unicode(doc_id) + '_' + unicode(sent_id)
if sent_deps is not None:
# e.g.: [[{"name":"det","from":1,"to":0}],[{"name":"advmod","from":1,"to":0},{"name":"advmod","from":1,"to":2}]]
this_sent_deps = ['%d %s %d' % (d['from'], d['name'], d['to']) for d in sent_deps[sent_id]]
print '\t'.join([escape_none(x) for x in [ \
doc_id, \
sent_id, \
list2TSVarray([x for x in range(sent_to - sent_from)]), \
list2TSVarray( sent_words[ sent_from : sent_to] ) if sent_words is not None else None, \
list2TSVarray( poss[ sent_from : sent_to]) if poss is not None else None, \
list2TSVarray( ners[ sent_from : sent_to]) if ners is not None else None, \
list2TSVarray( lemmas[ sent_from : sent_to]) if lemmas is not None else None, \
list2TSVarray( this_sent_deps ) if sent_deps is not None else None, \
sentence_id \
]])
| bazaar-master | view/util/generate_sentence_table.py |
#! /usr/bin/env python
# Legacy support for sentences table in DeepDive.
# The script reads the table from the database and stores it in the new column format.
from pyhocon import ConfigFactory
import json
import psycopg2
import psycopg2.extras
import sys
import pipe
conf = ConfigFactory.parse_file('../view.conf')
docs = conf.get('view.docs')
def find_token_offsets(s):
    # offsets of space-delimited tokens within s, as [start, end) pairs
pos = [ -1 ] + [ i for i, ltr in enumerate(s) if ltr == ' ' ] + [ len(s) ]
offsets = [ [ pos[i] + 1, pos[i + 1] ] for i in range(0, len(pos) - 1) ]
return offsets
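# For example (illustrative): find_token_offsets('a bc d') returns
# [[0, 1], [2, 4], [5, 6]], i.e. [start, end) character spans, one per token.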
def write_docs():
# write extractions to json file
dbconf = conf.get('view.db.default')
conn_string = "host='%s' dbname='%s' user='%s' password='%s'" % (
dbconf.get('host'),
dbconf.get('dbname'),
dbconf.get('user'),
dbconf.get('password'))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor('ann_cursor', cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(docs.get('sql.query'))
with pipe.col_open_w('../data/sentences', [ 'id', 'text', 'tokenOffsets', 'sentenceTokenOffsets', 'sentenceOffsets', 'lemmas', 'poss' ]) as w:
sent_num = 0
prev_document_id = None
for row in cursor:
# id
#document_id = str(row[0])
#if document_id != prev_document_id:
# sent_num = 0
#id = document_id + '@' + str(sent_num)
id = row[0]
text = row[1]
token_offsets = find_token_offsets(text)
sentence_token_offsets = [[0,len(token_offsets)]]
sentence_offsets = [[0, len(text)]]
lemmas = row[2]
pos_tags = row[3]
w.write([id, text, token_offsets, sentence_token_offsets, sentence_offsets, lemmas, pos_tags])
#prev_document_id = document_id
sent_num = sent_num + 1
write_docs()
| bazaar-master | view/util/fetch-sentences-table.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'extractors'
es = Elasticsearch(hosts = [ES_HOST])
es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_NAME, body = {
"query": {
"match_all": {}
}
})
es.index(index = INDEX_NAME, doc_type = TYPE_NAME, body = {
"name" : "genepheno"
}, refresh = False)
es.indices.refresh(index = INDEX_NAME)
| bazaar-master | view/util/index_extrlist.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
EXTRACTOR='genepheno'
INPUT='../data/genepheno_rel.json'
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'docs'
N = 1000
es = Elasticsearch(hosts = [ES_HOST])
with open(INPUT, 'r') as f:
bulk_data = []
for line in f:
src = json.loads(line)
id = src['doc_id'] + '__' + str(src['sent_id'])
op_dict = {
"update": {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_id": str(id)
}
}
extr = ','.join(map(str, src['gene_wordidxs'])) + '-' + ','.join(map(str, src['pheno_wordidxs']))
script_dict = {
"script" : "if (ctx._source.containsKey(\"" + EXTRACTOR + "\")) {ctx._source[\"" + EXTRACTOR + "\"] += ex;} else {ctx._source[\"" + EXTRACTOR + "\"] = [ex]}",
"params" : {
"ex" : extr
}
}
bulk_data.append(op_dict)
bulk_data.append(script_dict)
if len(bulk_data) > N:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
bulk_data = []
if len(bulk_data) > 0:
print('doing update')
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
es.indices.refresh(index = INDEX_NAME)
| bazaar-master | view/util/index_extr.py |
#! /usr/bin/env python
from os import listdir
from os.path import isfile, join
import json
# column format reader
def col_open(dir):
return ColumnReaderAsSingleObj(dir)
def col_open_arr(dir):
return ColumnReader(dir)
class ColumnReader(object):
'''Reads Pipe's column format'''
def __init__(self, dir):
files = [ f for f in listdir(dir) if isfile(join(dir, f)) and not f == '.errors' ]
self.types = [ f[f.rfind('.') + 1:] for f in files ]
self.u_types = [ unicode(s, 'utf-8') for s in self.types ]
self.handles = [ open(join(dir, f)) for f in files ]
def __iter__(self):
return self
def next(self):
row = [ h.readline() for h in self.handles ]
for c in row:
if c == '':
self.close()
raise StopIteration
return [ json.loads(c.rstrip()) for c in row ]
def close(self):
for h in self.handles:
if not h.closed:
h.close()
class ColumnReaderAsSingleObj(ColumnReader):
def next(self):
row = super(self.__class__, self).next()
obj = {}
for i in range(0, len(row)):
obj[self.u_types[i]] = row[i]
return obj
# column format writer
def col_open_w(dir, types):
return ColumnWriter(dir, types)
class ColumnWriter(object):
'''Writes Pipe's column format'''
def __init__(self, dir, types):
self.types = types
        files = [ 'ann.' + t for t in types ]
        self.handles = [ open(join(dir, f), 'w') for f in files ]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def write(self, arr):
for i, a in enumerate(arr):
self.handles[i].write(json.dumps(a) + '\n')
def close(self):
for h in self.handles:
if not h.closed:
h.close()
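# Minimal usage sketch (hypothetical directory and column names):
#
#   with col_open_w('ann_dir', ['id', 'text']) as w:
#       w.write(['doc1', 'hello world'])
#   for obj in col_open('ann_dir'):
#       text = obj[u'text']
#
# col_open() yields one dict per row keyed by column type, while
# col_open_arr() yields plain lists, one value per column file.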
| bazaar-master | view/util/pipe.py |
#!/usr/bin/env python
import pipe
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_NAME = 'docs'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')
docs_conf = conf.get('view.docs')
es = Elasticsearch(hosts = [ES_HOST])
def index_docs():
# clear index
es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_NAME, body = {
"query": {
"match_all": {}
}
})
# bulk index docs
bulk_data = []
for o in pipe.col_open('../' + docs_conf.get('input')):
id = o[u'id']
content = o[u'text']
tokenOffsets = o[u'tokenOffsets']
op_dict = {
"index": {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_id": id
}
}
#data_dict = {
# "id": id,
# "content": content,
# "tokenOffsets": tokenOffsets
#}
o['content'] = o[u'text']
data_dict = o
bulk_data.append(op_dict)
bulk_data.append(data_dict)
if len(bulk_data) > N:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
bulk_data = []
if len(bulk_data) > 0:
res = es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
es.indices.refresh(index = INDEX_NAME)
index_docs()
| bazaar-master | view/util/refresh-documents.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import json
import time
try:
from eval_server_common import connect_to_redis
except ImportError:
print("HINT: copy example.eval_server_common.py to eval_server_common.py")
raise
import codraw_data
import episode
#%%
FAREWELL_MSG = "that's it, thanks!"
class Bot():
model_name = "model_generic"
agent_type = None
fns = None
# TODO(nikita): peek action for bot drawers is not supported
def __init__(self, id):
self.id = id
self.episode = episode.Episode()
self.role = "question" if self.agent_type == codraw_data.Agent.DRAWER else "answer"
self.handlers = {
'paired': self.on_paired,
'receive message': self.on_receive_message,
'server error': self.on_server_error, #TODO(nikita): Not emitted after I modified the server code
'disconnected partner': self.on_disconnected_partner,
}
self.disconnected = False
self.num_messages_sent = 0
def disconnect(self):
if self.id in type(self).active_bots:
assert type(self).active_bots[self.id] == self
del type(self).active_bots[self.id]
if not self.disconnected:
self.disconnected = True
self.emit('disconnect')
def emit(self, event, **msg):
obj = {
'botId': self.id,
'event': event,
'msg': msg,
}
self.redis.publish('visdial_server', json.dumps(obj))
def send_msg(self, msg):
self.num_messages_sent += 1
self.emit('chat message', msg=msg, role=self.role, seqId=self.num_messages_sent)
print("Sent chat message:", msg)
def send_scene_log(self, scene):
self.emit('scene log', scene=scene.stringify(), role=self.role, seqId=self.num_messages_sent)
# TODO(nikita): implement drawer bots, including "send_scene_log" which is sent by drawer
# socket.emit('scene log', {scene: Abs.resultAMT(), hitId: hitId, assignmentId: assignmentId, workerId: workerId, role: workerRole, seqId: noOfMsg});
def run_model_actions(self, must_trigger=True):
old_len = len(self.episode)
terminated = self._run_model_actions()
if terminated:
print("No action taking. Disconnecting")
if INTERACTIVE:
display(self.episode.get_true_scene())
self.send_msg(FAREWELL_MSG)
self.disconnect()
return
if must_trigger:
if len(self.episode) == old_len:
self.disconnect()
assert False, f"No response for event: {type(self.episode[-1]).__name__}"
msg_to_send = None
do_send_scene_log = False
for event in self.episode[old_len:]:
# TODO(nikita): log latent actions, such as SelectClipart
if isinstance(event, codraw_data.TellGroup):
assert msg_to_send is None, "Multiple TellGroup events added in a single round!"
msg_to_send = event.msg
elif isinstance(event, codraw_data.ReplyGroup):
assert msg_to_send is None, "Multiple ReplyGroup events added in a single round!"
msg_to_send = event.msg
elif isinstance(event, (codraw_data.DrawClipart, codraw_data.DrawGroup)):
do_send_scene_log = True
if do_send_scene_log:
assert self.agent_type == codraw_data.Agent.DRAWER
self.send_scene_log(self.episode.reconstruct())
if self.agent_type == codraw_data.Agent.TELLER:
assert msg_to_send is not None, "No message to send"
# Empty message is a signal for the drawer to begin the conversation
if msg_to_send == "" and len([x for x in self.episode if isinstance(x, codraw_data.TellGroup)]) == 1:
msg_to_send = None
print("Model expects the human drawer to start the conversation.")
else:
assert msg_to_send is not None or isinstance(self.episode[-1], codraw_data.ObserveTruth), "No message to send, and not the start"
if msg_to_send is not None:
self.send_msg(msg_to_send)
def _run_model_actions(self):
while True:
for fn in self.fns:
if type(self.episode[-1]) in fn._trigger_types:
old_len = len(self.episode)
fn(self.episode)
if len(self.episode) == old_len:
return True # terminated
break
else:
# print('no trigger for', type(self.episode[-1]))
return False
def on_paired(self, partnerId=None, key=None, image_url=None, role=None, caption=None):
if self.disconnected:
print("[ERROR] Disconnected bot was paired!")
return
        print("Paired with human partner!")
print("image_url:", image_url)
print("partner role:", role) # Yes, the role sent in the message is for the partner
assigned_role = "question" if role == "answer" else "answer"
assert assigned_role == self.role, "Wrong role assigned to bot!"
true_scene = codraw_data.AbstractScene(image_url)
self.episode.append(codraw_data.ObserveTruth(true_scene))
self.run_model_actions(must_trigger=False)
def on_receive_message(self, message=None, noOfMsg=None):
if self.disconnected:
print("[ERROR] Disconnected bot received a message!")
return
print(f"Got human message {noOfMsg}: {message}")
assert message is not None
if self.agent_type == codraw_data.Agent.TELLER:
self.episode.append(codraw_data.ReplyGroup(message))
else:
self.episode.append(codraw_data.TellGroup(message))
self.run_model_actions()
def on_disconnected_partner(self, disable='_unused'):
        print("Partner disconnected from bot! Cleaning up the bot")
self.disconnect()
def on_server_error(self, errorMsg='[no errorMsg specified]'):
print("Error from server:", errorMsg)
self.disconnect()
# %%
def run_loop(classes):
active_bots = {}
channel_to_cls = {}
for cls in classes:
assert cls.agent_type in (codraw_data.Agent.TELLER, codraw_data.Agent.DRAWER), "Invalid agent_type for bot!"
channel = f'visdial_models.{cls.model_name}'.encode('utf-8')
assert channel not in channel_to_cls, f"Duplicate model name {cls.model_name}"
channel_to_cls[channel] = cls
if not hasattr(cls, 'redis'):
cls.redis = connect_to_redis()
if not hasattr(cls, 'active_bots'):
cls.active_bots = active_bots
p = cls.redis.pubsub()
for channel in channel_to_cls:
p.subscribe(channel)
for redis_msg in p.listen():
print("Got redis msg", redis_msg)
if redis_msg['type'] != 'message':
continue
if redis_msg['channel'] not in channel_to_cls:
print(f"WARNING: unrecognized channel {redis_msg['channel']}")
continue
data = json.loads(redis_msg['data'])
id = data['botId']
event = data['event']
msg = data['msg']
if event == 'paired':
active_bots[id] = channel_to_cls[redis_msg['channel']](id)
if id in active_bots:
handler = active_bots[id].handlers.get(event, None)
if handler is None:
print(f"No handler for event '{event}'")
else:
active_bots[id].handlers[event](**msg)
# %%
def make_script_teller_class():
import model
class ScriptTellerBot(Bot):
model_name = 'teller_script'
agent_type = codraw_data.Agent.TELLER
fns = [model.scripted_tell_before_peek]
scene_to_script = {}
def _run_model_actions(self):
if not hasattr(self.episode, 'script'):
script = self.scene_to_script[self.episode.get_last(codraw_data.ObserveTruth).scene.stringify()]
self.episode.script = script
self.episode.script_index = 0
return super()._run_model_actions()
for scene, script in codraw_data.get_scenes_and_scripts('all'):
ScriptTellerBot.scene_to_script[scene.stringify()] = script
return ScriptTellerBot
# %%
def model_to_bot_class(model_name, model, model_agent_type=codraw_data.Agent.TELLER):
model_name_ = model_name
class TheBot(Bot):
model_name = model_name_
agent_type = model_agent_type
fns = model.get_action_fns()
TheBot.__name__ = type(model).__name__ + 'Bot'
TheBot.__qualname__ = TheBot.__qualname__.replace('TheBot', TheBot.__name__)
return TheBot
# %%
def run_model_pairs(tellers, drawers=[], include_script_teller=True):
classes = []
if include_script_teller:
classes.append(make_script_teller_class())
for teller_name, (a, b) in tellers:
if a is not None:
classes.append(model_to_bot_class(teller_name + '_a', a, codraw_data.Agent.TELLER))
if b is not None:
classes.append(model_to_bot_class(teller_name + '_b', b, codraw_data.Agent.TELLER))
for drawer_name, (a, b) in drawers:
if a is not None:
classes.append(model_to_bot_class(drawer_name + '_a', a, codraw_data.Agent.DRAWER))
if b is not None:
classes.append(model_to_bot_class(drawer_name + '_b', b, codraw_data.Agent.DRAWER))
run_loop(classes)
#%%
if __name__ == '__main__':
from saved_models import load_models, make_pairs
models = load_models()
models['teller_scene2seq_a'].max_rounds = 20
models['teller_scene2seq_aux2_a'].max_rounds = 20
models['teller_rl_a'].max_rounds = 20
# TODO(nikita): change max_rounds for partition-b tellers, too
tellers = make_pairs(models,
'teller_nn',
'teller_pragmaticnn',
'teller_scene2seq',
'teller_scene2seq_aux2',
'teller_rl',
)
drawers = make_pairs(models,
'drawer_nn',
'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
run_model_pairs(tellers, drawers)
| codraw-models-master | eval_run_bots.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from model import make_fns, eval_fns
from model import Model
from baseline3_models import SceneToSeqTeller
# %%
def process_episode(episode,
brw_rewards, brw_discounted_rewards,
utterance_penalty,
gamma,
uninformative_penalty,
):
scene_sims = None
for event in episode:
if isinstance(event, codraw_data.ObserveTruth):
drawn_scene = []
true_scene = event.scene
scene_sims = []
reward_idxs = []
yield event
elif isinstance(event, codraw_data.TellGroup):
if reward_idxs:
base_idx = reward_idxs[-1] + 1
else:
base_idx = 0
offset = len(event.msg.split())
if offset >= 50:
offset = 50 - 1
reward_idxs.append(base_idx + offset)
yield event
elif isinstance(event, (codraw_data.ObserveCanvas, codraw_data.ReplyGroup)):
yield event
elif isinstance(event, codraw_data.DrawGroup):
assert drawn_scene is not None
drawn_scene = [c for c in drawn_scene if c.idx not in [c2.idx for c2 in event.cliparts]]
drawn_scene.extend(event.cliparts)
scene_sims.append(scene_similarity(drawn_scene, true_scene))
yield codraw_data.SetDrawing(drawn_scene)
elif isinstance(event, codraw_data.SetDrawing):
scene_sims.append(scene_similarity(event.scene, true_scene))
yield event
if scene_sims is not None:
rewards = np.array(scene_sims) - np.array([0] + scene_sims[:-1])
rewards = np.where(rewards > 0, rewards, -uninformative_penalty)
if len(rewards) >= 50:
rewards = np.array(list(rewards - utterance_penalty))
else:
rewards = np.array(list(rewards - utterance_penalty) + [0])
if reward_idxs:
reward_idxs.append(reward_idxs[-1] + 1)
else:
reward_idxs.append(0)
new_brw_rewards = np.zeros(reward_idxs[-1] + 1)
new_brw_rewards[np.array(reward_idxs)] = rewards
brw_rewards.extend(list(new_brw_rewards))
brw_discounted_rewards.extend(list(discount_rewards(new_brw_rewards, gamma)))
def discount_rewards(r, gamma=0.99):
""" take 1D float array of rewards and compute discounted reward """
# https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
r = np.asarray(r)
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
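# For example, discount_rewards([0., 0., 1.], gamma=0.5) returns
# array([0.25, 0.5, 1.]): each reward is accumulated backwards with decay gamma.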
def examples_from_episodes(episodes, dg, utterance_penalty, gamma, uninformative_penalty):
brw_rewards = []
brw_discounted_rewards = []
episodes = [list(process_episode(episode,
brw_rewards, brw_discounted_rewards,
utterance_penalty,
gamma,
uninformative_penalty,
))
for episode in episodes]
example_batch = dg.tensors_from_episodes(episodes + [[codraw_data.ObserveTruth([])]])
example_batch['brw_rewards'] = torch.tensor(brw_rewards, dtype=torch.float, device=cuda_if_available)
example_batch['brw_discounted_rewards'] = torch.tensor(brw_discounted_rewards, dtype=torch.float, device=cuda_if_available)
return example_batch
# %%
def collect_episodes(fns,
dg,
scenes=codraw_data.get_scenes('dev'),
batch_size=16,
utterance_penalty=0.25,
gamma=0.99,
uninformative_penalty=0.3
):
with torch.no_grad():
episodes = []
for scene in np.random.choice(scenes, batch_size):
ep = Episode.run(scene, fns)
episodes.append(ep)
example_batch = examples_from_episodes(
episodes,
dg=dg,
utterance_penalty=utterance_penalty,
gamma=gamma,
uninformative_penalty=uninformative_penalty,
)
return episodes, example_batch
# %%
class RLSceneToSeqTeller(SceneToSeqTeller):
def disable_dropout(self):
for module in self.modules():
if isinstance(module, nn.Dropout):
module.p = 0
def calc_rl_loss(self, example_batch):
dg = self.datagen
b_clipart_tags = self.tag_embs(example_batch['b_scene_tags']).view(-1, dg.NUM_INDEX, self.d_clipart_tags)
packer = example_batch['packer']
ob_clipart_tags = packer.ob_from_b(b_clipart_tags)
ob_clipart_tags = self.pre_attn_tag_dropout(ob_clipart_tags)
ob_scene_mask = packer.ob_from_b(example_batch['b_scene_mask'])
brw_teller_tokens_in = example_batch['brw_teller_tokens_in']
brw_embs = self.pre_lstm_emb_dropout(self.word_embs(brw_teller_tokens_in))
orwb_embs = packer.orwb_from_brw_pack(brw_embs)
orwb_attended_values_prelstm = self.attn_prelstm(orwb_embs, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
orwb_lstm_in = nn.utils.rnn.PackedSequence(torch.cat([
orwb_embs.data,
orwb_attended_values_prelstm.data,
], -1), orwb_embs.batch_sizes)
orwb_lstm_out, _ = self.lstm(orwb_lstm_in)
orwb_lstm_out = nn.utils.rnn.PackedSequence(self.post_lstm_dropout(orwb_lstm_out.data), orwb_lstm_out.batch_sizes)
orwb_attended_values = self.attn(orwb_lstm_out, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
brw_pre_project = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out),
packer.brw_from_orwb_unpack(orwb_attended_values),
], -1)
brw_word_logits = self.word_project(brw_pre_project)
brw_word_losses = F.cross_entropy(brw_word_logits, example_batch['brw_teller_tokens_out'], reduce=False)
b_word_losses = nn.utils.rnn.pad_packed_sequence(packer.orwb_from_brw_pack(brw_word_losses))[0].sum(0)
print('mean nll', float(b_word_losses.mean()))
# Discounting occurs at every word
# brw_discounted_rewards = example_batch['brw_discounted_rewards'][:brw_word_losses.shape[0]]
# XXX(nikita): clipping here seems wrong. Make sure there are no more crashes!
brw_discounted_rewards = example_batch['brw_discounted_rewards']
# TODO(nikita): what is the right baseline?
baseline = 0.8
brw_discounted_rewards = brw_discounted_rewards - baseline
brw_rl_losses = brw_word_losses * brw_discounted_rewards
rl_loss = brw_rl_losses.mean()
return rl_loss
# %%
def load_baseline4():
models = {}
rl_spec_a = torch_load('models/rl_nodict_aug2.pt')
models['teller_rl_a'] = RLSceneToSeqTeller(spec=rl_spec_a)
models['teller_rl_b'] = None
models['teller_rl_a'].eval()
return models
| codraw-models-master | baseline4_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
An event-based view of the CoDraw dataset
"""
#%%
import numpy as np
from pathlib import Path
import json
from enum import Enum
from collections import namedtuple
import inspect
import abs_util_orig
import abs_render
#%%
# INTERACTIVE is normally injected by the project's interactive (notebook) setup;
# this is an assumed fallback so the module also imports cleanly as a plain script.
INTERACTIVE = globals().get('INTERACTIVE', False)
if INTERACTIVE:
DATASET_PATH = Path('../CoDraw/dataset/CoDraw_1_0.json')
else:
DATASET_PATH = Path(__file__).parent / '../CoDraw/dataset/CoDraw_1_0.json'
assert DATASET_PATH.exists()
#%% clipart wrappers, with better docs than abs_util_orig.py
ClipartBase = namedtuple('Clipart',
['idx', 'subtype', 'depth', 'flip', 'x', 'y'])
# idx: integer [0-57]
# subtype: integer [0-34]
# depth: integer [0-2]
# flip: integer [0-1]
# x: float [1-500]
# y: float [1-400]
class Clipart(ClipartBase):
__slots__ = ()
NUM_IDX = 58
NUM_SUBTYPE = 35
NUM_DEPTH = 3
NUM_FLIP = 2
CANVAS_WIDTH = 500.0
CANVAS_HEIGHT = 400.0
NUM_EXPRESSION = 5
NUM_POSE = 7
assert NUM_SUBTYPE == (NUM_EXPRESSION * NUM_POSE)
HUMAN_IDXS = (18, 19)
def __new__(cls, idx, subtype, depth, flip, x=None, y=None, normed_x=None, normed_y=None):
if normed_x is not None:
if x is not None:
raise ValueError("The arguments x and normed_x are mutually exclusive")
x = normed_x * cls.CANVAS_WIDTH
elif x is None:
raise ValueError("Either x or normed_x is required")
if normed_y is not None:
if y is not None:
raise ValueError("The arguments y and normed_y are mutually exclusive")
y = normed_y * cls.CANVAS_HEIGHT
elif y is None:
raise ValueError("Either y or normed_y is required")
return ClipartBase.__new__(cls, idx, subtype, depth, flip, x, y)
@property
def normed_x(self):
return self.x / self.CANVAS_WIDTH
@property
def normed_y(self):
return self.y / self.CANVAS_HEIGHT
@property
def expression(self):
"""
Facial expression
"""
return self.subtype % self.NUM_EXPRESSION
@property
def pose(self):
"""
Body pose
"""
return self.subtype // self.NUM_EXPRESSION
@property
def human_idx(self):
if self.idx not in self.HUMAN_IDXS:
raise ValueError("Cannot get human_idx of non-human clipart")
return self.idx - self.HUMAN_IDXS[0]
@property
def render_order_key(self):
"""
Key that can be used to sort cliparts by the order in which they are
rendered.
"""
# Sun (idx=3) is always in the back; this is also in Abs.js
# All sky objects (idx < 8) are behind any non-sky objects
# Past that, objects are sorted by depth and then by index
return (self.idx != 3, self.idx >= 8, -self.depth, self.idx)
def _repr_svg_(self):
return abs_render.svg_from_cliparts([self])
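# For example (field values are illustrative), a Clipart can be constructed from
# absolute pixel coordinates or from normalized ones; the two lines below build
# the same clipart, since normed_x/normed_y are just x/CANVAS_WIDTH and y/CANVAS_HEIGHT:
#   Clipart(idx=3, subtype=0, depth=1, flip=0, x=250.0, y=200.0)
#   Clipart(idx=3, subtype=0, depth=1, flip=0, normed_x=0.5, normed_y=0.5)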
class AbstractScene(list):
"""
Abstract scene representation that only encodes objects which are present,
and never a library of available objects that are not in the scene
"""
def __init__(self, string_or_iterable):
if isinstance(string_or_iterable, str):
abs = abs_util_orig.AbsUtil(string_or_iterable)
if abs.obj is None:
super().__init__()
else:
super().__init__(Clipart(*c) for c in abs.obj)
else:
super().__init__(string_or_iterable)
def __repr__(self):
return "<AbstractScene " + super().__repr__() + ">"
def __str__(self):
return super().__repr__()
def _repr_svg_(self):
return abs_render.svg_from_cliparts(self)
def stringify(self):
scene_str = ""
scene_str += f"{len(self)},"
for i, clipart in enumerate(self):
img_name = abs_render.get_image_name(clipart)
prefix, num = img_name[:-5].split('_')
prefix = ['s', 'p', 'hb0', 'hb1', 'a', 'c', 'e', 't'].index(prefix)
num = int(num)
scene_str += f"{img_name},"
scene_str += f"{i},"
scene_str += f"{num},"
scene_str += f"{prefix},"
scene_str += f"{clipart.x},"
scene_str += f"{clipart.y},"
scene_str += f"{clipart.depth},"
scene_str += f"{clipart.flip},"
return scene_str
#%% Data loading helper for a particular split
def data_for_splits(split_or_splits):
if isinstance(split_or_splits, str):
splits = [split_or_splits]
else:
splits = split_or_splits
data_all = json.loads(DATASET_PATH.read_text())['data']
keys_train = sorted([k for k in data_all.keys() if k.startswith('train')])
keys_dev = sorted([k for k in data_all.keys() if k.startswith('val')])
keys_test = sorted([k for k in data_all.keys() if k.startswith('test')])
keys_all = sorted(data_all.keys())
half_train_len = len(keys_train) // 2
keys_from_split = {
'train_a': keys_train[:half_train_len],
'a': keys_train[:half_train_len],
'train_b': keys_train[half_train_len:],
'b': keys_train[half_train_len:],
'train_full': keys_train,
'dev': keys_dev,
'test': keys_test,
'all': keys_all,
}
res = []
for split in splits:
data_split = {k: data_all[k] for k in keys_from_split[split]}
res.append(data_split)
return res
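# For example, data_for_splits('dev') returns a one-element list containing the
# raw data dict for the validation keys, and data_for_splits(['a', 'b']) returns
# one dict per requested split, in the order given.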
def cached_split_wrapper(fn):
"""
Modifies the function to accept a split or list of splits instead of a
a raw data dictionary for a single split, and caches results so they don't
have to be recalculated.
"""
fn.split_to_results = {}
def deco(split_or_splits):
if isinstance(split_or_splits, str):
splits = [split_or_splits]
else:
splits = split_or_splits
uncached_splits = [split for split in splits if split not in fn.split_to_results]
uncached_splits_data = data_for_splits(uncached_splits)
for split, data in zip(uncached_splits, uncached_splits_data):
result = fn(data)
if inspect.isgenerator(result):
result = list(result)
fn.split_to_results[split] = result
if isinstance(split_or_splits, str):
return fn.split_to_results[split_or_splits]
else:
return [fn.split_to_results[split] for split in split_or_splits]
return deco
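# For example, a loader decorated with @cached_split_wrapper (such as get_scenes
# below) can be called as get_scenes('dev') or get_scenes(['train_a', 'dev']);
# repeated calls with the same split name reuse the cached result instead of
# re-reading the dataset file.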
#%% An event-based view of the CoDraw dataset
# TODO(nikita): Agent class and actor/observer are currently doing nothing.
# Is there a need for them?
class Agent(Enum):
TELLER = 0
DRAWER = 1
class Event:
def __init__(self, actor=None, observer=None):
self.actor = actor
self.observer = observer
class ObserveTruth(Event):
def __init__(self, scene):
super().__init__(observer=Agent.TELLER)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}()"
class SelectClipart(Event):
def __init__(self, clipart):
super().__init__(actor=Agent.TELLER, observer=None)
self.clipart = clipart
def __repr__(self):
return f"{type(self).__name__}(clipart={self.clipart})"
class TellerIntention(Event):
def __init__(self, drawn=None, undrawn=None, draw_next=None):
super().__init__(actor=Agent.TELLER, observer=None)
self.drawn = drawn
self.undrawn = undrawn
self.draw_next = draw_next
def __repr__(self):
return f"{type(self).__name__}(drawn={self.drawn}, undrawn={self.undrawn}, draw_next={self.draw_next})"
class TellGroup(Event):
# group because each word is an action
def __init__(self, msg):
super().__init__(actor=Agent.TELLER, observer=Agent.DRAWER)
self.msg = msg
def __repr__(self):
return f"{type(self).__name__}(msg={repr(self.msg)})"
class Peek(Event):
def __init__(self):
super().__init__(actor=Agent.TELLER, observer=None)
def __repr__(self):
return f"{type(self).__name__}()"
class TellerObserveCanvas(Event):
def __init__(self, scene):
super().__init__(observer=Agent.TELLER)
if not isinstance(scene, AbstractScene):
scene = AbstractScene(scene)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class ObserveCanvas(Event):
def __init__(self, scene):
super().__init__(observer=Agent.DRAWER)
if not isinstance(scene, AbstractScene):
scene = AbstractScene(scene)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class DrawClipart(Event):
# Draws or moves a clipart
# Since multiple copies of the same clipart are not allowed, duplicate draw
# events with the same id will result in the removal of the older instance
# of the clipart to make way for the new one.
def __init__(self, clipart):
super().__init__(actor=Agent.DRAWER, observer=None)
self.clipart = clipart
def __repr__(self):
return f"{type(self).__name__}(clipart={self.clipart})"
class DrawGroup(Event):
# Draws or moves multiple (or no) cliparts at the same time
# Since multiple copies of the same clipart are not allowed, duplicate draw
# events with the same id will result in the removal of the older instance
# of the clipart to make way for the new one.
def __init__(self, cliparts):
super().__init__(actor=Agent.DRAWER, observer=None)
self.cliparts = cliparts
def __repr__(self):
return f"{type(self).__name__}(cliparts={self.cliparts})"
class SetDrawing(Event):
    # Updates the drawer canvas to exactly match the scene argument
# This was added for transcripts of humans performing the task because
# neither DrawClipart nor DrawGroup have support for removing clipart.
def __init__(self, scene):
super().__init__(actor=Agent.DRAWER, observer=None)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class ReplyGroup(Event):
# group because each word is an action
def __init__(self, msg):
super().__init__(actor=Agent.DRAWER, observer=Agent.TELLER)
self.msg = msg
def __repr__(self):
return f"{type(self).__name__}(msg={repr(self.msg)})"
#%%
def events_from_datum_place_one(datum):
# TODO(nikita): this filtering keeps just over 25% of conversational rounds
# What do I need to do to match the 37.6% number in the arxiv paper?
# perhaps I should include the cases where a clipart is updated? But that
# only seems to bring me up to around 31%
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
strictly_additive = len(set(abs_b) - set(abs_d)) == 0
added_cliparts = set(abs_d) - set(abs_b)
if strictly_additive and len(added_cliparts) == 1 and entry['msg_t']:
added_clipart = list(added_cliparts)[0]
buffer.append(SelectClipart(added_clipart))
buffer.append(TellGroup(entry['msg_t']))
buffer.append(DrawClipart(added_clipart))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_place_one(data):
for datum in data.values():
yield from events_from_datum_place_one(datum)
#%%
def events_from_datum_place_many(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
added_cliparts = set(abs_d) - set(abs_b)
added_cliparts = sorted(added_cliparts, key=lambda c: c.render_order_key)
buffer.append(TellGroup(entry['msg_t']))
buffer.append(DrawGroup(added_cliparts))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_place_many(data):
for datum in data.values():
yield from events_from_datum_place_many(datum)
#%%
def events_from_datum_contextual_place_many(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
added_cliparts = set(abs_d) - set(abs_b)
added_cliparts = sorted(added_cliparts, key=lambda c: c.render_order_key)
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(DrawGroup(added_cliparts))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_contextual_place_many(data):
for datum in data.values():
yield from events_from_datum_contextual_place_many(datum)
# %%
def events_from_datum_set_clipart(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(SetDrawing(abs_d))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_set_clipart(data):
for datum in data.values():
yield from events_from_datum_set_clipart(datum)
# %%
def events_from_datum_set_clipart_pre_peek(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
if entry.get('peeked', False):
# Note that Peek happens before TellGroup
break
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(SetDrawing(abs_d))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_set_clipart_pre_peek(data):
for datum in data.values():
yield from events_from_datum_set_clipart_pre_peek(datum)
# %%
@cached_split_wrapper
def get_scenes(data):
for datum in data.values():
yield AbstractScene(datum['abs_t'])
# %%
@cached_split_wrapper
def get_scenes_and_scripts(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
script = []
for entry in datum['dialog']:
if entry.get('peeked', False):
script.append(Peek())
script.append(TellerObserveCanvas(AbstractScene(entry['abs_b'])))
if entry['msg_t']:
script.append(TellGroup(entry['msg_t']))
yield (scene, script)
# %%
@cached_split_wrapper
def get_scenes_and_scripts_with_peek(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
script = []
have_peeked = False
for entry in datum['dialog']:
if entry.get('peeked', False):
script.append(Peek())
script.append(TellerObserveCanvas(AbstractScene(entry['abs_b'])))
have_peeked = True
if entry['msg_t']:
script.append(TellGroup(entry['msg_t']))
# Exclude events with no Peek action, or no messages sent afterwards
if have_peeked and not isinstance(script[-1], TellerObserveCanvas):
yield (scene, script)
# %%
@cached_split_wrapper
def get_truth_and_human_scenes(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after)
@cached_split_wrapper
def get_truth_and_human_scenes_pre_peek(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
if entry.get('peeked', False):
break
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after)
@cached_split_wrapper
def get_truth_and_human_scenes_with_js_scores(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
score_after = None
for entry in datum['dialog']:
if entry.get('score', None) is not None:
score_after = entry['score']
scene_after = entry['abs_d']
assert scene_after is not None
assert score_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after, score_after)
| codraw-models-master | codraw_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Multi-headed attention implementation
"""
#%%
import numpy as np
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
#%%
class AttentionSeqToMasked(nn.Module):
def __init__(self,
d_pre_q, d_pre_k, d_pre_v,
d_qk, d_v, num_heads,
attn_dropout):
super().__init__()
self.d_qk = d_qk
self.d_v = d_v
self.num_heads = num_heads
self.q_proj = nn.Linear(d_pre_q, self.num_heads * self.d_qk)
self.k_proj = nn.Linear(d_pre_k, self.num_heads * self.d_qk)
self.v_proj = nn.Linear(d_pre_v, self.num_heads * self.d_v)
self.attn_dropout = nn.Dropout(attn_dropout)
self.d_out = self.num_heads * self.d_v
def split_heads(self, tensor):
"""
[...dims, a, num_heads x b] -> [...dims, num_heads, a, b]
"""
return tensor.view(*tensor.shape[:-1], self.num_heads, -1).transpose(-3, -2)
def join_heads(self, tensor):
"""
[...dims, num_heads, a, b] -> [...dims, a, num_heads x b]
"""
res = tensor.transpose(-3, -2).contiguous()
return res.view(*res.shape[:-2], -1)
def precompute_kv(self, pre_ks, pre_vs):
assert not self.training
ks = self.split_heads(self.k_proj(pre_ks))
vs = self.split_heads(self.v_proj(pre_vs))
return ks, vs
def forward(self, pre_qs=None, pre_ks=None, pre_vs=None, ks=None, vs=None, k_mask=None):
if isinstance(pre_qs, nn.utils.rnn.PackedSequence):
pre_qs, lengths = nn.utils.rnn.pad_packed_sequence(pre_qs, batch_first=True)
else:
lengths = None
qs = self.split_heads(self.q_proj(pre_qs))
if ks is None:
ks = self.split_heads(self.k_proj(pre_ks))
if vs is None:
vs = self.split_heads(self.v_proj(pre_vs))
attn_logits = torch.matmul(qs, ks.transpose(-2, -1)) / np.sqrt(self.d_qk)
if k_mask is not None:
# k_mask is [batch, pre_ks.shape[1]] mask signalling which values
# are valid attention targets
attn_logits = torch.where(
k_mask[:, None, None, :],
attn_logits,
torch.full_like(attn_logits, float('-inf'))
)
attn_probs = F.softmax(attn_logits, dim=-1)
attn_probs = self.attn_dropout(attn_probs)
res = self.join_heads(torch.matmul(attn_probs, vs))
if lengths is not None:
res = nn.utils.rnn.pack_padded_sequence(res, lengths, batch_first=True)
return res
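# Illustrative usage sketch (added for documentation; not part of the original
# module and not used by the models). All shapes below are hypothetical.
def _example_attention_shapes():
    attn = AttentionSeqToMasked(d_pre_q=64, d_pre_k=32, d_pre_v=32,
                                d_qk=16, d_v=16, num_heads=4,
                                attn_dropout=0.0)
    pre_qs = torch.rand(2, 7, 64)           # [batch, query positions, d_pre_q]
    pre_ks = pre_vs = torch.rand(2, 5, 32)  # [batch, key/value slots, d_pre_k/v]
    k_mask = torch.ones(2, 5) > 0           # every key/value slot is a valid target
    out = attn(pre_qs, pre_ks, pre_vs, k_mask=k_mask)
    assert out.shape == (2, 7, attn.d_out)  # d_out == num_heads * d_v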
| codraw-models-master | attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import heapq
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import BOWAddUpdateData, NearestNeighborData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek
# %%
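# A drawer that can only add cliparts to the canvas (it never moves or removes
# existing ones). Given the teller's message and a featurized view of the current
# canvas, it scores every clipart index for whether it should be added and predicts
# the attributes (subtype, depth, flip, normalized x/y) of the added cliparts.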
class BaseAddOnlyDrawer(Model, torch.nn.Module):
datagen_cls = BOWAddUpdateData
def init_full(self, d_hidden):
# Helps overcome class imbalance (most cliparts are not drawn most of
# the time)
self.positive_scaling_coeff = 3.
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
self.canvas_binary_to_hidden = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(dg.NUM_BINARY, d_hidden, bias=False),
)
self.canvas_numerical_to_hidden = nn.Sequential(
nn.Linear(dg.NUM_INDEX * dg.NUM_NUMERICAL, d_hidden, bias=False),
)
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.hidden_to_clipart = nn.Sequential(
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
def lang_to_hidden(self, msg_idxs, offsets=None):
# Offsets is None only when batch_size is 1
raise NotImplementedError("Subclasses should override this")
def forward(self, example_batch):
dg = self.datagen
hidden_feats = (
self.lang_to_hidden(example_batch['msg_idxs'], example_batch['offsets'])
+ self.canvas_binary_to_hidden(example_batch['canvas_binary'].float())
+ self.canvas_numerical_to_hidden(example_batch['canvas_numerical'])
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
correct_mask = example_batch['clipart_added_mask']
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, correct_mask.to(torch.float), reduce=False)
idx_losses = torch.where(correct_mask, self.positive_scaling_coeff * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(correct_mask, all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.ObserveCanvas)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
canvas_context = episode.get_last(codraw_data.ObserveCanvas).scene
canvas_binary = np.zeros((dg.NUM_INDEX, 1 + dg.NUM_DEPTH + dg.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, dg.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((dg.NUM_INDEX, dg.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + dg.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)[None,:].to(cuda_if_available)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)[None,:].to(cuda_if_available)
hidden_feats = (
self.lang_to_hidden(msg_idxs[None,:], None)
+ self.canvas_binary_to_hidden(canvas_binary.float())
+ self.canvas_numerical_to_hidden(canvas_numerical)
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
cliparts = []
prior_idxs = set([c.idx for c in canvas_context])
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
for idx in np.where(clipart_idx_scores > 0)[0]:
if idx in prior_idxs:
continue
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [drawer_observe_canvas, self.draw]
# %%
class BOWAddOnlyDrawer(BaseAddOnlyDrawer):
def init_full(self, d_embeddings=512, d_hidden=512):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
super().init_full(d_hidden)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
self.lang_to_hidden_module = nn.Linear(d_embeddings, d_hidden)
self.to(cuda_if_available)
def lang_to_hidden(self, msg_idxs, offsets=None):
bow_feats = self.word_embs(msg_idxs, offsets)
return self.lang_to_hidden_module(bow_feats)
# %%
class LSTMAddOnlyDrawer(BaseAddOnlyDrawer):
def init_full(self, d_embeddings=256, d_hidden=512, d_lstm=256, num_lstm_layers=1, pre_lstm_dropout=0.4, lstm_dropout=0.0):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
            d_lstm=d_lstm,
num_lstm_layers=num_lstm_layers,
pre_lstm_dropout=pre_lstm_dropout,
lstm_dropout=lstm_dropout,
)
super().init_full(d_hidden)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.Embedding(len(self.datagen.vocabulary_dict), d_embeddings)
self.pre_lstm_dropout = nn.Dropout(pre_lstm_dropout)
self.lstm = nn.LSTM(d_embeddings, d_lstm, bidirectional=True, num_layers=num_lstm_layers, dropout=lstm_dropout)
        # Truncate the concatenated final LSTM states to d_hidden instead of using
        # a learned projection such as nn.Linear(d_lstm * 2 * num_lstm_layers, d_hidden).
        self.post_lstm_project = lambda x: x[:, :d_hidden]
self.to(cuda_if_available)
def lang_to_hidden(self, msg_idxs, offsets=None):
        # Encode the message with the bidirectional LSTM and use its final cell
        # state (truncated to d_hidden) as the sentence representation. When
        # offsets is provided, msg_idxs is a flat tensor of token ids for the whole
        # batch and offsets marks where each message starts.
if offsets is not None:
start = offsets.cpu()
end = torch.cat([start[1:], torch.tensor([msg_idxs.shape[-1]])])
undo_sorting = np.zeros(start.shape[0], dtype=int)
undo_sorting[(start - end).numpy().argsort()] = np.arange(start.shape[0], dtype=int)
words_packed = nn.utils.rnn.pack_sequence(sorted([msg_idxs[i:j] for i, j in list(zip(start.numpy(), end.numpy()))], key=lambda x: -x.shape[0]))
else:
words_packed = nn.utils.rnn.pack_sequence([msg_idxs[0,:]])
undo_sorting = np.array([0], dtype=int)
word_vecs = embedded = nn.utils.rnn.PackedSequence(
self.pre_lstm_dropout(self.word_embs(words_packed.data)),
words_packed.batch_sizes)
_, (h_final, c_final) = self.lstm(word_vecs)
        # Use the final cell state (rather than the hidden state) of the top
        # bidirectional LSTM layer as the sentence representation.
        sentence_reps = c_final[-2:, :, :].permute(1, 2, 0).contiguous().view(undo_sorting.size, -1)
sentence_reps = self.post_lstm_project(sentence_reps)
if offsets is not None:
sentence_reps = sentence_reps[undo_sorting]
return sentence_reps
# %%
class PragmaticNearestNeighborTeller(Model):
datagen_cls = NearestNeighborData
def init_full(self, drawer_model=None, num_candidates=10):
self.drawer_model = drawer_model
self.num_candidates = num_candidates
def set_drawer_model(self, drawer_model):
self.drawer_model = drawer_model
def get_spec(self):
return dict(num_candidates=self.num_candidates)
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
candidate_cliparts = heapq.nlargest(self.num_candidates, self.datagen.clipart_to_msg, key=lambda cand_clipart: clipart_similarity(cand_clipart, clipart))
        # Rank the candidate messages by simulating the drawer's response to each
        # one and scoring the resulting scene against the selected clipart.
candidate_msgs = [self.datagen.clipart_to_msg[cand_clipart] for cand_clipart in candidate_cliparts]
expected_context = [event.clipart for event in episode if isinstance(event, codraw_data.SelectClipart)][:-1]
candidate_responses = [self.drawer_model.just_draw(msg, expected_context) for msg in candidate_msgs]
best_idx = np.argmax([scene_similarity(response_scene, [clipart]) for response_scene in candidate_responses])
best_msg = candidate_msgs[best_idx]
episode.append(codraw_data.TellGroup(best_msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
# %%
def load_baseline2():
baseline2_specs = torch_load(Path('models/lstmaddonly_may31.pt'))
models = {}
for k, spec in baseline2_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
# TODO(nikita): serialize these models to disk
data_nn_a = NearestNeighborData('a')
data_nn_b = NearestNeighborData('b')
print('teller_pragmaticnn_a')
models['teller_pragmaticnn_a'] = PragmaticNearestNeighborTeller(data_nn_a, drawer_model=models['drawer_lstmaddonly_a'])
print('teller_pragmaticnn_b')
models['teller_pragmaticnn_b'] = PragmaticNearestNeighborTeller(data_nn_b, drawer_model=models['drawer_lstmaddonly_b'])
return models
| codraw-models-master | baseline2_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from saved_models import load_models, make_pairs
from eval_automatic import print_eval
# %%
models = load_models(1, 2, 3, 4)
# HACK: while the RL teller for partition b is still training, substitute the
# scene2seq_aux2 teller in its place.
models['teller_rl_b'] = models['teller_scene2seq_aux2_b']
# %%
tellers = make_pairs(models,
# 'teller_nn',
# 'teller_pragmaticnn',
# 'teller_scene2seq',
# 'teller_scene2seq_aux',
# 'teller_scene2seq_aux2',
'teller_rl',
)
drawers = make_pairs(models,
# 'drawer_nn',
# 'drawer_sim',
# 'drawer_bow2c',
# 'drawer_bow2bce',
# 'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
# %%
print()
print_eval(do_human=True)
# %%
print()
print()
print_eval(tellers, drawers, limit=None, do_pairwise=True)
# %%
print()
print()
print_eval(tellers, drawers, limit=None, do_script=True, do_components_pairwise=True, do_components_script=True)
| codraw-models-master | baseline4_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns, scripted_tell
from baseline1_models import NearestNeighborTeller, CharNeighborDrawer
from baseline1_models import BOWNeighborDrawer, BOWtoClipartDrawer, ClipartToSeqTeller
from baseline1_models import BOWtoMultiBCEDrawer, BOWplusCanvasDrawer
#%%
data_nn_a = NearestNeighborData('a')
data_nn_b = NearestNeighborData('b')
teller_nn_a = NearestNeighborTeller(data_nn_a)
teller_nn_b = NearestNeighborTeller(data_nn_b)
drawer_nn_a = CharNeighborDrawer(data_nn_a)
drawer_nn_b = CharNeighborDrawer(data_nn_b)
#%%
data_sim_a = MessageSimilarityData('a')
data_sim_b = MessageSimilarityData('b')
drawer_sim_a = BOWNeighborDrawer(data_sim_a)
drawer_sim_b = BOWNeighborDrawer(data_sim_b)
optimizer_sim_a = torch.optim.Adam(drawer_sim_a.parameters())
optimizer_sim_b = torch.optim.Adam(drawer_sim_b.parameters())
#%%
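# The training loops below all follow the same pattern: train a model on one split
# ('a' or 'b') of the training data, and periodically evaluate it by pairing it
# with a partner teller/drawer (or the scripted teller) on a 100-episode sample,
# reporting the mean scene similarity.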
for epoch in range(500):
drawer_sim_a.train()
for num, ex in enumerate(drawer_sim_a.datagen.get_examples_batch()):
optimizer_sim_a.zero_grad()
loss = drawer_sim_a.forward(ex)
loss.backward()
optimizer_sim_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 25 == 0:
drawer_sim_a.prepare_for_inference()
for splits in ('aa', 'ba'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_sim_a, drawer_sim_b)), limit=100)
print(splits, sims.mean())
drawer_sim_a.prepare_for_inference()
# %%
for epoch in range(500):
drawer_sim_b.train()
for num, ex in enumerate(drawer_sim_b.datagen.get_examples_batch()):
optimizer_sim_b.zero_grad()
loss = drawer_sim_b.forward(ex)
loss.backward()
optimizer_sim_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 25 == 0:
drawer_sim_b.prepare_for_inference()
for splits in ('ab', 'bb'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_sim_a, drawer_sim_b)), limit=100)
print(splits, sims.mean())
drawer_sim_b.prepare_for_inference()
#%%
data_bow2c_a = BOWtoClipartData('a')
data_bow2c_b = BOWtoClipartData('b')
drawer_bow2c_a = BOWtoClipartDrawer(data_bow2c_a)
drawer_bow2c_b = BOWtoClipartDrawer(data_bow2c_b)
optimizer_bow2c_a = torch.optim.Adam(drawer_bow2c_a.parameters())
optimizer_bow2c_b = torch.optim.Adam(drawer_bow2c_b.parameters())
# %%
for epoch in range(20):
drawer_bow2c_a.train()
for num, ex in enumerate(drawer_bow2c_a.datagen.get_examples_batch()):
optimizer_bow2c_a.zero_grad()
loss = drawer_bow2c_a.forward(ex)
loss.backward()
optimizer_bow2c_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('aa', 'ba'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
#%%
for epoch in range(20):
drawer_bow2c_b.train()
for num, ex in enumerate(drawer_bow2c_b.datagen.get_examples_batch()):
optimizer_bow2c_b.zero_grad()
loss = drawer_bow2c_b.forward(ex)
loss.backward()
optimizer_bow2c_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('ab', 'bb'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
#%%
data_c2seq_a = ClipartToSeqData('a')
data_c2seq_b = ClipartToSeqData('b')
teller_c2seq_a = ClipartToSeqTeller(data_c2seq_a)
teller_c2seq_b = ClipartToSeqTeller(data_c2seq_b)
optimizer_c2seq_a = torch.optim.Adam(teller_c2seq_a.parameters())
optimizer_c2seq_b = torch.optim.Adam(teller_c2seq_b.parameters())
#%%
for epoch in range(80):
teller_c2seq_a.train()
for num, ex in enumerate(teller_c2seq_a.datagen.get_examples_batch()):
optimizer_c2seq_a.zero_grad()
loss = teller_c2seq_a(ex)
loss.backward()
optimizer_c2seq_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('aa', 'ab'):
sims = eval_fns(make_fns(splits, (teller_c2seq_a, teller_c2seq_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
if epoch % 50 == 49:
optimizer_c2seq_a.param_groups[0]['lr'] *= 0.5
print("Learning rate reduced to", optimizer_c2seq_a.param_groups[0]['lr'])
#%%
for epoch in range(80):
teller_c2seq_b.train()
for num, ex in enumerate(teller_c2seq_b.datagen.get_examples_batch()):
optimizer_c2seq_b.zero_grad()
loss = teller_c2seq_b(ex)
loss.backward()
optimizer_c2seq_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_c2seq_a, teller_c2seq_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
if epoch % 50 == 49:
optimizer_c2seq_b.param_groups[0]['lr'] *= 0.5
print("Learning rate reduced to", optimizer_c2seq_b.param_groups[0]['lr'])
#%%
data_bowcanvas_a = BOWplusCanvasToMultiData('a')
data_bowcanvas_b = BOWplusCanvasToMultiData('b')
drawer_bow2bce_a = BOWtoMultiBCEDrawer(data_bowcanvas_a)
drawer_bow2bce_b = BOWtoMultiBCEDrawer(data_bowcanvas_b)
optimizer_bow2bce_a = torch.optim.Adam(drawer_bow2bce_a.parameters())
optimizer_bow2bce_b = torch.optim.Adam(drawer_bow2bce_b.parameters())
#%%
for epoch in range(5):
drawer_bow2bce_a.train()
for num, ex in enumerate(drawer_bow2bce_a.datagen.get_examples_batch()):
optimizer_bow2bce_a.zero_grad()
loss = drawer_bow2bce_a.forward(ex)
loss.backward()
optimizer_bow2bce_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bow2bce_a, drawer_bow2bce_b)), limit=100)
print(split, sims.mean())
#%%
for epoch in range(5):
drawer_bow2bce_b.train()
for num, ex in enumerate(drawer_bow2bce_b.datagen.get_examples_batch()):
optimizer_bow2bce_b.zero_grad()
loss = drawer_bow2bce_b.forward(ex)
loss.backward()
optimizer_bow2bce_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bow2bce_a, drawer_bow2bce_b)), limit=100)
print(split, sims.mean())
#%%
drawer_bowcanvas2bce_a = BOWplusCanvasDrawer(data_bowcanvas_a)
drawer_bowcanvas2bce_b = BOWplusCanvasDrawer(data_bowcanvas_b)
optimizer_bowcanvas2bce_a = torch.optim.Adam(drawer_bowcanvas2bce_a.parameters())
optimizer_bowcanvas2bce_b = torch.optim.Adam(drawer_bowcanvas2bce_b.parameters())
#%%
for epoch in range(15):
drawer_bowcanvas2bce_a.train()
for num, ex in enumerate(drawer_bowcanvas2bce_a.datagen.get_examples_batch()):
optimizer_bowcanvas2bce_a.zero_grad()
loss = drawer_bowcanvas2bce_a.forward(ex)
loss.backward()
optimizer_bowcanvas2bce_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowcanvas2bce_a, drawer_bowcanvas2bce_b)), limit=100)
print(split, sims.mean())
#%%
for epoch in range(15):
drawer_bowcanvas2bce_b.train()
for num, ex in enumerate(drawer_bowcanvas2bce_b.datagen.get_examples_batch()):
optimizer_bowcanvas2bce_b.zero_grad()
loss = drawer_bowcanvas2bce_b.forward(ex)
loss.backward()
optimizer_bowcanvas2bce_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowcanvas2bce_a, drawer_bowcanvas2bce_b)), limit=100)
print(split, sims.mean())
#%%
baseline1_specs = dict(
teller_nn_a = teller_nn_a.spec,
teller_nn_b = teller_nn_b.spec,
drawer_nn_a = drawer_nn_a.spec,
drawer_nn_b = drawer_nn_b.spec,
drawer_sim_a = drawer_sim_a.spec,
drawer_sim_b = drawer_sim_b.spec,
drawer_bow2c_a = drawer_bow2c_a.spec,
drawer_bow2c_b = drawer_bow2c_b.spec,
teller_c2seq_a = teller_c2seq_a.spec,
teller_c2seq_b = teller_c2seq_b.spec,
drawer_bow2bce_a = drawer_bow2bce_a.spec,
drawer_bow2bce_b = drawer_bow2bce_b.spec,
drawer_bowcanvas2bce_a = drawer_bowcanvas2bce_a.spec,
drawer_bowcanvas2bce_b = drawer_bowcanvas2bce_b.spec,
)
#%%
torch.save(baseline1_specs, Path('models/baseline1.pt'))
| codraw-models-master | baseline1_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def scene_similarity_orig(pred, target):
"""
DEPRECATED: use scene_similarity instead!
This is a re-implementation of the original CoDraw similarity metric, as per
https://arxiv.org/abs/1712.05558v1
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = 1
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
denom[1] += 1
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
denom[2] += 1
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[3] += 1
denom[4] += 1
for idx_i in range(len(match1)):
for idx_j in range(len(match1)):
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# NOTE(nikita): the metric, as originally defined, pairs up objects
# with themselves, and also yields misleadingly high results for
# models that place multiple clipart at the exact same location
# (e.g. a model that places all clipart in the center of the canvas
# will receive zero relative-position penalty)
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) < 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) < 0)
denom[5] += 1
denom[6] += 1
denom = np.maximum(denom, 1)
score_components = iou * (num / denom)
score_weights = np.array([5,-1,-1,-1,-1,-0.5,-0.5])
return score_components @ score_weights
def scene_similarity_v1(pred, target):
"""
DEPRECATED: use scene_similarity instead!
The similarity metric used for initial experiments prior to June 8, 2018.
Both this metric and scene_similarity_orig have corner cases where adding a
new, correct clipart to the scene can actually cause the similarity score
to decrease.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = 1
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
denom[1] += 1
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
denom[2] += 1
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[3] += 1
denom[4] += 1
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[5] += 1
denom[6] += 1
denom = np.maximum(denom, 1)
score_components = iou * (num / denom)
score_weights = np.array([5,-1,-1,-1,-1,-0.5,-0.5])
return score_components @ score_weights
def scene_similarity_v2(pred, target):
"""
DEPRECATED: use scene_similarity instead!
This version of the scene similarity metric should be monotonic, in the
sense that adding correct clipart should always increase the score, adding
incorrect clipart should decrease it, and removing incorrect clipart should
increase it.
This version jointly scores subtype/flip/depth for humans, which was later
    replaced with more fine-grained scoring.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
intersection_size = len(idx1 & idx2)
union_size = len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = intersection_size
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[:5] = union_size
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[5:] = union_size * (intersection_size - 1)
denom = np.maximum(denom, 1)
score_components = num / denom
score_weights = np.array([5,-1,-1,-1,-1,-1,-1])
return score_components @ score_weights
def scene_similarity(pred, target):
"""
This version of the scene similarity metric should be monotonic, in the
sense that adding correct clipart should always increase the score, adding
incorrect clipart should decrease it, and removing incorrect clipart should
    increase it. It also breaks out the different components of Mike/Jenny
    (flip, expression, and pose) and caps the distance error at 1.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
intersection_size = len(idx1 & idx2)
union_size = len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(8)
denom = np.zeros(8)
num[0] = intersection_size
for c1, c2 in zip(match1, match2):
num[1] += int(c1.flip != c2.flip)
if c1.idx in c1.HUMAN_IDXS:
num[2] += int(c1.expression != c2.expression)
num[3] += int(c1.pose != c2.pose)
num[4] += int(c1.depth != c2.depth)
num[5] += min(1.0, np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2))
denom[:6] = union_size
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[6] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[7] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[6:] = union_size * (intersection_size - 1)
denom = np.maximum(denom, 1)
score_components = num / denom
score_weights = np.array([5,-1,-0.5,-0.5,-1,-1,-1,-1])
return score_components @ score_weights
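# Illustrative sketch (added for documentation; not used elsewhere). It assumes the
# Clipart constructor signature used by the models in this repository (positional
# idx/subtype/depth/flip plus normed_x/normed_y keywords).
def _example_perfect_reconstruction_scores_five():
    from codraw_data import Clipart  # imported locally, only when the example runs
    scene = [Clipart(0, 0, 0, 0, normed_x=0.5, normed_y=0.5)]
    # Only the intersection term fires (weight 5) and every penalty term is zero.
    assert abs(scene_similarity(scene, scene) - 5.0) < 1e-9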
def clipart_similarity_v1(a, b):
"""
DEPRECATED: use clipart_similarity instead!
The original clipart similarity metric, before subtype was split into
pose/expression
"""
if a.idx != b.idx:
return 0
score = 5
score -= int(a.subtype != b.subtype or a.flip != b.flip)
score -= int(a.depth != b.depth)
score -= np.sqrt((a.normed_x - b.normed_x) ** 2 + (a.normed_y - b.normed_y) ** 2)
return score
def clipart_similarity(a, b):
"""
This version of the metric splits out subtype into pose/expression, and caps
distance error at 1.
"""
if a.idx != b.idx:
return 0
score = 5
score -= int(a.flip != b.flip)
score -= 0.5 * int(a.expression != b.expression)
score -= 0.5 * int(a.pose != b.pose)
score -= int(a.depth != b.depth)
score -= min(1.0, np.sqrt((a.normed_x - b.normed_x) ** 2 + (a.normed_y - b.normed_y) ** 2))
return score
| codraw-models-master | abs_metric.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
from IPython.display import display
except ImportError:
assert not INTERACTIVE
def display(*args, **kwargs):
pass
import functools
from pathlib import Path
import datetime
import abs_render
import codraw_data
from abs_metric import scene_similarity
class Episode(list):
def get_last(self, event_type):
for event in reversed(self):
if isinstance(event, event_type):
return event
return None
def reconstruct(self):
reconstructed_scene = []
for event in self:
if isinstance(event, codraw_data.DrawClipart):
reconstructed_scene = [c for c in reconstructed_scene if c.idx != event.clipart.idx]
reconstructed_scene.append(event.clipart)
elif isinstance(event, codraw_data.DrawGroup):
reconstructed_scene = [c for c in reconstructed_scene if c.idx not in [c2.idx for c2 in event.cliparts]]
reconstructed_scene.extend(event.cliparts)
return codraw_data.AbstractScene(reconstructed_scene)
def display(self):
scene = None
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
elif isinstance(event, codraw_data.SelectClipart):
display(event.clipart)
elif isinstance(event, codraw_data.DrawClipart):
abs_render.display_cliparts([event.clipart], color='red', scale=0.75)
elif isinstance(event, codraw_data.DrawGroup):
abs_render.display_cliparts(event.cliparts, color='red', scale=0.75)
elif isinstance(event, codraw_data.TellGroup):
print("TELLER:", event.msg)
elif isinstance(event, codraw_data.ReplyGroup):
print("DRAWER:", event.msg)
elif isinstance(event, codraw_data.TellerIntention):
if event.drawn is not None:
abs_render.display_cliparts(event.drawn, color='purple', label='drawn', scale=0.33)
if event.draw_next is not None:
abs_render.display_cliparts(event.draw_next, color='yellow', label='draw next', scale=0.33)
if event.undrawn is not None:
abs_render.display_cliparts(event.undrawn, color='cyan', label='undrawn', scale=0.33)
print('===')
reconstructed_scene = self.reconstruct()
abs_render.display_cliparts(scene, label='ground truth', scale=0.75)
abs_render.display_cliparts(reconstructed_scene, color='red', label='reconstructed', scale=0.75)
print('Similarity =', scene_similarity(reconstructed_scene, scene))
def to_html(self):
res = ""
scene = None
delayed_selected_clipart = ""
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
elif isinstance(event, codraw_data.SelectClipart):
delayed_selected_clipart += abs_render.svg_from_cliparts([event.clipart], inline_images=False)
elif isinstance(event, codraw_data.DrawClipart):
res += delayed_selected_clipart
delayed_selected_clipart = ""
res += abs_render.svg_from_cliparts([event.clipart], color='red', inline_images=False)
elif isinstance(event, codraw_data.DrawGroup):
res += delayed_selected_clipart
delayed_selected_clipart = ""
res += abs_render.svg_from_cliparts(event.cliparts, color='red', inline_images=False)
elif isinstance(event, codraw_data.TellGroup):
res += f"<p>TELLER: {event.msg}</p>"
elif isinstance(event, codraw_data.ReplyGroup):
res += f"<p>DRAWER: {event.msg}</p>"
elif isinstance(event, codraw_data.TellerIntention):
if event.drawn is not None:
res += abs_render.svg_from_cliparts(event.drawn, color='purple', label='drawn', scale=0.33)
if event.draw_next is not None:
res += abs_render.svg_from_cliparts(event.draw_next, color='yellow', label='draw next', scale=0.33)
if event.undrawn is not None:
res += abs_render.svg_from_cliparts(event.undrawn, color='cyan', label='undrawn', scale=0.33)
res += f"<p>===</p>"
reconstructed_scene = self.reconstruct()
res += abs_render.svg_from_cliparts(scene, label='ground truth', inline_images=False)
res += abs_render.svg_from_cliparts(reconstructed_scene, color='red', label='reconstructed', inline_images=False)
res += f"<p>Similarity = {scene_similarity(reconstructed_scene, scene)}</p>"
return res
def write_html(self, name_or_path):
if isinstance(name_or_path, Path):
path = name_or_path
else:
path = Path(f"./renders/{name_or_path}.html").resolve()
assert not path.exists(), "File already exists!"
assert path.parent.exists(), "Parent directory does not exist"
path.write_text(self.to_html())
def get_true_scene(self):
scene = None
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
assert scene is not None, "Episode has no ObserveTruth events"
return scene
def scene_similarity(self):
return scene_similarity(self.reconstruct(), self.get_true_scene())
@classmethod
def run(cls, scene, fns):
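        # Repeatedly dispatch to the first action function whose trigger types
        # match the most recent event; the episode ends as soon as a function
        # declines to append any new events.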
episode = cls([codraw_data.ObserveTruth(scene)])
while True:
for fn in fns:
if type(episode[-1]) in fn._trigger_types:
old_len = len(episode)
fn(episode)
if len(episode) == old_len:
return episode
break
else:
assert False, f"No response for event: {type(episode[-1]).__name__}"
@classmethod
def run_script(cls, scene_and_script, fns):
scene, script = scene_and_script
episode = cls([codraw_data.ObserveTruth(scene)])
episode.script = script
episode.script_index = 0
while True:
for fn in fns:
if type(episode[-1]) in fn._trigger_types:
old_len = len(episode)
fn(episode)
if len(episode) == old_len:
return episode
break
else:
assert False, f"No response for event: {type(episode[-1]).__name__}"
def respond_to(*event_types):
types = set([(x if issubclass(x, codraw_data.Event) else None) for x in event_types])
assert None not in types, "Invalid event type in decorator"
def deco(fn):
if hasattr(fn, '_trigger_types'):
fn._trigger_types |= types
else:
fn._trigger_types = types
return fn
return deco
def response_partial(fn, *args, **kwargs):
res = functools.partial(fn, *args, **kwargs)
res._trigger_types = fn._trigger_types
return res
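# A minimal sketch (added for documentation; not used by the original code) of how
# respond_to is meant to be used, mirroring the model classes elsewhere in this
# repository: it tags a function with the event types that trigger it, and
# Episode.run dispatches on the type of the most recent event.
@respond_to(codraw_data.TellGroup)
def _example_noop_drawer(episode):
    # A trivial drawer response: draw nothing and reply "ok".
    episode.append(codraw_data.DrawGroup([]))
    episode.append(codraw_data.ReplyGroup("ok"))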
class Transcriber:
def __init__(self, filename, scenes=None, scenes_description="", scenes_and_scripts=None):
self.filename = filename
if scenes is not None:
self.scene_data = scenes
self.use_script = False
else:
self.scene_data = scenes_and_scripts
self.use_script = True
self.scenes_description = scenes_description
def __call__(self, name_or_path, description="", **partition_to_fns):
if isinstance(name_or_path, Path):
path = name_or_path
else:
path = Path(f"./renders/{name_or_path}.html").resolve()
assert not path.exists(), "File already exists!"
assert path.parent.exists(), "Parent directory does not exist"
assert isinstance(description, str)
res = ""
res += f"<p>Filename: {self.filename}</p>"
res += f"<p>Scenes: {self.scenes_description}</p>"
res += f"<p>Started: {datetime.datetime.now()}</p>"
res += f"<p>Description: {description}</p>"
for partition, fns in partition_to_fns.items():
res += f"<p></p>"
res += f"<h2>Partition {partition}</h2>"
for i, scene_datum in enumerate(self.scene_data):
res += f'<h3 id="{partition}_{i}">Scene {i} <a href="#{partition}_{i}">[here]</a></h3>'
if not self.use_script:
res += Episode.run(scene_datum, fns).to_html()
else:
res += Episode.run_script(scene_datum, fns).to_html()
path.write_text(res)
| codraw-models-master | episode.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import redis
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_PASSWORD = 'YOUR PASSWORD HERE'
REDIS_CONNECTION = None
def connect_to_redis():
global REDIS_CONNECTION
if REDIS_CONNECTION is None:
REDIS_CONNECTION = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=0)
return REDIS_CONNECTION
| codraw-models-master | example.eval_server_common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['cpu', 'cuda_if_available', 'logsumexp', 'torch_load']
import torch
# %%
cpu = torch.device('cpu')
if torch.cuda.is_available():
cuda_if_available = torch.device('cuda')
else:
cuda_if_available = cpu
# %%
# https://github.com/pytorch/pytorch/issues/2591
def logsumexp(x, dim=None, keepdim=False):
if dim is None:
x, dim = x.view(-1), 0
xm, _ = torch.max(x, dim, keepdim=True)
x = torch.where(
(xm == float('inf')) | (xm == float('-inf')),
xm,
xm + torch.log(torch.sum(torch.exp(x - xm), dim, keepdim=True)))
return x if keepdim else x.squeeze(dim)
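# Illustrative check (added for documentation): for finite inputs this agrees with
# torch.logsumexp; the torch.where guard above handles slices whose maximum is
# +inf or -inf, where the naive formula would produce NaN.
def _example_logsumexp_matches_torch():
    x = torch.randn(3, 4)
    assert torch.allclose(logsumexp(x, dim=1), torch.logsumexp(x, dim=1))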
# %%
def torch_load(*args, **kwargs):
if cuda_if_available == cpu:
return torch.load(*args, map_location=lambda storage, loc: storage, **kwargs)
else:
return torch.load(*args, **kwargs)
| codraw-models-master | nkfb_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from datagen import SceneToSeqData
from model import make_fns, eval_fns
from model import Model
# %%
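# The scene2seq teller: an LSTM language model over teller utterances, conditioned
# on the ground-truth scene through attention over per-clipart tag embeddings
# (both before and after the LSTM). It emits one utterance per conversation round
# and stops when it generates the special </TELL> token. An optional auxiliary
# head predicts, at the start of each round, the drawer's clipart state (e.g.
# which cliparts remain undrawn).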
class SceneToSeqTeller(Model, torch.nn.Module):
datagen_cls = SceneToSeqData
def init_full(self,
d_word_emb=256,
d_tag_emb=128, num_heads=4, d_qkv=128,
pre_attn_tag_dropout=0.2, attn_dropout=0.1,
d_lstm=1024, num_lstm_layers=1,
pre_lstm_emb_dropout=0.5,
pre_lstm_scene_dropout=0.15,
lstm_dropout=0.0,
post_lstm_dropout=0.3,
label_smoothing=0.05,
prediction_loss_scale=5.,
d_clipart_state_hidden=1024,
predict_for_full_library=True,
):
self._args = dict(
d_word_emb=d_word_emb,
d_tag_emb=d_tag_emb, num_heads=num_heads, d_qkv=d_qkv,
pre_attn_tag_dropout=pre_attn_tag_dropout,
attn_dropout=attn_dropout,
d_lstm=d_lstm, num_lstm_layers=num_lstm_layers, pre_lstm_emb_dropout=pre_lstm_emb_dropout,
pre_lstm_scene_dropout=pre_lstm_scene_dropout,
lstm_dropout=lstm_dropout,
post_lstm_dropout=post_lstm_dropout,
label_smoothing=label_smoothing,
prediction_loss_scale=prediction_loss_scale,
d_clipart_state_hidden=d_clipart_state_hidden,
predict_for_full_library=predict_for_full_library,
)
dg = self.datagen
self.tag_embs = nn.Embedding(dg.NUM_TAGS, d_tag_emb)
self.d_clipart_tags = d_tag_emb * dg.NUM_TAGS_PER_INDEX
self.pre_attn_tag_dropout = nn.Dropout(pre_attn_tag_dropout)
self.attn_prelstm = AttentionSeqToMasked(
d_pre_q=d_word_emb,
d_pre_k=self.d_clipart_tags,
d_pre_v=self.d_clipart_tags,
d_qk=d_qkv, d_v=d_qkv,
num_heads=num_heads,
attn_dropout=attn_dropout)
self.attn = AttentionSeqToMasked(
d_pre_q=d_lstm,
d_pre_k=self.d_clipart_tags,
d_pre_v=self.d_clipart_tags,
d_qk=d_qkv, d_v=d_qkv,
num_heads=num_heads,
attn_dropout=attn_dropout)
self.word_embs = nn.Embedding(len(self.datagen.vocabulary_dict), d_word_emb)
self.pre_lstm_emb_dropout = nn.Dropout(pre_lstm_emb_dropout)
self.pre_lstm_scene_dropout = nn.Dropout(pre_lstm_scene_dropout)
self.lstm = nn.LSTM(d_word_emb + self.attn_prelstm.d_out, d_lstm, num_layers=num_lstm_layers, dropout=lstm_dropout)
self.post_lstm_dropout = nn.Dropout(post_lstm_dropout)
self.word_project = nn.Linear(d_lstm + self.attn.d_out, len(self.datagen.vocabulary_dict))
self.label_smoothing = label_smoothing
# Possible auxiliary loss for predicting clipart state
self.prediction_loss_scale = prediction_loss_scale
self.predict_for_full_library = predict_for_full_library
if prediction_loss_scale > 0:
if predict_for_full_library:
d_clipart_state_in = d_lstm + dg.NUM_INDEX
else:
d_clipart_state_in = d_lstm
self.clipart_state_predictor = nn.Sequential(
nn.Linear(d_clipart_state_in, d_clipart_state_hidden),
nn.ReLU(),
nn.Linear(d_clipart_state_hidden, dg.NUM_INDEX * dg.NUM_CLIPART_STATES),
)
else:
self.clipart_state_predictor = None
self.to(cuda_if_available)
self.inference_method = 'greedy'
self.sampling_temperature = 1.0
self.max_rounds = 50 # This is only changed for human eval
def get_spec(self):
return self._args
def print_hparams(self):
print("Hyperparameters:")
for k, v in self._args.items():
print(k, '=', v)
print()
def forward(self, example_batch, return_loss=True, return_nll_count=False):
dg = self.datagen
b_clipart_tags = self.tag_embs(example_batch['b_scene_tags']).view(-1, dg.NUM_INDEX, self.d_clipart_tags)
if not (return_loss or return_nll_count):
ks_prelstm, vs_prelstm = self.attn_prelstm.precompute_kv(b_clipart_tags, b_clipart_tags)
ks, vs = self.attn.precompute_kv(b_clipart_tags, b_clipart_tags)
return example_batch['b_scene_mask'], ks_prelstm, vs_prelstm, ks, vs
packer = example_batch['packer']
ob_clipart_tags = packer.ob_from_b(b_clipart_tags)
ob_clipart_tags = self.pre_attn_tag_dropout(ob_clipart_tags)
ob_scene_mask = packer.ob_from_b(example_batch['b_scene_mask'])
brw_teller_tokens_in = example_batch['brw_teller_tokens_in']
if self.training:
word_dropout_probs = 1. / (1. + example_batch['brw_teller_counts_in'])
brw_word_dropout_mask = torch.rand_like(word_dropout_probs) < word_dropout_probs
brw_teller_tokens_in = torch.where(brw_word_dropout_mask, torch.full_like(brw_teller_tokens_in, dg.unk_index), brw_teller_tokens_in)
brw_embs = self.pre_lstm_emb_dropout(self.word_embs(brw_teller_tokens_in))
orwb_embs = packer.orwb_from_brw_pack(brw_embs)
orwb_attended_values_prelstm = self.attn_prelstm(orwb_embs, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
orwb_lstm_in = nn.utils.rnn.PackedSequence(torch.cat([
orwb_embs.data,
orwb_attended_values_prelstm.data,
], -1), orwb_embs.batch_sizes)
orwb_lstm_out, _ = self.lstm(orwb_lstm_in)
orwb_lstm_out = nn.utils.rnn.PackedSequence(self.post_lstm_dropout(orwb_lstm_out.data), orwb_lstm_out.batch_sizes)
orwb_attended_values = self.attn(orwb_lstm_out, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
brw_pre_project = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out),
packer.brw_from_orwb_unpack(orwb_attended_values),
], -1)
brw_word_logits = self.word_project(brw_pre_project)
brw_word_losses = F.cross_entropy(brw_word_logits, example_batch['brw_teller_tokens_out'], reduce=False)
if self.prediction_loss_scale > 0:
brw_starts_round = (example_batch['brw_teller_tokens_in'] == dg.vocabulary_dict['<S>'])
if self.predict_for_full_library:
br_clipart_state_predictor_in = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out)[brw_starts_round],
packer.br_from_b_expand(example_batch['b_scene_mask']).to(torch.float),
], -1)
else:
br_clipart_state_predictor_in = packer.brw_from_orwb_unpack(orwb_lstm_out)[brw_starts_round]
bri_clipart_state_logits = self.clipart_state_predictor(br_clipart_state_predictor_in).view(-1, dg.NUM_CLIPART_STATES)
bri_clipart_state_losses = F.cross_entropy(bri_clipart_state_logits, example_batch['br_drawer_clipart_state'].view(-1), reduce=False)
if self.predict_for_full_library:
br_clipart_state_losses = bri_clipart_state_losses.view(-1, dg.NUM_INDEX).sum(-1)
else:
br_clipart_state_losses = torch.where(
packer.br_from_b_expand(example_batch['b_scene_mask']),
bri_clipart_state_losses.view(-1, dg.NUM_INDEX),
torch.zeros_like(bri_clipart_state_losses.view(-1, dg.NUM_INDEX))).sum(-1)
if return_loss:
# Label smoothing
eps = (self.label_smoothing / brw_word_logits.shape[-1])
brw_word_losses = (1. - self.label_smoothing) * brw_word_losses + eps * (-F.log_softmax(brw_word_logits, dim=-1).sum(dim=-1))
# TODO(nikita): Packer should implement some reduction operations
per_example_word_losses = nn.utils.rnn.pad_packed_sequence(packer.orwb_from_brw_pack(brw_word_losses))[0].sum(0)
word_loss = per_example_word_losses.mean()
if self.prediction_loss_scale > 0:
per_example_prediction_losses = nn.utils.rnn.pad_packed_sequence(packer.srb_from_br_pack(br_clipart_state_losses))[0].sum(0)
prediction_loss = per_example_prediction_losses.mean()
return self.prediction_loss_scale * prediction_loss + word_loss
else:
return word_loss
if return_nll_count:
            # TODO(nikita): the model uses multiple tokens to signal the end of
            # the last utterance, followed by the end of the conversation. These
            # extra actions make its perplexity not directly comparable to that of
            # models that handle stop tokens differently.
brw_non_unk_mask = example_batch['brw_teller_tokens_out'] != dg.unk_index
brw_nll = torch.where(brw_non_unk_mask, brw_word_losses, torch.zeros_like(brw_word_losses))
nll = float(brw_nll.sum())
count = int(brw_non_unk_mask.long().sum())
return nll, count
assert False, "unreachable"
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def tell(self, episode):
if not hasattr(episode, 'to_tell'):
self.prepare(episode)
if episode.to_tell:
events = episode.to_tell.pop(0)
episode.extend(events)
def prepare(self, episode):
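        # Generate the teller's utterances for the whole conversation in a single
        # pass (this teller never conditions on the drawer's replies), storing one
        # list of events per round; tell() then emits them round by round.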
true_scene = episode.get_last(codraw_data.ObserveTruth).scene
example_batch = self.datagen.tensors_from_episode(episode)
b_scene_mask, ks_prelstm, vs_prelstm, ks, vs = self.forward(example_batch, return_loss=False)
to_tell = []
lstm_state = None # carried across conversation rounds!
for round in range(self.max_rounds):
tokens = [self.datagen.vocabulary_dict['<S>']]
events_this_round = []
# Longest utterance in all of CoDraw is 39 words
# Humans have a 140-char limit, but this is not easy to enforce with
# word-level tokenization
for wordnum in range(50):
token_emb = self.word_embs(torch.tensor(tokens[-1], dtype=torch.long).to(cuda_if_available))[None,None,:]
attended_values_prelstm = self.attn_prelstm(token_emb, ks=ks_prelstm, vs=vs_prelstm, k_mask=b_scene_mask)
lstm_in = torch.cat([token_emb, attended_values_prelstm], -1)
lstm_out, lstm_state = self.lstm(lstm_in, lstm_state)
attended_values = self.attn(lstm_out, ks=ks, vs=vs, k_mask=b_scene_mask)
pre_project = torch.cat([lstm_out, attended_values], -1)
if tokens[-1] == self.datagen.vocabulary_dict['<S>'] and self.prediction_loss_scale > 0:
assert not events_this_round
if self.predict_for_full_library:
clipart_state_predictor_in = torch.cat([
lstm_out,
b_scene_mask.to(torch.float)[None,:,:],
], -1)
else:
clipart_state_predictor_in = lstm_out
clipart_state_logits = self.clipart_state_predictor(clipart_state_predictor_in).view(self.datagen.NUM_INDEX, self.datagen.NUM_CLIPART_STATES)
clipart_state_selected = clipart_state_logits.argmax(dim=-1)
undrawn = AbstractScene([c for c in true_scene if clipart_state_selected[c.idx] == self.datagen.CLIPART_STATE_UNDRAWN])
intention = codraw_data.TellerIntention(drawn=None, undrawn=undrawn, draw_next=None)
events_this_round.append(intention)
word_logits = self.word_project(pre_project[0,0,:])
word_logits[self.datagen.vocabulary_dict['<S>']] = -float('inf')
if round == 0 and wordnum == 0:
word_logits[self.datagen.vocabulary_dict['</TELL>']] = -float('inf')
if self.inference_method == 'greedy':
next_token = int(word_logits.argmax())
elif self.inference_method == 'sample':
next_token = int(torch.multinomial(F.softmax(word_logits / self.sampling_temperature, dim=-1)[None, :], 1).item())
else:
raise ValueError(f"Invalid inference_method: {self.inference_method}")
assert next_token != self.datagen.vocabulary_dict['<S>']
tokens.append(next_token)
if next_token == self.datagen.vocabulary_dict['</S>']:
break
elif next_token == self.datagen.vocabulary_dict['</TELL>']:
break
if tokens[-1] == self.datagen.vocabulary_dict['</TELL>']:
break
msg = " ".join([self.datagen.vocabulary[i] for i in tokens[1:-1]])
events_this_round.append(codraw_data.TellGroup(msg))
to_tell.append(events_this_round)
episode.to_tell = to_tell
def get_action_fns(self):
return [self.tell]
def calc_split_loss(self, split='dev'):
"""
Calculates teller loss on a full split
"""
datagen_spec = {**self.datagen.spec}
datagen_spec['split'] = split
datagen_dev = self.datagen_cls(spec=datagen_spec)
assert datagen_dev.vocabulary == self.datagen.vocabulary
losses = []
count = 0
with torch.no_grad():
self.eval()
for ex in datagen_dev.get_examples_unshuffled_batch(batch_size=128):
batch_size = ex['b_scene_mask'].shape[0]
loss = self.forward(ex)
loss = float(loss) * batch_size
losses.append(loss)
count += batch_size
return np.array(losses).sum() / count
# %%
def load_baseline3():
baseline3_specs = torch_load(Path('models/scene2seq_july11.pt'))
models = {}
for k, spec in baseline3_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
models[k].eval()
return models
| codraw-models-master | baseline3_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
from collections import Counter
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
from packer import Packer
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
#%%
class Datagen:
    # The spec contains summaries (like a vocab list), but the events are stored
    # as a pointer rather than as the actual events dictionary. The events get
    # restored only if needed (which shouldn't really happen, because saved
    # models won't need to be trained further).
def __init__(self, split=None, spec=None, **kwargs):
self._examples_cache = None
if spec is not None:
self.split = spec['split']
self.init_from_spec(**{k: v for (k,v) in spec.items() if k != 'split'})
else:
self.split = split
self.init_full(**kwargs)
def init_full(self):
raise NotImplementedError("Subclasses should override this")
def init_from_spec(self):
raise NotImplementedError("Subclasses should override this")
def calc_derived(self):
pass
def get_spec(self):
return {}
@property
def spec(self):
spec = self.get_spec()
if 'split' not in spec:
spec['split'] = self.split
return spec
def get_examples(self):
raise NotImplementedError("Subclasses should override this")
def collate(self, batch):
raise NotImplementedError("Subclasses should override this")
def get_examples_batch(self, batch_size=16):
if self._examples_cache is None:
self._examples_cache = list(self.get_examples())
batch = []
epoch_examples = self._examples_cache[:]
np.random.shuffle(epoch_examples)
for ex in epoch_examples:
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
def get_examples_unshuffled_batch(self, batch_size=16):
"""
Does not shuffle, and the last batch may contain less elements.
Originally added for perplexity evaluation.
"""
if self._examples_cache is None:
self._examples_cache = list(self.get_examples())
batch = []
epoch_examples = self._examples_cache[:]
for ex in epoch_examples:
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
if batch:
yield self.collate(batch)
#%%
class NearestNeighborData(Datagen):
def init_full(self):
self.build_dicts()
def init_from_spec(self):
self.build_dicts()
def build_dicts(self):
# calculate events
events = codraw_data.get_place_one(self.split)
self.msg_to_clipart = {}
self.clipart_to_msg = {}
it = iter(events)
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
self.msg_to_clipart[msg] = clipart
self.clipart_to_msg[clipart] = msg
#%%
class MessageSimilarityData(Datagen):
def init_full(self):
self.build_dicts()
vocabulary = set()
for msg in self.msg_to_clipart:
vocabulary |= set(msg.split())
self.vocabulary = sorted(vocabulary)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.build_dicts()
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def build_dicts(self):
events = codraw_data.get_place_one(self.split)
self.msg_to_clipart = {}
it = iter(events)
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
assert msg != ""
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
self.msg_to_clipart[msg] = clipart
def calc_derived(self):
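        # Precompute, for every message, the candidate messages whose cliparts are
        # highly similar (similarity > 4.5, falling back to the single most similar
        # message if none qualify) and those that are dissimilar (similarity < 3.5).
        # get_candidates_for() later samples 1 similar and 19 dissimilar candidates
        # per training example.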
self.all_msgs = list(self.msg_to_clipart.keys())
assert "" not in self.all_msgs
all_cliparts = [self.msg_to_clipart[msg] for msg in self.all_msgs]
self.similarity_matrix = np.zeros((len(all_cliparts), len(all_cliparts)))
for i in range(self.similarity_matrix.shape[0]):
for j in range(i, self.similarity_matrix.shape[1]):
self.similarity_matrix[i, j] = clipart_similarity(all_cliparts[i], all_cliparts[j])
for i in range(self.similarity_matrix.shape[0]):
for j in range(i):
self.similarity_matrix[i, j] = self.similarity_matrix[j, i]
# Never suggest the same sentence as both the input and a candidate
for i in range(self.similarity_matrix.shape[0]):
self.similarity_matrix[i, i] = -1
matrix_good = self.similarity_matrix > 4.5
matrix_bad = (self.similarity_matrix < 3.5) & (self.similarity_matrix >= 0)
for i in range(matrix_good.shape[0]):
if not matrix_good[i].any():
matrix_good[i, self.similarity_matrix[i].argmax()] = True
self.cands_good = np.zeros_like(self.similarity_matrix, dtype=int)
self.cands_good_lens = np.zeros(self.cands_good.shape[0], dtype=int)
self.cands_bad = np.zeros_like(self.similarity_matrix, dtype=int)
self.cands_bad_lens = np.zeros(self.cands_bad.shape[0], dtype=int)
where_good_i, where_good_j = np.where(matrix_good)
for i in range(matrix_good.shape[0]):
cands_good = where_good_j[where_good_i == i]
self.cands_good_lens[i] = len(cands_good)
self.cands_good[i,:len(cands_good)] = cands_good
where_bad_i, where_bad_j = np.where(matrix_bad)
unique_vals, unique_indices = np.unique(where_bad_i, return_index=True)
assert (unique_vals == np.arange(self.cands_bad.shape[0])).all()
for i in range(matrix_bad.shape[0]):
start = unique_indices[i]
if i == matrix_bad.shape[0] - 1:
assert (where_bad_i[start:] == i).all()
cands_bad = where_bad_j[start:]
else:
end = unique_indices[i+1]
assert (where_bad_i[start:end] == i).all()
cands_bad = where_bad_j[start:end]
self.cands_bad_lens[i] = len(cands_bad)
self.cands_bad[i,:len(cands_bad)] = cands_bad
def get_candidates_for(self, i):
good = np.random.choice(self.cands_good[i][:self.cands_good_lens[i]])
bad = np.random.choice(self.cands_bad[i][:self.cands_bad_lens[i]], size=19)
return (good, *bad)
def get_examples(self):
for i in np.random.permutation(self.cands_good.shape[0]):
cands = self.get_candidates_for(i)
idxs = (i, *cands)
words = []
offsets = []
next_offset = 0
for idx in idxs:
offsets.append(next_offset)
toks = [self.vocabulary_dict.get(tok, None) for tok in self.all_msgs[idx].split()]
toks = [tok for tok in toks if tok is not None]
words.extend(toks)
next_offset += len(toks)
yield {
'words': torch.LongTensor(words),
'offsets': torch.LongTensor(offsets)
}
def get_examples_batch(self, batch_size=16):
batch = []
for ex in self.get_examples():
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
def collate(self, batch):
offsets = [x['offsets'] for x in batch]
extra = 0
for i in range(len(offsets)):
offsets[i] += extra
extra += len(batch[i]['words'])
return {
'words': torch.cat([x['words'] for x in batch]).to(cuda_if_available),
'offsets': torch.cat(offsets).to(cuda_if_available),
}
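# A usage sketch for the batch layout above: the flat 'words' tensor plus
# 'offsets' is exactly the format torch.nn.EmbeddingBag consumes, with 21
# messages per example (the anchor message, one "good" candidate, and 19 "bad"
# ones). The embedding size and mean pooling below are illustrative
# assumptions, not the trained message-similarity model itself.
def _message_similarity_bag_demo(datagen, embedding_dim=64):
    bag = nn.EmbeddingBag(len(datagen.vocabulary), embedding_dim, mode='mean')
    batch = next(datagen.get_examples_batch(batch_size=4))
    pooled = bag(batch['words'].cpu(), batch['offsets'].cpu())
    # One pooled vector per message, grouped back into anchor + 20 candidates.
    return pooled.view(-1, 21, embedding_dim)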
#%%
def vocabulary_for_split(split, event_getter=codraw_data.get_place_one):
vocabulary = set()
it = iter(event_getter(split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
msg = event.msg
vocabulary |= set(msg.split())
return sorted(vocabulary)
def vocabulary_counter_for_split(split, event_getter=codraw_data.get_place_one):
vocabulary = Counter()
it = iter(event_getter(split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
msg = event.msg
vocabulary.update(msg.split())
return vocabulary
#%%
class BOWtoClipartData(Datagen):
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_place_one(self.split))
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
clipart_index = torch.LongTensor(np.array(clipart.idx, dtype=int))
clipart_categorical = torch.LongTensor([
clipart.subtype, clipart.depth, clipart.flip])
clipart_numerical = torch.tensor([clipart.normed_x, clipart.normed_y], dtype=torch.float)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_index': clipart_index,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_index': torch.stack([x['clipart_index'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
#%%
class ClipartToSeqData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_BINARY = NUM_INDEX + NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
BINARY_OFFSETS = np.cumsum([0, NUM_INDEX, NUM_SUBTYPES, NUM_DEPTH])
NUM_NUMERICAL = 2 # x, y
def init_full(self):
self.vocabulary = ['<S>', '</S>'] + vocabulary_for_split(self.split)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_place_one(self.split))
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
x = clipart.normed_x
y = clipart.normed_y
clipart_numerical = torch.tensor([x, y], dtype=torch.float)
clipart_binary = torch.zeros(self.NUM_BINARY)
for val, offset in zip([clipart.idx, clipart.subtype, clipart.depth, clipart.flip], self.BINARY_OFFSETS):
clipart_binary[val + offset] = 1.
msg_idxs = [self.vocabulary_dict['<S>']] + [self.vocabulary_dict.get(word, None) for word in msg.split()] + [self.vocabulary_dict['</S>']]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_binary': clipart_binary,
'clipart_numerical': clipart_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
batch = sorted(batch, key=lambda x: -len(x['msg_idxs']))
msg_lens = torch.tensor([len(x['msg_idxs']) - 1 for x in batch], dtype=torch.long)
max_len = int(msg_lens.max())
msg_idxs_input = torch.stack([F.pad(x['msg_idxs'][:-1], (0, max_len + 1 - len(x['msg_idxs']))) for x in batch])
msg_idxs_output = torch.stack([F.pad(x['msg_idxs'][1:], (0, max_len + 1 - len(x['msg_idxs']))) for x in batch])
return {
'clipart_binary': torch.stack([x['clipart_binary'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'msg_in': nn.utils.rnn.pack_padded_sequence(msg_idxs_input.to(cuda_if_available), msg_lens.to(cuda_if_available), batch_first=True),
'msg_out': nn.utils.rnn.pack_padded_sequence(msg_idxs_output.to(cuda_if_available), msg_lens.to(cuda_if_available), batch_first=True),
}
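# A consumption sketch for the packed batch above: because msg_in / msg_out are
# PackedSequences built from length-sorted, shifted-by-one token sequences,
# they can drive a recurrent decoder directly. The embedding/LSTM sizes and the
# tiny decoder are illustrative assumptions, not the repository's actual model.
def _clipart_to_seq_decoder_demo(datagen, embedding_dim=64, hidden_dim=128):
    vocab_size = len(datagen.vocabulary)
    embed = nn.Embedding(vocab_size, embedding_dim)
    lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
    project = nn.Linear(hidden_dim, vocab_size)
    batch = next(datagen.get_examples_batch(batch_size=4))
    msg_in, msg_out = batch['msg_in'], batch['msg_out']
    # Embed the packed data directly and rewrap it with the same batch_sizes,
    # so the LSTM never sees the padding positions.
    packed_in = nn.utils.rnn.PackedSequence(embed(msg_in.data.cpu()), msg_in.batch_sizes)
    packed_hidden, _ = lstm(packed_in)
    logits = project(packed_hidden.data)
    return F.cross_entropy(logits, msg_out.data.cpu())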
#%%
class BOWplusCanvasToMultiData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_CATEGORICAL = NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP)) + 2 * NUM_SUBTYPES
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split, codraw_data.get_contextual_place_many)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_contextual_place_many(self.split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.DrawGroup)
cliparts = event.cliparts
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
if not msg:
continue
clipart_chosen_mask = np.zeros(self.NUM_INDEX, dtype=bool)
clipart_categorical = np.zeros((self.NUM_INDEX, 3))
clipart_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in cliparts:
clipart_chosen_mask[clipart.idx] = True
clipart_categorical[clipart.idx, :] = [clipart.subtype, clipart.depth, clipart.flip]
clipart_numerical[clipart.idx, :] = [clipart.normed_x, clipart.normed_y]
clipart_chosen_mask = torch.tensor(clipart_chosen_mask.astype(np.uint8), dtype=torch.uint8)
clipart_categorical = torch.tensor(clipart_categorical, dtype=torch.long)
clipart_numerical = torch.tensor(clipart_numerical, dtype=torch.float)
canvas_binary = np.zeros((self.NUM_INDEX, 1 + self.NUM_DEPTH + self.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, self.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + self.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_chosen_mask': clipart_chosen_mask,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'canvas_binary': canvas_binary,
'canvas_numerical': canvas_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_chosen_mask': torch.stack([x['clipart_chosen_mask'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'canvas_binary': torch.stack([x['canvas_binary'] for x in batch]).to(cuda_if_available),
'canvas_numerical': torch.stack([x['canvas_numerical'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
#%%
class BOWAddUpdateData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_CATEGORICAL = NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP)) + 2 * NUM_SUBTYPES
NUM_X_TICKS = 3
NUM_Y_TICKS = 2
NUM_TAGS = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS + 1
NUM_TAGS_PER_INDEX = 6 # index, subtype, depth, flip, x, y
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split, codraw_data.get_contextual_place_many)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_contextual_place_many(self.split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.DrawGroup)
cliparts = event.cliparts
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
if not msg:
continue
context_idxs = set([c.idx for c in canvas_context])
clipart_added_mask = np.zeros(self.NUM_INDEX, dtype=bool)
clipart_updated_mask = np.zeros(self.NUM_INDEX, dtype=bool)
clipart_categorical = np.zeros((self.NUM_INDEX, 3))
clipart_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in cliparts:
if clipart.idx in context_idxs:
clipart_updated_mask[clipart.idx] = True
else:
clipart_added_mask[clipart.idx] = True
clipart_categorical[clipart.idx, :] = [clipart.subtype, clipart.depth, clipart.flip]
clipart_numerical[clipart.idx, :] = [clipart.normed_x, clipart.normed_y]
clipart_added_mask = torch.tensor(clipart_added_mask.astype(np.uint8), dtype=torch.uint8)
clipart_updated_mask = torch.tensor(clipart_updated_mask.astype(np.uint8), dtype=torch.uint8)
clipart_categorical = torch.tensor(clipart_categorical, dtype=torch.long)
clipart_numerical = torch.tensor(clipart_numerical, dtype=torch.float)
canvas_binary = np.zeros((self.NUM_INDEX, 1 + self.NUM_DEPTH + self.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, self.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
canvas_tags = np.zeros((self.NUM_INDEX + 1, self.NUM_TAGS_PER_INDEX), dtype=int)
canvas_mask = np.zeros(self.NUM_INDEX + 1, dtype=bool)
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + self.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
x_tick = int(np.floor(clipart.normed_x * self.NUM_X_TICKS))
if x_tick < 0:
x_tick = 0
elif x_tick >= self.NUM_X_TICKS:
x_tick = self.NUM_X_TICKS - 1
y_tick = int(np.floor(clipart.normed_y * self.NUM_Y_TICKS))
if y_tick < 0:
y_tick = 0
elif y_tick >= self.NUM_Y_TICKS:
y_tick = self.NUM_Y_TICKS - 1
# Tag features (for attention)
canvas_tags[clipart.idx, 0] = 1 + clipart.idx
canvas_tags[clipart.idx, 1] = 1 + Clipart.NUM_IDX + clipart.subtype
canvas_tags[clipart.idx, 2] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + clipart.depth
canvas_tags[clipart.idx, 3] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + int(clipart.flip)
canvas_tags[clipart.idx, 4] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + x_tick
canvas_tags[clipart.idx, 5] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + self.NUM_X_TICKS + y_tick
canvas_mask[clipart.idx] = True
if not canvas_context:
canvas_tags[-1, 0] = self.NUM_TAGS - 1
canvas_mask[-1] = True
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)
canvas_tags = torch.tensor(canvas_tags, dtype=torch.long)
canvas_mask = torch.tensor(canvas_mask.astype(np.uint8), dtype=torch.uint8)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_added_mask': clipart_added_mask,
'clipart_updated_mask': clipart_updated_mask,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'canvas_binary': canvas_binary,
'canvas_numerical': canvas_numerical,
'canvas_tags': canvas_tags,
'canvas_mask': canvas_mask,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_added_mask': torch.stack([x['clipart_added_mask'] for x in batch]).to(cuda_if_available),
'clipart_updated_mask': torch.stack([x['clipart_updated_mask'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'canvas_binary': torch.stack([x['canvas_binary'] for x in batch]).to(cuda_if_available),
'canvas_numerical': torch.stack([x['canvas_numerical'] for x in batch]).to(cuda_if_available),
'canvas_tags': torch.stack([x['canvas_tags'] for x in batch]).to(cuda_if_available),
'canvas_mask': torch.stack([x['canvas_mask'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
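# A sketch of how the tag features built above can be consumed: embed each of
# the NUM_TAGS_PER_INDEX discrete tags (index, subtype, depth, flip, coarse x,
# coarse y) and sum them into one vector per canvas slot, masking slots that
# hold no clipart. The embedding size is an illustrative assumption; tag id 0
# is never assigned to a real clipart, so it effectively acts as padding.
def _canvas_tag_embedding_demo(datagen, tag_dim=32):
    tag_embed = nn.Embedding(datagen.NUM_TAGS, tag_dim)
    batch = next(datagen.get_examples_batch(batch_size=4))
    tags = batch['canvas_tags'].cpu()           # (B, NUM_INDEX + 1, NUM_TAGS_PER_INDEX)
    mask = batch['canvas_mask'].cpu().float()   # (B, NUM_INDEX + 1)
    slot_vectors = tag_embed(tags).sum(dim=2)   # (B, NUM_INDEX + 1, tag_dim)
    return slot_vectors * mask.unsqueeze(-1)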
#%%
class SceneToSeqData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_X_TICKS = 3
NUM_Y_TICKS = 2
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS)) + 2 * NUM_SUBTYPES
NUM_TAGS = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS
NUM_TAGS_PER_INDEX = 6 # index, subtype, depth, flip, x, y
CLIPART_STATE_NOT_UNDRAWN = 0
CLIPART_STATE_UNDRAWN = 1
NUM_CLIPART_STATES = 2
def init_full(self):
self.vocabulary_counts = vocabulary_counter_for_split(self.split, codraw_data.get_set_clipart_pre_peek)
self.vocabulary = ['</TELL>', '<S>', '</S>', '<UNK>'] + sorted(self.vocabulary_counts.keys())
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.unk_index = self.vocabulary_dict['<UNK>']
self.calc_derived()
def init_from_spec(self, vocabulary, vocabulary_counts):
self.vocabulary_counts = vocabulary_counts
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.unk_index = self.vocabulary_dict['<UNK>']
def get_spec(self):
return dict(vocabulary=self.vocabulary, vocabulary_counts=self.vocabulary_counts)
def tensors_from_episode(self, episode, is_train=False):
examples = list(self.get_examples(episode, is_train=is_train))
if not examples:
print(episode)
assert len(examples) > 0, "Episode did not produce any examples"
assert len(examples) == 1, "Episode should not produce multiple examples"
return self.collate(examples, is_train=is_train)
def tensors_from_episodes(self, episodes, is_train=True):
events = []
for episode in episodes:
events.extend(episode)
examples = list(self.get_examples(events, is_train=is_train))
if not examples:
print(episode)
assert len(examples) > 0, "Episode did not produce any examples"
return self.collate(examples, is_train=is_train)
def get_examples(self, events=None, is_train=True):
example = None
scene_present_idxs = None
prev_drawn_idxs = None
num_unfilled_past = None
if events is None:
events = codraw_data.get_set_clipart_pre_peek(self.split)
it = iter(events)
for event in it:
if isinstance(event, codraw_data.ObserveTruth):
if example is not None:
# When doing RL, it's important that the batched data
# matches the decisions taken in step-by-step mode
# If an episode was cut off, don't include a </TELL> token
# All human conversations have fewer than 50 rounds
if len(example['teller_tokens_in']) < 50:
teller_tokens_stop = [self.vocabulary_dict[x] for x in ('<S>', '</TELL>')]
teller_tokens_stop = torch.tensor(teller_tokens_stop, dtype=torch.long)
example['teller_tokens_in'].append(teller_tokens_stop[:-1])
example['teller_tokens_out'].append(teller_tokens_stop[1:])
example['teller_counts_in'].append(torch.tensor([np.inf], dtype=torch.float))
else:
example['drawer_clipart_state'].pop()
yield example
scene = event.scene
scene_present_idxs = set([c.idx for c in scene])
scene_tags = np.zeros((self.NUM_INDEX, self.NUM_TAGS_PER_INDEX), dtype=int)
scene_mask = np.zeros(self.NUM_INDEX, dtype=bool)
for clipart in scene:
x_tick = int(np.floor(clipart.normed_x * self.NUM_X_TICKS))
if x_tick < 0:
x_tick = 0
elif x_tick >= self.NUM_X_TICKS:
x_tick = self.NUM_X_TICKS - 1
y_tick = int(np.floor(clipart.normed_y * self.NUM_Y_TICKS))
if y_tick < 0:
y_tick = 0
elif y_tick >= self.NUM_Y_TICKS:
y_tick = self.NUM_Y_TICKS - 1
# Tag features (for attention)
scene_tags[clipart.idx, 0] = 1 + clipart.idx
scene_tags[clipart.idx, 1] = 1 + Clipart.NUM_IDX + clipart.subtype
scene_tags[clipart.idx, 2] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + clipart.depth
scene_tags[clipart.idx, 3] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + int(clipart.flip)
scene_tags[clipart.idx, 4] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + x_tick
scene_tags[clipart.idx, 5] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + self.NUM_X_TICKS + y_tick
scene_mask[clipart.idx] = True
scene_tags = torch.tensor(scene_tags, dtype=torch.long)
scene_mask = torch.tensor(scene_mask.astype(np.uint8), dtype=torch.uint8)
if is_train:
assert scene_present_idxs is not None
drawer_clipart_state = np.zeros(self.NUM_INDEX, dtype=int)
for idx in range(self.NUM_INDEX):
if idx not in scene_present_idxs:
# drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_IN_SCENE
drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_UNDRAWN
else:
drawer_clipart_state[idx] = self.CLIPART_STATE_UNDRAWN
drawer_clipart_state = torch.tensor(drawer_clipart_state, dtype=torch.long)
prev_drawn_idxs = set()
num_unfilled_past = 1
example = {
'scene_tags': scene_tags,
'scene_mask': scene_mask,
'teller_tokens_in': [],
'teller_counts_in': [],
'teller_tokens_out': [],
'drawer_clipart_state': [drawer_clipart_state],
}
else:
yield {
'scene_tags': scene_tags,
'scene_mask': scene_mask,
}
# At test time, there shouldn't be anything after the
# ObserveTruth event
continue
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.SetDrawing)
drawn_scene = event.scene
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
teller_tokens = [self.vocabulary_dict.get(word, self.unk_index) for word in msg.split()]
teller_counts = [self.vocabulary_counts[word] for word in msg.split()]
teller_tokens = [self.vocabulary_dict['<S>']] + teller_tokens + [self.vocabulary_dict['</S>']]
teller_counts = [np.inf] + teller_counts + [np.inf]
# Needed for RL. All human utterances have fewer than 50 words
# due to a character limit imposed during data collection
if len(teller_tokens) > 51:
teller_tokens = teller_tokens[:51]
teller_counts = teller_counts[:51]
teller_tokens = torch.tensor(teller_tokens, dtype=torch.long)
teller_counts = torch.tensor(teller_counts, dtype=torch.float)
example['teller_tokens_in'].append(teller_tokens[:-1])
example['teller_tokens_out'].append(teller_tokens[1:])
example['teller_counts_in'].append(teller_counts[:-1])
assert scene_present_idxs is not None
drawn_idxs = set([c.idx for c in drawn_scene])
drawer_clipart_state = np.zeros(self.NUM_INDEX, dtype=int)
for idx in range(self.NUM_INDEX):
if idx not in scene_present_idxs or idx in drawn_idxs:
drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_UNDRAWN
else:
drawer_clipart_state[idx] = self.CLIPART_STATE_UNDRAWN
drawer_clipart_state = torch.tensor(drawer_clipart_state, dtype=torch.long)
example['drawer_clipart_state'].append(drawer_clipart_state)
def collate(self, batch, is_train=True):
if is_train:
packer = Packer([x['teller_tokens_in'] for x in batch])
return {
'packer': packer,
'brw_teller_tokens_in': packer.brw_from_list([x['teller_tokens_in'] for x in batch]).to(cuda_if_available),
'brw_teller_counts_in': packer.brw_from_list([x['teller_counts_in'] for x in batch]).to(cuda_if_available),
'brw_teller_tokens_out': packer.brw_from_list([x['teller_tokens_out'] for x in batch]).to(cuda_if_available),
'b_scene_tags': torch.stack([x['scene_tags'] for x in batch]).to(cuda_if_available),
'b_scene_mask': torch.stack([x['scene_mask'] for x in batch]).to(cuda_if_available),
'br_drawer_clipart_state': packer.br_from_list([x['drawer_clipart_state'] for x in batch]).to(cuda_if_available),
}
else:
return {
'b_scene_tags': torch.stack([x['scene_tags'] for x in batch]).to(cuda_if_available),
'b_scene_mask': torch.stack([x['scene_mask'] for x in batch]).to(cuda_if_available),
}
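# A shape-checking sketch for test-time batches, assuming `scene` is a list of
# Clipart objects (e.g. one element of codraw_data.get_scenes('dev')) and that
# codraw_data.ObserveTruth(scene) wraps it the way the event handling above
# expects.
def _scene_to_seq_test_batch_demo(datagen, scene):
    batch = datagen.tensors_from_episode([codraw_data.ObserveTruth(scene)], is_train=False)
    assert batch['b_scene_tags'].shape == (1, datagen.NUM_INDEX, datagen.NUM_TAGS_PER_INDEX)
    assert batch['b_scene_mask'].shape == (1, datagen.NUM_INDEX)
    return batch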
| codraw-models-master | datagen.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
def load_models(*partitions):
if not partitions:
partitions = (1, 2, 3, 4)
models = {}
if 1 in partitions:
from baseline1_models import load_baseline1
models.update(load_baseline1())
if 2 in partitions:
from baseline2_models import load_baseline2
models.update(load_baseline2())
if 3 in partitions:
from baseline3_models import load_baseline3
models.update(load_baseline3())
if 4 in partitions:
from baseline4_models import load_baseline4
models.update(load_baseline4())
return models
#%%
def make_pairs(models, *names):
if models is None:
models = load_models()
res = []
for name in names:
res.append((name, (models[name + '_a'], models[name + '_b'])))
return res
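# A usage sketch, assuming the serialized baseline checkpoints that the
# baseline*_models loaders read are present on disk. Model names follow the
# '<name>_a' / '<name>_b' split convention used by the evaluation scripts.
def _example_usage():
    models = load_models(1, 2)  # only load partitions 1 and 2
    tellers = make_pairs(models, 'teller_nn', 'teller_pragmaticnn')
    drawers = make_pairs(models, 'drawer_lstmaddonly')
    return tellers, drawers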
| codraw-models-master | saved_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides the Packer class, which is useful for managing a hierarchy where each
batch element has a variable number of conversation rounds, and each round may
consist of a variable number of messages.
"""
#%%
import numpy as np
import torch
from torch.nn.utils.rnn import PackedSequence
# %%
class Packer:
def __init__(self, list_brw):
coords = []
b_lens = []
br_lens = []
coords_flat = []
b_lens_flat = []
for b, list_rw in enumerate(list_brw):
b_lens.append(len(list_rw))
len_flat = 0
for r, list_w in enumerate(list_rw):
br_lens.append(len(list_w))
for w, _ in enumerate(list_w):
coords.append([b, r, w])
coords_flat.append([b, len_flat + w])
len_flat += len(list_w)
b_lens_flat.append(len_flat)
self.coords_brw = np.array(coords, dtype=int)
self.b_lens = np.array(b_lens, dtype=int)
self.br_lens = np.array(br_lens, dtype=int)
self.coords_flat = np.array(coords_flat, dtype=int)
self.b_lens_flat = np.array(b_lens_flat, dtype=int)
self.coords_br, self.indices_br2brw = np.unique(self.coords_brw[:,:-1], axis=0, return_inverse=True)
_, self.indices_b2br = np.unique(self.coords_br[:,:-1], axis=0, return_inverse=True)
self.indices_b2brw = self.indices_b2br[self.indices_br2brw]
self.dense_shape = np.max(self.coords_brw, 0) + 1
# Must use stable sorts here, which is why kind='mergesort'
self.indices_b2sb = np.argsort(-self.b_lens, kind='mergesort')
sort_by_num_rounds = np.argsort(-self.b_lens[self.indices_b2br], kind='mergesort')
sort_by_round = np.argsort(self.coords_br[sort_by_num_rounds][:,-1], kind='mergesort')
self.indices_br2srb = sort_by_num_rounds[sort_by_round]
self.indices_br2sx = np.argsort(-self.br_lens, kind='mergesort')
sort_by_num_words = np.argsort(-self.br_lens[self.indices_br2brw], kind='mergesort')
sort_by_word_idx = np.argsort(self.coords_brw[sort_by_num_words][:,-1], kind='mergesort')
self.indices_brw2swx = sort_by_num_words[sort_by_word_idx]
_, batch_sizes_srb = np.unique(self.coords_br[self.indices_br2srb][:,-1], return_counts=True)
_, batch_sizes_swx = np.unique(self.coords_brw[self.indices_brw2swx][:,-1], return_counts=True)
self.batch_sizes_srb = torch.tensor(batch_sizes_srb, dtype=torch.long)
self.batch_sizes_swx = torch.tensor(batch_sizes_swx, dtype=torch.long)
self.indices_srb2br = np.argsort(self.indices_br2srb, kind='mergesort')
self.indices_swx2brw = np.argsort(self.indices_brw2swx, kind='mergesort')
self.indices_sb2b = np.argsort(self.indices_b2sb, kind='mergesort')
self.indices_sx2br = np.argsort(self.indices_br2sx, kind='mergesort')
# For flat
self.indices_b2ob = np.argsort(-self.b_lens_flat, kind='mergesort')
sort_by_flat_words = np.argsort(-self.b_lens_flat[self.indices_b2brw], kind='mergesort')
sort_by_flat_word_idx = np.argsort(self.coords_flat[sort_by_flat_words][:,-1], kind='mergesort')
self.indices_brw2orwb = sort_by_flat_words[sort_by_flat_word_idx]
_, batch_sizes_orwb = np.unique(self.coords_flat[self.indices_brw2orwb][:,-1], return_counts=True)
self.batch_sizes_orwb = torch.tensor(batch_sizes_orwb, dtype=torch.long)
self.indices_ob2b = np.argsort(self.indices_b2ob, kind='mergesort')
self.indices_orwb2brw = np.argsort(self.indices_brw2orwb, kind='mergesort')
def brw_from_list(self, list_brw):
vals = []
for list_rw in list_brw:
for list_w in list_rw:
vals.extend(list_w)
assert len(vals) == self.coords_brw.shape[0]
if torch.is_tensor(vals[0]):
return torch.stack(vals)
else:
return torch.tensor(vals)
def br_from_list(self, list_br):
vals = []
for list_r in list_br:
vals.extend(list_r)
assert len(vals) == self.coords_br.shape[0]
if torch.is_tensor(vals[0]):
return torch.stack(vals)
else:
return torch.tensor(vals)
def br_from_b_expand(self, b_in):
return b_in[self.indices_b2br]
def brw_from_br_expand(self, br_in):
return br_in[self.indices_br2brw]
def brw_from_b_expand(self, b_in):
return b_in[self.indices_b2brw]
def srb_from_br_pack(self, br_in):
return PackedSequence(
br_in[self.indices_br2srb],
self.batch_sizes_srb
)
def swx_from_brw_pack(self, brw_in):
return PackedSequence(
brw_in[self.indices_brw2swx],
self.batch_sizes_swx
)
def br_from_srb_unpack(self, srb_in):
return srb_in.data[self.indices_srb2br]
def brw_from_swx_unpack(self, swx_in):
return swx_in.data[self.indices_swx2brw]
def br_from_sx(self, sx_in):
return sx_in[self.indices_sx2br]
def b_from_sb(self, sb_in):
return sb_in[self.indices_sb2b]
def sx_from_br(self, br_in):
return br_in[self.indices_br2sx]
def sb_from_b(self, b_in):
return b_in[self.indices_b2sb]
# For flat
def orwb_from_brw_pack(self, brw_in):
return PackedSequence(
brw_in[self.indices_brw2orwb],
self.batch_sizes_orwb
)
def brw_from_orwb_unpack(self, orwb_in):
return orwb_in.data[self.indices_orwb2brw]
def b_from_ob(self, ob_in):
return ob_in[self.indices_ob2b]
def ob_from_b(self, b_in):
return b_in[self.indices_b2ob]
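# A tiny end-to-end sketch of the batch (b) / round (r) / word (w) hierarchy the
# class manages: two conversations with two and one rounds respectively. The
# token ids are arbitrary numbers used only for illustration.
def _packer_demo():
    list_brw = [
        [[1, 2, 3], [4, 5]],  # conversation 0: rounds of 3 and 2 tokens
        [[6]],                # conversation 1: a single 1-token round
    ]
    packer = Packer(list_brw)
    brw = packer.brw_from_list(list_brw)         # flat tensor of all 6 tokens
    swx = packer.swx_from_brw_pack(brw)          # PackedSequence over words within a round
    roundtrip = packer.brw_from_swx_unpack(swx)  # back to the flat brw ordering
    assert torch.equal(brw, roundtrip)
    return packer, brw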
| codraw-models-master | packer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
#%%
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def select_clipart_to_tell(episode):
cliparts = set(episode.get_last(codraw_data.ObserveTruth).scene)
cliparts -= set([e.clipart for e in episode if isinstance(e, codraw_data.SelectClipart)])
if cliparts:
cliparts = list(sorted(cliparts))
clipart = cliparts[0]
# For now, don't randomize the clipart selection order.
#cliparts[np.random.choice(len(cliparts))]
episode.append(codraw_data.SelectClipart(clipart))
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell(episode):
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
if isinstance(event, codraw_data.Peek):
# Skip to the next non-peek event
assert isinstance(episode.script[episode.script_index + 1], codraw_data.TellerObserveCanvas)
episode.script_index += 2
return scripted_tell(episode)
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell_before_peek(episode):
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
if isinstance(event, codraw_data.Peek):
return
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell_after_peek(episode):
if episode.script_index == 0:
while episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
episode.script_index += 1
if not isinstance(event, codraw_data.Peek):
continue
event = episode.script[episode.script_index]
assert isinstance(event, codraw_data.TellerObserveCanvas)
start_scene = event.scene
episode.script_index += 1
break
else:
assert False, "Could not find Peek event in the script!"
episode.append(codraw_data.DrawGroup(start_scene))
assert episode.script_index < len(episode.script)
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.TellGroup)
def draw_nothing(episode):
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
@respond_to(codraw_data.TellGroup)
def drawer_observe_canvas(episode):
# TODO(nikita): can cache for higher efficiency
scene = episode.reconstruct()
event = codraw_data.ObserveCanvas(scene)
episode.append(event)
def make_fns(splits, *objs_or_pairs):
split_to_use = 0
res = []
for obj_or_pair in objs_or_pairs:
if isinstance(obj_or_pair, tuple):
assert len(obj_or_pair) == 2
if splits[split_to_use] == 'a':
obj = obj_or_pair[0]
elif splits[split_to_use] == 'b':
obj = obj_or_pair[1]
else:
raise ValueError(f"Invalid split: {splits[split_to_use]}")
split_to_use += 1
else:
obj = obj_or_pair
if isinstance(obj, nn.Module):
# Switch pytorch modules to evaluation mode
obj.eval()
if hasattr(obj, 'get_action_fns'):
res.extend(obj.get_action_fns())
else:
res.append(obj)
assert split_to_use == len(splits), "Too many splits specified"
return res
def episodes_from_fns(fns, limit=None, split='dev'):
use_scripts = (scripted_tell in fns) or (scripted_tell_before_peek in fns)
if scripted_tell_after_peek in fns:
use_scripts = True
run_from = codraw_data.get_scenes_and_scripts_with_peek(split)
elif use_scripts:
run_from = codraw_data.get_scenes_and_scripts(split)
else:
run_from = codraw_data.get_scenes(split)
if limit is not None:
run_from = run_from[:limit]
with torch.no_grad():
for run_from_single in run_from:
if use_scripts:
episode = Episode.run_script(run_from_single, fns)
else:
episode = Episode.run(run_from_single, fns)
yield episode
def eval_fns(fns, limit=None, split='dev'):
sims = [episode.scene_similarity() for episode in episodes_from_fns(fns, limit=limit, split=split)]
return np.array(sims)
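# A pairing sketch in the style of the baseline evaluation scripts.
# `teller_pair` and `drawer_pair` are assumed to be (model_a, model_b) tuples,
# e.g. from saved_models.make_pairs; splits 'ab' runs the teller trained on
# split a against the drawer trained on split b.
def _example_paired_eval(teller_pair, drawer_pair, limit=100):
    fns = make_fns('ab', teller_pair, drawer_pair)
    sims = eval_fns(fns, limit=limit, split='dev')
    return sims.mean()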
#%%
def calc_perplexity(teller, split='dev'):
"""
Calculates teller perplexity. Does not work with all teller classes, e.g.
perplexity has not been defined for the nearest-neighbor tellers.
"""
datagen_spec = {**teller.datagen.spec}
datagen_spec['split'] = split
datagen_dev = teller.datagen_cls(spec=datagen_spec)
assert datagen_dev.vocabulary == teller.datagen.vocabulary
nlls = []
counts = []
with torch.no_grad():
teller.eval()
for ex in datagen_dev.get_examples_unshuffled_batch(batch_size=128):
nll, count = teller(ex, return_loss=False, return_nll_count=True)
nlls.append(nll)
counts.append(count)
nll_per_word = np.array(nlls).sum() / np.array(counts).sum()
return np.exp(nll_per_word)
#%%
class ComponentEvaluator:
NUM_FEATURES = 7
_instance_cache = {}
@classmethod
def get(cls, split_for_baseline='train_full'):
if split_for_baseline not in cls._instance_cache:
cls._instance_cache[split_for_baseline] = cls(split_for_baseline)
return cls._instance_cache[split_for_baseline]
def __init__(self, split_for_baseline='train_full'):
cliparts_by_idx = {idx: [] for idx in range(58)}
for scene in codraw_data.get_scenes(split_for_baseline):
for clipart in scene:
cliparts_by_idx[clipart.idx].append(clipart)
self.idx_to_exemplar = {}
for idx in cliparts_by_idx:
if idx in Clipart.HUMAN_IDXS:
expression, _ = torch.mode(torch.tensor([c.expression for c in cliparts_by_idx[idx]]))
pose, _ = torch.mode(torch.tensor([c.pose for c in cliparts_by_idx[idx]]))
subtype = pose * Clipart.NUM_EXPRESSION + expression
else:
subtype = 0
depth, _ = torch.mode(torch.tensor([c.depth for c in cliparts_by_idx[idx]]))
flip, _ = torch.mode(torch.tensor([c.flip for c in cliparts_by_idx[idx]]))
x = np.mean([c.x for c in cliparts_by_idx[idx]])
y = np.mean([c.y for c in cliparts_by_idx[idx]])
self.idx_to_exemplar[idx] = Clipart(idx, int(subtype), int(depth), int(flip), x, y)
# Calculate prior baseline, and human performance
human_numer = np.zeros(self.NUM_FEATURES)
human_denom = np.zeros(self.NUM_FEATURES)
baseline_numer = np.zeros(self.NUM_FEATURES)
baseline_denom = np.zeros(self.NUM_FEATURES)
for scene_true, scene_human in codraw_data.get_truth_and_human_scenes('dev'):
ep_numer, ep_denom = self.eval_scene(scene_human, scene_true)
human_numer += ep_numer
human_denom += ep_denom
ep_numer, ep_denom = self.eval_scene([], scene_true)
baseline_numer += ep_numer
baseline_denom += ep_denom
self.human_scores = human_numer / human_denom
self.baseline_scores = baseline_numer / baseline_denom
def eval_scene(self, pred, target):
res_numer = np.zeros(self.NUM_FEATURES)
res_denom = np.zeros(self.NUM_FEATURES)
for truth_clipart in target:
other_cliparts = [c for c in pred if c.idx == truth_clipart.idx]
if other_cliparts:
other_clipart = other_cliparts[0]
else:
other_clipart = self.idx_to_exemplar[truth_clipart.idx]
feats_numer = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
feats_denom = [1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
feats_numer[0] = float(truth_clipart.flip != other_clipart.flip)
if truth_clipart.idx in Clipart.HUMAN_IDXS:
feats_numer[1] = float(truth_clipart.expression != other_clipart.expression)
feats_numer[2] = float(truth_clipart.pose != other_clipart.pose)
feats_denom[1] = 1.0
feats_denom[2] = 1.0
feats_numer[3] = float(truth_clipart.depth != other_clipart.depth)
displacements = np.array([truth_clipart.normed_x - other_clipart.normed_x, truth_clipart.normed_y - other_clipart.normed_y])
feats_numer[4] = np.sum(displacements ** 2)
feats_numer[5], feats_numer[6] = np.abs(displacements)
res_numer += feats_numer
res_denom += feats_denom
return res_numer, res_denom
def eval_episode(self, episode):
return self.eval_scene(episode.reconstruct(), episode.get_true_scene())
def eval_fns(self, fns, limit=None, split='dev', unscaled=False):
numer = np.zeros(self.NUM_FEATURES)
denom = np.zeros(self.NUM_FEATURES)
for episode in episodes_from_fns(fns, limit=limit, split=split):
ep_numer, ep_denom = self.eval_episode(episode)
numer += ep_numer
denom += ep_denom
res = numer / denom
if not unscaled:
res = (res - self.human_scores) / (self.baseline_scores - self.human_scores)
res = 1.0 - res
return res
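# A usage sketch: the evaluator returns seven per-attribute scores (flip
# direction, expression, pose, depth, squared xy offset, x offset, y offset),
# scaled so that 0 matches the empty-canvas baseline and 1 matches human
# drawers unless unscaled=True. The teller/drawer pairs are assumed to come
# from saved baseline models.
def _example_component_eval(teller_pair, drawer_pair, limit=100):
    evaluator = ComponentEvaluator.get()
    components = evaluator.eval_fns(make_fns('ab', teller_pair, drawer_pair), limit=limit)
    assert components.shape == (ComponentEvaluator.NUM_FEATURES,)
    return components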
#%%
class Model(object):
datagen_cls = None
def __init__(self, datagen=None, spec=None, **kwargs):
super().__init__()
if spec is not None:
assert self.datagen_cls is not None
assert self.datagen_cls.__name__ == spec['datagen_class']
self.datagen = self.datagen_cls(spec=spec['datagen_spec'])
self.init_from_spec(**{k: v for (k,v) in spec.items() if k not in ['class', 'datagen_spec', 'datagen_class', 'state_dict']})
if 'state_dict' in spec:
self.load_state_dict(spec['state_dict'])
self.to(cuda_if_available)
self.post_init_from_spec()
else:
assert isinstance(datagen, self.datagen_cls)
self.datagen = datagen
self.init_full(**kwargs)
if hasattr(self, 'state_dict'):
self.to(cuda_if_available)
def init_full(self):
pass
def init_from_spec(self, **kwargs):
self.init_full(**kwargs)
def post_init_from_spec(self):
pass
def get_action_fns(self):
raise NotImplementedError("Subclasses should override this")
def get_spec(self):
return {}
@property
def spec(self):
res = {
'class': type(self).__name__,
'datagen_class': type(self.datagen).__name__,
'datagen_spec': self.datagen.spec,
**self.get_spec(),
}
if hasattr(self, 'state_dict'):
res['state_dict'] = self.state_dict()
return res
# This method doesn't work because models are defined in other files, so
# globals() fails to register them. TODO(nikita): better deserialization
# helper?
# @staticmethod
# def new_from_spec(spec):
# model_class = globals()[spec['class']]
# return model_class(spec=spec)
def just_tell(self, clipart, *args, **kwargs):
assert hasattr(self, 'tell'), "Model is not a teller"
if isinstance(self, nn.Module):
self.eval()
episode = Episode([codraw_data.SelectClipart(clipart)])
self.tell(episode, *args, **kwargs)
return episode.get_last(codraw_data.TellGroup).msg
def just_draw(self, msg, scene=[], *args, **kwargs):
assert hasattr(self, 'draw'), "Model is not a drawer"
episode = Episode([codraw_data.TellGroup(msg), codraw_data.ObserveCanvas(scene)])
if isinstance(self, nn.Module):
self.eval()
self.draw(episode, *args, **kwargs)
event_multi = episode.get_last(codraw_data.DrawGroup)
if event_multi is not None:
return codraw_data.AbstractScene(event_multi.cliparts)
event_single = episode.get_last(codraw_data.DrawClipart)
return event_single.clipart
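# A single-turn sketch using the helpers above, assuming `teller` and `drawer`
# are loaded Model instances (e.g. from saved_models.load_models) and that the
# dev split is available.
def _example_single_turn(teller, drawer):
    scene = codraw_data.get_scenes('dev')[0]
    clipart = list(scene)[0]
    msg = teller.just_tell(clipart)          # describe one clipart in words
    drawn = drawer.just_draw(msg, scene=[])  # drawer starts from an empty canvas
    return msg, drawn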
| codraw-models-master | model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import json
import numpy as np
import codraw_data
import model
from abs_metric import scene_similarity
from pathlib import Path
#%%
TRANSCRIPTS_PATH = Path('transcripts-eval-v1.json')
TRANSCRIPTS_SPLIT = 'test'
#%%
transcripts = json.loads(TRANSCRIPTS_PATH.read_text())
#%%
def get_transcript_results(transcripts):
data = transcripts['data']
for datum in data.values():
model_name = datum['model_name']
scene = codraw_data.AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = codraw_data.AbstractScene(scene_after)
yield (model_name, scene, scene_after)
#%%
component_evaluator = model.ComponentEvaluator.get()
#%%
true_to_human = {}
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes(TRANSCRIPTS_SPLIT):
true_to_human[tuple(true_scene)] = human_scene
# %%
model_to_sims = {}
model_to_numer = {}
model_to_denom = {}
true_scenes_set = set()
for model_name, true_scene, reconstructed_scene in get_transcript_results(transcripts):
if model_name not in model_to_sims:
model_to_sims[model_name] = []
if model_name not in model_to_numer:
assert model_name not in model_to_denom
model_to_numer[model_name] = []
model_to_denom[model_name] = []
model_to_sims[model_name].append(scene_similarity(reconstructed_scene, true_scene))
numer, denom = component_evaluator.eval_scene(reconstructed_scene, true_scene)
model_to_numer[model_name].append(numer)
model_to_denom[model_name].append(denom)
true_scenes_set.add(tuple(true_scene))
#%%
print("Model \t Scene similarity")
for model_name, sims in model_to_sims.items():
print(f"{model_name:17s}\t {np.array(sims).mean():.2f}")
sims = np.array([scene_similarity(true_to_human[scene], scene) for scene in true_scenes_set])
print(f"{'human':17s}\t {np.array(sims).mean():.2f}")
#%%
print()
print()
#%%
print("Model \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for model_name in model_to_numer:
numer = model_to_numer[model_name]
denom = model_to_denom[model_name]
components = np.array(numer).sum(0) / np.array(denom).sum(0)
components = 1.0 - (components - component_evaluator.human_scores) / (component_evaluator.baseline_scores - component_evaluator.human_scores)
print(f"{model_name:17s}\t", "\t".join(f"{num: .6f}" for num in components))
human_numer_denom = [component_evaluator.eval_scene(true_to_human[scene], scene) for scene in true_scenes_set]
components = np.array([x[0] for x in human_numer_denom]).sum(0) / np.array([x[1] for x in human_numer_denom]).sum(0)
components = 1.0 - (components - component_evaluator.human_scores) / (component_evaluator.baseline_scores - component_evaluator.human_scores)
print(f"{'human':17s}\t", "\t".join(f"{num: .6f}" for num in components))
#%%
| codraw-models-master | eval_transcripts.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from baseline1_models import load_baseline1
from baseline2_models import load_baseline2
import model
from model import make_fns, eval_fns
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
models_baseline1 = load_baseline1()
models_baseline2 = load_baseline2()
# %%
tellers = [
('teller_nn', (models_baseline1['teller_nn_a'], models_baseline1['teller_nn_b'])),
# ('teller_c2seq', (models_baseline1['teller_c2seq_a'], models_baseline1['teller_c2seq_b'])),
('teller_pragmaticnn', (models_baseline2['teller_pragmaticnn_a'], models_baseline2['teller_pragmaticnn_b'])),
]
drawers = [
# ('drawer_nn', (models_baseline1['drawer_nn_a'], models_baseline1['drawer_nn_b'])),
# ('drawer_sim', (models_baseline1['drawer_sim_a'], models_baseline1['drawer_sim_b'])),
# ('drawer_bow2c', (models_baseline1['drawer_bow2c_a'], models_baseline1['drawer_bow2c_b'])),
('drawer_bow2bce', (models_baseline1['drawer_bow2bce_a'], models_baseline1['drawer_bow2bce_b'])),
('drawer_bowcanvas2bce', (models_baseline1['drawer_bowcanvas2bce_a'], models_baseline1['drawer_bowcanvas2bce_b'])),
('drawer_lstmaddonly', (models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b'])),
]
# %%
print()
human_sims = np.array([
scene_similarity(human_scene, true_scene)
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes('dev')
])
print(f"Human scene similarity: mean={human_sims.mean():.6f} std={human_sims.std():.6f} median={np.median(human_sims):.6f}")
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Scene similarity")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", sims.mean())
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Scene similarity")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", sims.mean())
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
components = component_evaluator.eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
# %%
# %%
# %%
# %%
# %%
# %%
| codraw-models-master | baseline2_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from IPython.display import SVG, display
from PIL import Image
from binascii import b2a_base64
PNGS_PATH = (Path(__file__).parent / '../CoDraw/Pngs').resolve()
EMBED_PNGS_PATH = '../../CoDraw/Pngs'
DEPTH_SCALE = [1.0, 0.7, 0.49]
IMAGE_NAMES = [
's_0s.png',
's_1s.png',
's_2s.png',
's_3s.png',
's_4s.png',
's_5s.png',
's_6s.png',
's_7s.png',
'p_0s.png',
'p_1s.png',
'p_2s.png',
'p_3s.png',
'p_4s.png',
'p_5s.png',
'p_6s.png',
'p_7s.png',
'p_8s.png',
'p_9s.png',
'hb0_0s.png',
'hb0_1s.png',
'hb0_2s.png',
'hb0_3s.png',
'hb0_4s.png',
'hb0_5s.png',
'hb0_6s.png',
'hb0_7s.png',
'hb0_8s.png',
'hb0_9s.png',
'hb0_10s.png',
'hb0_11s.png',
'hb0_12s.png',
'hb0_13s.png',
'hb0_14s.png',
'hb0_15s.png',
'hb0_16s.png',
'hb0_17s.png',
'hb0_18s.png',
'hb0_19s.png',
'hb0_20s.png',
'hb0_21s.png',
'hb0_22s.png',
'hb0_23s.png',
'hb0_24s.png',
'hb0_25s.png',
'hb0_26s.png',
'hb0_27s.png',
'hb0_28s.png',
'hb0_29s.png',
'hb0_30s.png',
'hb0_31s.png',
'hb0_32s.png',
'hb0_33s.png',
'hb0_34s.png',
'hb1_0s.png',
'hb1_1s.png',
'hb1_2s.png',
'hb1_3s.png',
'hb1_4s.png',
'hb1_5s.png',
'hb1_6s.png',
'hb1_7s.png',
'hb1_8s.png',
'hb1_9s.png',
'hb1_10s.png',
'hb1_11s.png',
'hb1_12s.png',
'hb1_13s.png',
'hb1_14s.png',
'hb1_15s.png',
'hb1_16s.png',
'hb1_17s.png',
'hb1_18s.png',
'hb1_19s.png',
'hb1_20s.png',
'hb1_21s.png',
'hb1_22s.png',
'hb1_23s.png',
'hb1_24s.png',
'hb1_25s.png',
'hb1_26s.png',
'hb1_27s.png',
'hb1_28s.png',
'hb1_29s.png',
'hb1_30s.png',
'hb1_31s.png',
'hb1_32s.png',
'hb1_33s.png',
'hb1_34s.png',
'a_0s.png',
'a_1s.png',
'a_2s.png',
'a_3s.png',
'a_4s.png',
'a_5s.png',
'c_0s.png',
'c_1s.png',
'c_2s.png',
'c_3s.png',
'c_4s.png',
'c_5s.png',
'c_6s.png',
'c_7s.png',
'c_8s.png',
'c_9s.png',
'e_0s.png',
'e_1s.png',
'e_2s.png',
'e_3s.png',
'e_4s.png',
'e_5s.png',
'e_6s.png',
't_0s.png',
't_1s.png',
't_2s.png',
't_3s.png',
't_4s.png',
't_5s.png',
't_6s.png',
't_7s.png',
't_8s.png',
't_9s.png',
't_10s.png',
't_11s.png',
't_12s.png',
't_13s.png',
't_14s.png',
]
def get_image_name(clipart):
if clipart.idx < 18:
return IMAGE_NAMES[clipart.idx]
elif clipart.idx < 18 + 2:
return IMAGE_NAMES[18 + (clipart.idx - 18) * 35 + clipart.subtype]
else:
return IMAGE_NAMES[clipart.idx + 34*2]
def snippet_from_clipart(clipart, inline_images=True):
img_name = get_image_name(clipart)
img_path = PNGS_PATH / img_name
img_pil = Image.open(img_path)
width, height = img_pil.width, img_pil.height
if inline_images:
data = b2a_base64(img_path.read_bytes()).decode('ascii')
scale = DEPTH_SCALE[clipart.depth]
width = width * scale
height = height * scale
flip = -1 if bool(clipart.flip) else 1
x = clipart.x - width / 2.0
y = clipart.y - height / 2.0
flipped_sub_x = (-width) if clipart.flip else 0
if inline_images:
href = f"data:image/png;base64,{data}"
else:
href = f"{EMBED_PNGS_PATH}/{img_name}"
return f"""
<g transform="translate({x}, {y})">
<image href="{href}" x="{flipped_sub_x}" y="0" width="{width}" height="{height}"
transform="scale({flip}, 1)"/>
</g>
"""
def svg_from_cliparts(cliparts, color=None, label=None, inline_images=True, scale=1.0):
img_path = PNGS_PATH / 'background.png'
if inline_images:
data = b2a_base64(img_path.read_bytes()).decode('ascii')
href = f"data:image/png;base64,{data}"
else:
href = f"{EMBED_PNGS_PATH}/background.png"
svg = f"""
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{int(500*scale)}px" height="{int(400*scale)}px" viewBox="0 0 500 400">
<image href="{href}" x="0" y="0" width="100%" height="100%"/>
"""
if color:
svg += f"""
<rect fill="{color}" opacity="0.2" x="0" y="0" width="100%" height="100%"/>
"""
# Sun (idx=3) is always in the back; this hack is also in Abs.js
# All sky objects (idx < 8) are behind any non-sky objects
# Past that, objects are sorted by depth and then by index
for clipart in sorted(cliparts, key=lambda c: c.render_order_key):
svg += snippet_from_clipart(clipart, inline_images=inline_images)
if label:
svg += f"""<text x="95%" y="8%" style="text-anchor: end">{label}</text>"""
svg += "</svg>"
return svg
def display_cliparts(cliparts, color=None, label=None, scale=1.0):
display(SVG(svg_from_cliparts(cliparts, color, label, scale=scale)))
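# A non-notebook sketch: svg_from_cliparts returns a plain SVG string, so it can
# simply be written to disk. The output path is an illustrative assumption.
def save_scene_svg(cliparts, out_path='scene.svg', label=None):
    svg = svg_from_cliparts(cliparts, label=label, inline_images=True)
    Path(out_path).write_text(svg)
    return out_path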
| codraw-models-master | abs_render.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import BOWAddUpdateData
from baseline2_models import BOWAddOnlyDrawer, LSTMAddOnlyDrawer
import model
from model import make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek
# %%
data_bowaddupdate_a = BOWAddUpdateData('a')
data_bowaddupdate_b = BOWAddUpdateData('b')
# %%
# drawer_bowaddonly_a = BOWAddOnlyDrawer(data_bowaddupdate_a)
# drawer_bowaddonly_b = BOWAddOnlyDrawer(data_bowaddupdate_b)
#
# optimizer_bowaddonly_a = torch.optim.Adam(drawer_bowaddonly_a.parameters())
# optimizer_bowaddonly_b = torch.optim.Adam(drawer_bowaddonly_b.parameters())
#%%
# for epoch in range(15):
# drawer_bowaddonly_a.train()
# for num, ex in enumerate(drawer_bowaddonly_a.datagen.get_examples_batch()):
# optimizer_bowaddonly_a.zero_grad()
# loss = drawer_bowaddonly_a.forward(ex)
# loss.backward()
# optimizer_bowaddonly_a.step()
#
# print(f'Done epoch {epoch} loss {float(loss)}')
# if epoch % 1 == 0:
# for split in ('a',):
# sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, sims.mean())
#
# sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, 'before', sims.mean())
#
# sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, 'after', sims.mean())
# %%
drawer_lstmaddonly_a = LSTMAddOnlyDrawer(data_bowaddupdate_a)
drawer_lstmaddonly_b = LSTMAddOnlyDrawer(data_bowaddupdate_b)
optimizer_lstmaddonly_a = torch.optim.Adam(drawer_lstmaddonly_a.parameters())
optimizer_lstmaddonly_b = torch.optim.Adam(drawer_lstmaddonly_b.parameters())
#%%
for epoch in range(15):
drawer_lstmaddonly_a.train()
for num, ex in enumerate(drawer_lstmaddonly_a.datagen.get_examples_batch()):
optimizer_lstmaddonly_a.zero_grad()
loss = drawer_lstmaddonly_a.forward(ex)
loss.backward()
optimizer_lstmaddonly_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'before', sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'after', sims.mean())
#%%
for epoch in range(15):
drawer_lstmaddonly_b.train()
for num, ex in enumerate(drawer_lstmaddonly_b.datagen.get_examples_batch()):
optimizer_lstmaddonly_b.zero_grad()
loss = drawer_lstmaddonly_b.forward(ex)
loss.backward()
optimizer_lstmaddonly_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'before', sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'after', sims.mean())
# %%
lstmaddonly_specs = dict(
drawer_lstmaddonly_a = drawer_lstmaddonly_a.spec,
drawer_lstmaddonly_b = drawer_lstmaddonly_b.spec,
)
#%%
torch.save(lstmaddonly_specs, Path('models/lstmaddonly.pt'))
| codraw-models-master | baseline2_train.py |