python_code (stringlengths 0 to 1.02M) | repo_name (stringlengths 9 to 48) | file_path (stringlengths 5 to 114)
---|---|---|
from memory_transformer_xl import MemoryTransformerXL
from memory_transformer_xl.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 16
MAX_BATCH_SIZE = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LENGTH = 512
GENERATE_LENGTH = 1024
SEQ_LEN = 512
NUM_SEGMENTS = 4
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = MemoryTransformerXL(
num_tokens = 256,
dim = 512,
depth = 8,
seq_len = SEQ_LEN,
mem_len = SEQ_LEN,
lmem_len = SEQ_LEN // 4,
heads = 8,
memory_layers = [6,7,8]
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len, segments):
super().__init__()
self.data = data
self.seq_len = seq_len
self.segments = segments
self.total_len = seq_len * segments
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.total_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.total_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.total_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN, NUM_SEGMENTS)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN, NUM_SEGMENTS)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
grad_accum_every = BATCH_SIZE / MAX_BATCH_SIZE
for loss, is_last in model(next(train_loader), max_batch_size = MAX_BATCH_SIZE, return_loss = True):
(loss / grad_accum_every).backward(retain_graph = True)
print(f'training loss: {loss.item():.4f}')
if is_last:
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
for loss, _ in model(next(val_loader), return_loss = True):
print(f'validation loss: {loss.item():.4f}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:PRIME_LENGTH]
prime = decode_tokens(inp)
print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| memory-transformer-xl-master | examples/enwik8_simple/train.py |
import math
from functools import partial
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
# structs
Return = namedtuple('Return', ['loss', 'is_last_batch'])
# helper functions
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
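# Illustrative sampling sketch: how the two filtering helpers above are usually
# combined with temperature and multinomial sampling. The shapes and the 0.9
# threshold below are assumed example values, not settings taken from this file.
#
#   logits = torch.randn(2, 256)                 # (batch, vocab) raw scores
#   filtered = top_k(logits, thres = 0.9)        # keep roughly the top 10% of tokens
#   probs = F.softmax(filtered / 1.0, dim = -1)  # -inf logits become zero probability
#   next_token = torch.multinomial(probs, 1)     # (batch, 1) sampled token ids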
# main class
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.seq_len = net.seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
# take care of default masking
full_mask_like = lambda x: torch.full_like(x, True, dtype=torch.bool, device=x.device)
mask = kwargs.pop('mask', None)
if mask is None:
mask = full_mask_like(out)
# take care of a primed sequence of any length
mem = None
*primes, out = out.split(self.seq_len, dim=1)
*prime_masks, mask = mask.split(self.seq_len, dim=1)
for prime, prime_mask in zip(primes, prime_masks):
_, mem = self.net(prime, memories = mem, mask = prime_mask, **kwargs)
# generate until hit sequence length
input_len = out.shape[1]
for _ in range(seq_len):
logits, mem = self.net(out[:, -input_len:], memories = mem, mask = mask[:, -input_len:], **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
# unlike most models, inputs start from sequence length of 1 once full sequence length is filled
out = torch.cat((out, sample), dim=-1)
mask = F.pad(mask, (0, 1), value=True)
# append sample to accumulated output
input_len = input_len % self.seq_len
input_len += 1
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
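# Illustrative generation sketch (assumed example sizes; `model` is a hypothetical
# MemoryTransformerXL instance, not one constructed in this file):
#
#   wrapped = AutoregressiveWrapper(model)
#   prime = torch.randint(0, 256, (1, 128))         # (batch, prime_len) token ids
#   sampled = wrapped.generate(prime, 256)          # (1, 256) tokens sampled after the prime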
def forward(self, x, max_batch_size = None, return_loss = False, truncate_every = None, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
# help auto-solve an area of confusion around input masks in auto-regressive
# if user supplies a mask that is only off by one from the source sequence, resolve it for them
mask = kwargs.pop('mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
segment_fn = lambda x: x.split(self.seq_len, dim=1)
(xi, xo) = map(segment_fn, (xi, xo))
num_segments = len(xi)
mask = segment_fn(mask) if mask is not None else ((None,) * num_segments)
max_batch_size = x.shape[0] if max_batch_size is None else max_batch_size
split_batch_fn = lambda x: x.split(max_batch_size, dim=0)
grad_accumulate_every = math.ceil(x.shape[0] / max_batch_size)
mems = [None] * grad_accumulate_every
for ind, (xi_seg, xo_seg, mask_seg) in enumerate(zip(xi, xo, mask)):
xi_seg, xo_seg = map(split_batch_fn, (xi_seg, xo_seg))
mask_seg = split_batch_fn(mask_seg) if mask_seg is not None else ((None,) * grad_accumulate_every)
truncate = truncate_every is not None and ((ind + 1) % truncate_every) == 0
new_mems = []
for ind, (xi_seg_b, xo_seg_b, mask_seg_b, mem) in enumerate(zip(xi_seg, xo_seg, mask_seg, mems)):
is_last = ind == (grad_accumulate_every - 1)
logits, new_mem = self.net(xi_seg_b, mask = mask_seg_b, memories = mem, detach_lmem = truncate, **kwargs)
new_mems.append(new_mem)
loss = F.cross_entropy(logits.transpose(1, 2), xo_seg_b, ignore_index = self.ignore_index)
yield Return(loss, is_last)
mems = new_mems
| memory-transformer-xl-master | memory_transformer_xl/autoregressive_wrapper.py |
from memory_transformer_xl.memory_transformer_xl import MemoryTransformerXL
| memory-transformer-xl-master | memory_transformer_xl/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
from mogrifier import Mogrifier
import math
from collections import namedtuple
from functools import partial
from inspect import isfunction
# structs
Memory = namedtuple('Memory', ['short', 'long'])
# helper functions
def to(t):
return {'dtype': t.dtype, 'device': t.device}
def cast_tuple(el):
return el if isinstance(el, tuple) else (el,)
def default(x, val):
if x is not None:
return x
return val if not isfunction(val) else val()
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def reshape_dim(t, dim, split_dims):
shape = list(t.shape)
num_dims = len(shape)
dim = (dim + num_dims) % num_dims
shape[dim:dim+1] = split_dims
return t.reshape(shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def queue_fifo(*args, length, dim=-2):
queue = torch.cat(args, dim=dim)
if length > 0:
return split_at_index(dim, -length, queue)
device = queue.device
shape = list(queue.shape)
shape[dim] = 0
return queue, torch.empty(shape, device = device)
def shift(x):
*_, i, j = x.shape
zero_pad = torch.zeros((*_, i, i), **to(x))
x = torch.cat([x, zero_pad], -1)
l = i + j - 1
x = x.view(*_, -1)
zero_pad = torch.zeros(*_, -x.size(-1) % l, **to(x))
shifted = torch.cat([x, zero_pad], -1).view(*_, -1, l)
return shifted[..., :i, i - 1:]
def iterate_tensor(t):
length = t.shape[0]
for ind in range(length):
yield t[ind]
def init_parameter(shape, dim):
t = torch.zeros(shape)
std = 1 / math.sqrt(dim)
t.uniform_(-std, std)
return nn.Parameter(t)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
# neuromodulated bistable recurrent cell and other gating classes
class nBRC(nn.Module):
def __init__(self, dims, hidden_dims):
super().__init__()
self.Ua = nn.Linear(dims, hidden_dims)
self.Wa = nn.Linear(dims, hidden_dims)
self.Uc = nn.Linear(dims, hidden_dims)
self.Wc = nn.Linear(dims, hidden_dims)
self.U = nn.Linear(dims, hidden_dims)
def forward(self, x, h):
l = lambda linear, tensor: F.linear(tensor, linear.weight.clone(), linear.bias.clone())
a = 1 + torch.tanh(l(self.Ua, x) + l(self.Wa, h))
c = torch.sigmoid(l(self.Uc, x) + l(self.Wc, h))
return c * h + (1 - c) * torch.tanh(l(self.U, x) + a * h)
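# The update above is the neuromodulated bistable recurrent cell (nBRC), restated:
#   a  = 1 + tanh(Ua·x + Wa·h)                   modulation term in (0, 2)
#   c  = sigmoid(Uc·x + Wc·h)                    update gate in (0, 1)
#   h' = c * h + (1 - c) * tanh(U·x + a * h)
# `l` runs each linear layer through cloned weights, presumably so the shared
# parameters can be reused when the cell is applied repeatedly within one graph.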
class GRUGating(nn.Module):
def __init__(self, dim, fn, mogrify = False):
super().__init__()
self.dim = dim
self.fn = fn
self.gru = nBRC(dim, dim)
self.mogrify = Mogrifier(dim, factorize_k = dim // 4) if mogrify else None
def forward(self, x, **kwargs):
shape = x.shape
dim = self.dim
y = self.fn(x, **kwargs)
if self.mogrify is not None:
y, x = self.mogrify(y, x)
gated_output = self.gru(
y.reshape(-1, dim),
x.reshape(-1, dim)
)
return gated_output.reshape(shape)
# feedforward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# attention.
class SelfAttention(nn.Module):
def __init__(self, dim, seq_len, mem_len, lmem_len, heads = 8, attn_dropout = 0., dropout = 0., memory_attn_dropout = 0., one_kv_head = False, num_mem_kv = 4):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by the number of heads'
self.heads = heads
self.dim_head = dim // heads
self.seq_len = seq_len
self.mem_len = mem_len
self.lmem_len = lmem_len
self.scale = self.dim_head ** (-0.5)
self.to_q = nn.Linear(dim, dim, bias = False)
kv_dim = self.dim_head if one_kv_head else dim
self.to_kv = nn.Linear(dim, kv_dim * 2, bias = False)
self.to_out = nn.Linear(dim, dim)
self.mem_kv = init_parameter((1, num_mem_kv, dim), dim)
self.attn_dropout = nn.Dropout(attn_dropout)
self.dropout = nn.Dropout(dropout)
self.memory_attn_dropout = nn.Dropout(memory_attn_dropout)
def forward(self, x, memories = None, pos_emb = None, input_mask = None, calc_memory = True, **kwargs):
b, t, e, h, dim_h = *x.shape, self.heads, self.dim_head
memories = default(memories, (None, None))
mem, lmem = memories
init_mem = lambda: torch.empty(b, 0, e, **to(x))
mem = default(mem, init_mem)
lmem = default(lmem, init_mem)
mem_kv = self.mem_kv.expand(b, -1, -1)
mem_len, lmem_len, mem_kv_len = map(lambda t: t.shape[1], (mem, lmem, mem_kv))
q = self.to_q(x)
kv_input = torch.cat((mem_kv, lmem, mem, x), dim=1)
kv_len = kv_input.shape[1]
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
merge_heads = lambda x: reshape_dim(x, -1, (-1, dim_h)).transpose(1, 2)
q, k, v = map(merge_heads, (q, k, v))
k, v = map(lambda x: x.expand(-1, h, -1, -1), (k, v))
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
mask_value = max_neg_value(dots)
if pos_emb is not None:
pos_emb = pos_emb[:, -kv_len:].type(q.dtype)
pos_dots = torch.einsum('bhid,hjd->bhij', q, pos_emb) * self.scale
pos_dots = shift(pos_dots)
pos_dots = F.pad(pos_dots, (dots.shape[-1] - pos_dots.shape[-1], 0), value = 0.)
dots = dots + pos_dots
if input_mask is not None:
mask = input_mask[:, None, :, None] * input_mask[:, None, None, :]
mask = F.pad(mask, (mem_len + lmem_len + mem_kv_len, 0), value = True)
dots.masked_fill_(~mask, mask_value)
total_mem_len = mem_len + lmem_len + mem_kv_len
mask = torch.ones(t, t + total_mem_len, **to(x)).triu_(diagonal = 1 + total_mem_len).bool()
dots.masked_fill_(mask[None, None, ...], mask_value)
attn = dots.softmax(dim=-1)
attn = self.attn_dropout(attn)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = out.transpose(1, 2).reshape(b, t, -1)
out = self.to_out(out)
return self.dropout(out)
# memory attention network
def linear_attn(q, k, v):
q, k = q.softmax(dim=-1), k.softmax(dim=-2)
context = torch.einsum('bhnd,bhne->bhde', k, v)
out = torch.einsum('bhnd,bhde->bhne', q, context)
return out
def full_attn(q, k, v):
dots = torch.einsum('bhid,bhjd->bhij', q, k) * q.shape[-1] ** -0.5
dots = dots.softmax(dim=-1)
out = torch.einsum('bhij,bhjd->bhid', dots, v)
return out
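# Complexity note: full_attn materialises an (n x n) attention matrix, while
# linear_attn contracts k and v into a (dim_head x dim_head) context first and
# is linear in sequence length. Illustrative shape check with assumed sizes:
#
#   q = k = v = torch.randn(1, 4, 128, 32)       # (batch, heads, n, dim_head)
#   full_attn(q, k, v).shape                     # torch.Size([1, 4, 128, 32])
#   linear_attn(q, k, v).shape                   # torch.Size([1, 4, 128, 32])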
class LinearSelfAttention(nn.Module):
def __init__(self, dim, depth, heads = 8):
super().__init__()
self.dim_head = dim // heads
self.norm = nn.LayerNorm(dim, elementwise_affine = False)
self.to_q = init_parameter((dim, dim), dim)
self.to_kv = init_parameter((dim, 2 * dim), dim)
self.to_out = init_parameter((dim, dim), dim)
def forward(self, x, hiddens = None):
dim_head = self.dim_head
w_q, w_kv, w_out = map(torch.clone, (self.to_q, self.to_kv, self.to_out))
normed_lmem = self.norm(x)
q = torch.einsum('bnd,de->bne', normed_lmem, w_q)
kv_input = torch.cat((normed_lmem, hiddens), dim=1)
k, v = torch.einsum('bnd,de->bne', kv_input, w_kv).chunk(2, dim=-1)
q, k, v = map(lambda t: reshape_dim(t, -1, (-1, dim_head)).transpose(-2, -3), (q, k, v))
out = linear_attn(q, k, v)
out = out.transpose(2, 3).reshape_as(x)
out = torch.einsum('bnd,de->bne', out, w_out)
return out
class MemoryAttentionNetwork(nn.Module):
def __init__(self, dim, num_memory_depth, mem_len, lmem_len, heads = 4, num_attn_steps = 2, num_mem_kv = 4, mem_write_iters = 2):
super().__init__()
self.num_memory_depth = num_memory_depth
self.mem_len = mem_len
self.lmem_len = lmem_len
self.dim = dim
dim_head = dim // heads
self.dim_head = dim_head
self.depth_emb = init_parameter((num_memory_depth, 1, 1, 1), dim)
self.init_lmem = init_parameter((1, 1, dim), dim)
self.lmem_pos_emb = init_parameter((1, lmem_len, dim), dim)
self.mem_kv = init_parameter((1, num_mem_kv, dim), dim)
self.attn = LinearSelfAttention(dim, num_memory_depth, heads = heads)
self.gate = nBRC(dim, dim)
self.mem_write_iters = mem_write_iters
def forward(self, lmem, smem, hiddens, detach_lmem = False):
batch, dim, dim_head, mem_depth, lmem_len = lmem.shape[0], self.dim, self.dim_head, self.num_memory_depth, self.lmem_len
# properly detach hidden state, and detach long term memory if truncate signal is given
hiddens = hiddens.detach()
if detach_lmem:
lmem = lmem.detach()
# initialize long term memory state if none provided
if lmem is None or lmem.shape[1] == 0:
lmem = self.init_lmem.clone().expand(batch, lmem_len, -1)
# use efficient linear attention for updating long term memory
next_lmem = lmem + self.lmem_pos_emb
hiddens_and_smem = torch.cat((smem, hiddens), dim=-2)
all_hiddens = (hiddens_and_smem + self.depth_emb).transpose(0, 1).reshape(batch, -1, dim)
all_hiddens = torch.cat((all_hiddens, self.mem_kv.expand(batch, -1, -1)), dim=1)
for _ in range(self.mem_write_iters):
attn_out = self.attn(next_lmem, hiddens = all_hiddens)
next_lmem = self.gate(attn_out, next_lmem)
# fifo queue the short term memory
_, next_mem = queue_fifo(smem, hiddens, length = self.mem_len, dim = 2)
return Memory(short = next_mem.detach(), long = next_lmem)
# transformer
class MemoryTransformerXL(nn.Module):
def __init__(self, num_tokens, dim, seq_len, depth, emb_dim = None, memory_layers = None, mem_len = None, lmem_len = None, heads = 8, gru_gated_residual = True, mogrify_gru = False, attn_dropout = 0., ff_glu = False, ff_dropout = 0., attn_layer_dropout = 0., one_kv_head = False, num_mem_kv = 0, mem_write_iters = 2):
super().__init__()
emb_dim = default(emb_dim, dim)
mem_len = default(mem_len, seq_len)
lmem_len = default(lmem_len, mem_len)
memory_layers = default(memory_layers, list(range(1, depth + 1)))
assert all([layer > 0 and layer <= depth for layer in memory_layers]), 'one of the indicated memory layers is invalid'
self.mem_len = mem_len
self.seq_len = seq_len
self.depth = depth
self.memory_layers = list(memory_layers)
self.token_emb = nn.Embedding(num_tokens, emb_dim)
self.to_model_dim = nn.Identity() if emb_dim == dim else nn.Linear(emb_dim, dim)
seq_and_mem_len = seq_len + mem_len + lmem_len
self.pos_emb = nn.Parameter(torch.zeros(heads, seq_and_mem_len, dim // heads))
self.to_logits = nn.Sequential(
nn.Identity() if emb_dim == dim else nn.Linear(dim, emb_dim),
nn.Linear(emb_dim, num_tokens)
)
wrapper = partial(GRUGating, dim, mogrify = mogrify_gru) if gru_gated_residual else Residual
self.attn_layers = nn.ModuleList([wrapper(PreNorm(dim, SelfAttention(dim, seq_len, mem_len, lmem_len, heads, dropout = attn_layer_dropout, attn_dropout = attn_dropout, one_kv_head = one_kv_head, num_mem_kv = num_mem_kv))) for _ in range(depth)])
self.ff_layers = nn.ModuleList([wrapper(PreNorm(dim, FeedForward(dim, dropout = ff_dropout, glu = ff_glu))) for _ in range(depth)])
self.memory_network = MemoryAttentionNetwork(dim, len(self.memory_layers), mem_len, lmem_len, num_mem_kv = num_mem_kv, mem_write_iters = mem_write_iters)
def forward(self, x, memories = None, mask = None, detach_lmem = False):
x = self.token_emb(x)
x = self.to_model_dim(x)
b, t, d = x.shape
assert t <= self.seq_len, f'input contains a sequence length {t} that is greater than the designated maximum sequence length {self.seq_len}'
memories = default(memories, (None, None))
mem, lmem = memories
num_memory_layers = len(self.memory_layers)
mem = default(mem, lambda: torch.empty(num_memory_layers, b, 0, d, **to(x)))
lmem = default(lmem, lambda: torch.empty(b, 0, d, **to(x)))
mem_len, lmem_len = map(lambda t: t.shape[2], (mem, lmem))
total_len = mem_len + lmem_len + self.seq_len
pos_emb = self.pos_emb[:, (self.seq_len - t):total_len]
mem_iter = iterate_tensor(mem)
hiddens = []
for ind, (attn, ff) in enumerate(zip(self.attn_layers, self.ff_layers)):
layer_num = ind + 1
use_memory = layer_num in self.memory_layers
memories = (next(mem_iter), lmem) if use_memory else None
if use_memory:
hiddens.append(x)
x = attn(x, memories = memories, input_mask = mask, pos_emb = pos_emb)
x = ff(x)
hiddens = torch.stack(hiddens)
out = self.to_logits(x)
# calculate next memory state
# only push hidden to short term memory if input sequence length is full
if t < self.mem_len:
return out, Memory(short = mem, long = lmem)
next_memory = self.memory_network(lmem, mem, hiddens, detach_lmem = detach_lmem)
return out, next_memory
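# Illustrative usage sketch: consecutive segments threading the returned
# Memory(short, long) state back in. All hyperparameters here are assumed
# example values, not recommendations.
#
#   model = MemoryTransformerXL(num_tokens = 256, dim = 128, depth = 4,
#                               seq_len = 64, mem_len = 64, lmem_len = 16)
#   seg1 = torch.randint(0, 256, (1, 64))
#   seg2 = torch.randint(0, 256, (1, 64))
#   logits1, mems = model(seg1)                    # memories start out empty
#   logits2, mems = model(seg2, memories = mems)   # second segment attends to stored memories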
| memory-transformer-xl-master | memory_transformer_xl/memory_transformer_xl.py |
from setuptools import setup, find_packages
setup(
name = 'linformer',
packages = find_packages(),
version = '0.2.1',
license='MIT',
description = 'Linformer implementation in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/linformer',
keywords = ['attention', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | linformer-master | setup.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
def layer_drop(layers, prob):
to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob
blocks = [block for block, drop in zip(layers, to_drop) if not drop]
blocks = layers[:1] if len(blocks) == 0 else blocks
return blocks
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}, layer_dropout = 0.):
super().__init__()
self.args_route = args_route
self.layer_dropout = layer_dropout
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
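# Illustrative usage sketch: pairing (f, g) blocks into a ReversibleSequence so
# inner activations are recomputed in backward_pass instead of stored. The toy
# blocks and sizes below are assumptions for the example.
#
#   blocks = nn.ModuleList([
#       nn.ModuleList([nn.Sequential(nn.LayerNorm(64), nn.Linear(64, 64)),
#                      nn.Sequential(nn.LayerNorm(64), nn.Linear(64, 64))])
#       for _ in range(3)])
#   rev = ReversibleSequence(blocks)
#   x = torch.randn(2, 16, 64)
#   y = rev(x)        # input is duplicated along the last dim internally; y is (2, 16, 64)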
| linformer-master | linformer/reversible.py |
from linformer.linformer import LinformerLM, Linformer, LinformerSelfAttention
| linformer-master | linformer/__init__.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from linformer.reversible import ReversibleSequence, SequentialSequence
# helper functions
def default(val, default_val):
return val if val is not None else default_val
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return x + self.fn(x)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
class LinformerSelfAttention(nn.Module):
def __init__(self, dim, seq_len, k = 256, heads = 8, dim_head = None, one_kv_head = False, share_kv = False, dropout = 0.):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by the number of heads'
self.seq_len = seq_len
self.k = k
self.heads = heads
dim_head = default(dim_head, dim // heads)
self.dim_head = dim_head
self.to_q = nn.Linear(dim, dim_head * heads, bias = False)
kv_dim = dim_head if one_kv_head else (dim_head * heads)
self.to_k = nn.Linear(dim, kv_dim, bias = False)
self.proj_k = nn.Parameter(init_(torch.zeros(seq_len, k)))
self.share_kv = share_kv
if not share_kv:
self.to_v = nn.Linear(dim, kv_dim, bias = False)
self.proj_v = nn.Parameter(init_(torch.zeros(seq_len, k)))
self.dropout = nn.Dropout(dropout)
self.to_out = nn.Linear(dim_head * heads, dim)
def forward(self, x, context = None, **kwargs):
b, n, d, d_h, h, k = *x.shape, self.dim_head, self.heads, self.k
kv_len = n if context is None else context.shape[1]
assert kv_len == self.seq_len, f'the sequence length of the key / values must be {self.seq_len} - {kv_len} given'
queries = self.to_q(x)
proj_seq_len = lambda args: torch.einsum('bnd,nk->bkd', *args)
kv_input = x if context is None else context
keys = self.to_k(kv_input)
values = self.to_v(kv_input) if not self.share_kv else keys
kv_projs = (self.proj_k, self.proj_v if not self.share_kv else self.proj_k)
# project keys and values along the sequence length dimension to k
keys, values = map(proj_seq_len, zip((keys, values), kv_projs))
# merge head into batch for queries and key / values
queries = queries.reshape(b, n, h, -1).transpose(1, 2)
merge_key_values = lambda t: t.reshape(b, k, -1, d_h).transpose(1, 2).expand(-1, h, -1, -1)
keys, values = map(merge_key_values, (keys, values))
# attention
dots = torch.einsum('bhnd,bhkd->bhnk', queries, keys) * (d_h ** -0.5)
attn = dots.softmax(dim=-1)
attn = self.dropout(attn)
out = torch.einsum('bhnk,bhkd->bhnd', attn, values)
# split heads
out = out.transpose(1, 2).reshape(b, n, -1)
return self.to_out(out)
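# Shape note: the learned (seq_len, k) projections contract keys/values from
# (b, n, d) down to (b, k, d), so attention scores are (n x k) instead of (n x n).
# Illustrative sketch with assumed example sizes:
#
#   attn = LinformerSelfAttention(dim = 512, seq_len = 4096, k = 256, heads = 8)
#   x = torch.randn(1, 4096, 512)
#   out = attn(x)      # (1, 4096, 512); the attention matrix was (1, 8, 4096, 256)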
class Linformer(nn.Module):
def __init__(self, dim, seq_len, depth, k = 256, heads = 8, dim_head = None, one_kv_head = False, share_kv = False, reversible = False, dropout = 0.):
super().__init__()
layers = nn.ModuleList([])
for _ in range(depth):
attn = LinformerSelfAttention(dim, seq_len, k = k, heads = heads, dim_head = dim_head, one_kv_head = one_kv_head, share_kv = share_kv, dropout = dropout)
ff = FeedForward(dim, dropout = dropout)
layers.append(nn.ModuleList([
PreNorm(dim, attn),
PreNorm(dim, ff)
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
self.net = execute_type(layers)
def forward(self, x):
return self.net(x)
class LinformerLM(nn.Module):
def __init__(self, num_tokens, dim, seq_len, depth, k = 256, heads = 8, dim_head = None, one_kv_head = False, share_kv = False, reversible = False, dropout = 0.):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(seq_len, dim)
self.linformer = Linformer(dim, seq_len, depth, k = k, heads = heads, dim_head = dim_head,
one_kv_head = one_kv_head, share_kv = share_kv, reversible = reversible, dropout = dropout)
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x):
x = self.token_emb(x)
x = self.pos_emb(torch.arange(x.shape[1], device=x.device)) + x
x = self.linformer(x)
out = self.to_logits(x)
return out
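# Illustrative usage sketch: the input must be exactly seq_len tokens, since the
# key/value projections are tied to seq_len. Sizes below are assumed example values.
#
#   model = LinformerLM(num_tokens = 20000, dim = 512, seq_len = 1024, depth = 6, k = 256)
#   x = torch.randint(0, 20000, (1, 1024))
#   logits = model(x)  # (1, 1024, 20000)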
| linformer-master | linformer/linformer.py |
from setuptools import setup, find_packages
setup(
name = 'medical-chatgpt',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Medical ChatGPT',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/medical-chatgpt',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'reinforcement learning with human feedback'
],
install_requires=[
'einops>=0.6',
'django-ninja',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| medical-chatgpt-main | setup.py |
| medical-chatgpt-main | medical_chatgpt/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from inspect import isfunction
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
num_null_kv = 0,
dropout = 0.1
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = nn.LayerNorm(dim)
self.context_norm = nn.LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(2, num_null_kv, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None
):
b = x.shape[0]
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
if self.num_null_kv > 0:
null_k, null_v = repeat(self.null_kv, 'kv n d -> kv b n d', b = b).unbind(dim = 0)
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
sim = einsum('b h i d, b j d -> b h i j', q, k)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (self.num_null_kv, 0), value = 0.)
sim = sim + attn_bias
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
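# Illustrative usage sketch: cross-attention with learned null key/values, which
# give every query something to attend to even when the context mask is sparse.
# All sizes are assumed example values.
#
#   attn = Attention(dim = 512, dim_context = 256, heads = 8, num_null_kv = 2)
#   x = torch.randn(2, 128, 512)                 # queries
#   ctx = torch.randn(2, 64, 256)                # context to attend over
#   mask = torch.ones(2, 64).bool()              # True = attend, False = ignore
#   out = attn(x, context = ctx, mask = mask)    # (2, 128, 512)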
| medical-chatgpt-main | medical_chatgpt/medical_chatgpt.py |
import numpy as np
from string import ascii_uppercase, ascii_lowercase
import urllib.parse
import urllib.request
import time
def parse_a3m(a3m_lines=None, a3m_file=None, filter_qid=0.15, filter_cov=0.5, N=100000):
def seqid(a, b):
return sum(c1 == c2 for c1, c2 in zip(a, b))
def nongaps(a):
return sum(c != "-" for c in a)
def chk(seq, ref_seq):
rL = len(ref_seq)
L = nongaps(seq)
return not (L > filter_cov*rL and seqid(seq, ref_seq) > filter_qid*L)
rm_lower = str.maketrans('','',ascii_lowercase)
# prep inputs
if a3m_lines is None: a3m_lines = open(a3m_file,"r")
else: a3m_lines = a3m_lines.splitlines()
# parse inputs
n,nams,seqs,mtx = 0,[],[],[]
def do_filter():
seq = seqs[-1].translate(rm_lower)
if "_UPI" in nams[-1] or chk(seq,ref_seq):
nams.pop()
seqs.pop()
else:
# deletion matrix
deletion_vec = []
deletion_count = 0
for j in seqs[-1]:
if j.islower():
deletion_count += 1
else:
deletion_vec.append(deletion_count)
deletion_count = 0
mtx.append(deletion_vec)
seqs[-1] = seq
for line in a3m_lines:
line = line.rstrip()
if line.startswith(">"):
if n == 1:
ref_seq = seqs[0].translate(rm_lower)
if n >= 1:
# filter previous entry
do_filter()
# start new sequence entry
nam = line.split()[0][1:]
nams.append(nam)
seqs.append("")
n += 1
else:
seqs[-1] += line
# filter last entry
do_filter()
if len(seqs) > N+1:
print(f"found too many sequences ({len(seqs)}), taking the top{N} (sorted by qid)")
sid = np.argsort([seqid(seq,ref_seq) for seq in seqs])[::-1][:N+1]
seqs = [seqs[i] for i in sid]
mtx = [mtx[i] for i in sid]
nams = [nams[i] for i in sid]
return seqs[1:],mtx[1:],nams[1:]
def get_uni_jackhmmer(msa, mtx, lab, filter_qid=0.15, filter_cov=0.5):
'''filter entries to uniprot'''
lab_,msa_,mtx_ = [],[],[]
ref_seq = np.array(list(msa[0]))
rL = len(ref_seq)
for l,s,x in zip(lab[1:],msa[1:],mtx[1:]):
if l.startswith("UniRef"):
l = l.split("/")[0]
if "_UPI" not in l:
tar_seq = np.array(list(s))
L = (tar_seq != "-").sum()
qid = (ref_seq == tar_seq).sum()
if L > filter_cov * rL and qid > filter_qid * L:
lab_.append(l)
msa_.append(s)
mtx_.append(x)
return msa_, mtx_, lab_
def uni_num(ids):
########################################
pa = {a:0 for a in ascii_uppercase}
for a in ["O","P","Q"]: pa[a] = 1
ma = [[{} for k in range(6)],[{} for k in range(6)]]
for n,t in enumerate(range(10)):
for i in [0,1]:
for j in [0,4]: ma[i][j][str(t)] = n
for n,t in enumerate(list(ascii_uppercase)+list(range(10))):
for i in [0,1]:
for j in [1,2]: ma[i][j][str(t)] = n
ma[1][3][str(t)] = n
for n,t in enumerate(ascii_uppercase):
ma[0][3][str(t)] = n
for i in [0,1]: ma[i][5][str(t)] = n
########################################
nums = []
for uni in ids:
p = pa[uni[0]]
tot, num = 1,0
if len(uni) == 10:
for n,u in enumerate(reversed(uni[-4:])):
num += ma[p][n][u] * tot
tot *= len(ma[p][n].keys())
for n,u in enumerate(reversed(uni[:6])):
num += ma[p][n][u] * tot
tot *= len(ma[p][n].keys())
nums.append(num)
return nums
def map_retrieve(ids, call_uniprot=False):
if call_uniprot:
mode = "NF100" if "UniRef100" in ids[0] else "NF90"
url = 'https://www.uniprot.org/uploadlists/'
out = []
for i in range(0,len(ids),5000):
params = {
'from': mode,
'to': 'ACC',
'format': 'tab',
'query': " ".join(ids[i:i+5000])
}
data = urllib.parse.urlencode(params)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
with urllib.request.urlopen(req) as f:
response = f.read()
out += [line.split() for line in response.decode('utf-8').splitlines()]
time.sleep(5)
# combine mapping
mapping = {}
for i,j in out:
if i != "From":
if i not in mapping:
mapping[i] = [j]
else:
mapping[i].append(j)
else:
mapping = {}
for i in ids:
if i not in mapping:
mapping[i] = [i.split("_")[1]]
return mapping
def hash_it(_seq, _lab, _mtx, call_uniprot=False):
if _seq is None or _lab is None:
_seq, _lab = parse_a3m(a3m_lines)
_lab_to_seq = {L:S for L,S in zip(_lab,_seq)}
_lab_to_mtx = {L:M for L,M in zip(_lab,_mtx)}
# call uniprot
_lab_to_uni = map_retrieve(_lab, call_uniprot=call_uniprot)
_uni_to_lab = {}
for L,U in _lab_to_uni.items():
for u in U: _uni_to_lab[u] = L
_uni,__lab = [],[]
for U,L in _uni_to_lab.items():
_uni.append(U)
__lab.append(L)
_hash = uni_num(_uni)
_uni_to_hash = {u:h for u,h in zip(_uni,_hash)}
_hash_to_lab = {h:l for h,l in zip(_hash,__lab)}
_lab_to_hash = {}
for L,U in _lab_to_uni.items():
_lab_to_hash[L] = []
for u in U: _lab_to_hash[L].append(_uni_to_hash[u])
return {"_lab_to_seq":_lab_to_seq,
"_lab_to_mtx":_lab_to_mtx,
"_lab_to_hash":_lab_to_hash,
"_hash_to_lab":_hash_to_lab}
import tqdm.notebook
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
# keeping old function for compatability
def stitch(_hash_a,_hash_b, stitch_min=1, stitch_max=20, filter_id=None):
o = _stitch(_hash_a, _hash_b, stitch_min, stitch_max)
return (*o["seq"],*o["mtx"])
def _stitch(_hash_a,_hash_b, stitch_min=1, stitch_max=20):
_seq, _mtx, _lab, _delta_gene = [[],[]],[[],[]],[[],[]],[]
TOTAL = len(_hash_a["_lab_to_hash"])
with tqdm.notebook.tqdm(total=TOTAL, bar_format=TQDM_BAR_FORMAT) as pbar:
pbar.set_description("STITCHING")
H_A = np.asarray(list(_hash_a["_hash_to_lab"].keys()))
H_B = np.asarray(list(_hash_b["_hash_to_lab"].keys()))
def hit(h,H):
h = np.asarray(h)
match = np.abs(h[:,None]-H[None,:]).min(0)
match_min = match.min()
if match_min >= stitch_min and match_min <= stitch_max:
return True,H[match.argmin()],match_min
else:
return False,None,None
for n,(l_a,h_a) in enumerate(_hash_a["_lab_to_hash"].items()):
chk_b, h_b, dg = hit(h_a,H_B)
if chk_b:
l_b = _hash_b["_hash_to_lab"][h_b]
h_b = _hash_b["_lab_to_hash"][l_b]
chk_c, h_c, _ = hit(h_b,H_A)
if chk_c and _hash_a["_hash_to_lab"][h_c] == l_a:
_seq[0].append(_hash_a["_lab_to_seq"][l_a])
_mtx[0].append(_hash_a["_lab_to_mtx"][l_a])
_lab[0].append(l_a)
_seq[1].append(_hash_b["_lab_to_seq"][l_b])
_mtx[1].append(_hash_b["_lab_to_mtx"][l_b])
_lab[1].append(l_b)
_delta_gene.append(dg)
pbar.update()
return {"seq":_seq,
"mtx":_mtx,
"lab":_lab,
"delta_gene":_delta_gene}
| ColabFold-main | beta/pairmsa.py |
# fmt: off
############################################
# imports
############################################
import jax
import requests
import hashlib
import tarfile
import time
import pickle
import os
import re
import random
import tqdm.notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patheffects
from matplotlib import collections as mcoll
try:
import py3Dmol
except:
pass
from string import ascii_uppercase,ascii_lowercase
pymol_color_list = ["#33ff33","#00ffff","#ff33cc","#ffff00","#ff9999","#e5e5e5","#7f7fff","#ff7f00",
"#7fff7f","#199999","#ff007f","#ffdd5e","#8c3f99","#b2b2b2","#007fff","#c4b200",
"#8cb266","#00bfbf","#b27f7f","#fcd1a5","#ff7f7f","#ffbfdd","#7fffff","#ffff7f",
"#00ff7f","#337fcc","#d8337f","#bfff3f","#ff7fff","#d8d8ff","#3fffbf","#b78c4c",
"#339933","#66b2b2","#ba8c84","#84bf00","#b24c66","#7f7f7f","#3f3fa5","#a5512b"]
pymol_cmap = matplotlib.colors.ListedColormap(pymol_color_list)
alphabet_list = list(ascii_uppercase+ascii_lowercase)
aatypes = set('ACDEFGHIKLMNPQRSTVWY')
###########################################
# control gpu/cpu memory usage
###########################################
def rm(x):
'''remove data from device'''
jax.tree_util.tree_map(lambda y: y.device_buffer.delete(), x)
def to(x,device="cpu"):
'''move data to device'''
d = jax.devices(device)[0]
return jax.tree_util.tree_map(lambda y:jax.device_put(y,d), x)
def clear_mem(device="gpu"):
'''remove all data from device'''
backend = jax.lib.xla_bridge.get_backend(device)
for buf in backend.live_buffers(): buf.delete()
##########################################
# call mmseqs2
##########################################
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
def run_mmseqs2(x, prefix, use_env=True, use_filter=True,
use_templates=False, filter=None, host_url="https://a3m.mmseqs.com"):
def submit(seqs, mode, N=101):
n,query = N,""
for seq in seqs:
query += f">{n}\n{seq}\n"
n += 1
res = requests.post(f'{host_url}/ticket/msa', data={'q':query,'mode': mode})
try: out = res.json()
except ValueError: out = {"status":"UNKNOWN"}
return out
def status(ID):
res = requests.get(f'{host_url}/ticket/{ID}')
try: out = res.json()
except ValueError: out = {"status":"UNKNOWN"}
return out
def download(ID, path):
res = requests.get(f'{host_url}/result/download/{ID}')
with open(path,"wb") as out: out.write(res.content)
# process input x
seqs = [x] if isinstance(x, str) else x
# compatibility to old option
if filter is not None:
use_filter = filter
# setup mode
if use_filter:
mode = "env" if use_env else "all"
else:
mode = "env-nofilter" if use_env else "nofilter"
# define path
path = f"{prefix}_{mode}"
if not os.path.isdir(path): os.mkdir(path)
# call mmseqs2 api
tar_gz_file = f'{path}/out.tar.gz'
N,REDO = 101,True
# deduplicate and keep track of order
seqs_unique = sorted(list(set(seqs)))
Ms = [N+seqs_unique.index(seq) for seq in seqs]
# lets do it!
if not os.path.isfile(tar_gz_file):
TIME_ESTIMATE = 150 * len(seqs_unique)
with tqdm.notebook.tqdm(total=TIME_ESTIMATE, bar_format=TQDM_BAR_FORMAT) as pbar:
while REDO:
pbar.set_description("SUBMIT")
# Resubmit job until it goes through
out = submit(seqs_unique, mode, N)
while out["status"] in ["UNKNOWN","RATELIMIT"]:
# resubmit
time.sleep(5 + random.randint(0,5))
out = submit(seqs_unique, mode, N)
if out["status"] == "ERROR":
raise Exception(f'MMseqs2 API is giving errors. Please confirm your input is a valid protein sequence. If the error persists, please try again in an hour.')
if out["status"] == "MAINTENANCE":
raise Exception(f'MMseqs2 API is undergoing maintenance. Please try again in a few minutes.')
# wait for job to finish
ID,TIME = out["id"],0
pbar.set_description(out["status"])
while out["status"] in ["UNKNOWN","RUNNING","PENDING"]:
t = 5 + random.randint(0,5)
time.sleep(t)
out = status(ID)
pbar.set_description(out["status"])
if out["status"] == "RUNNING":
TIME += t
pbar.update(n=t)
#if TIME > 900 and out["status"] != "COMPLETE":
# # something failed on the server side, need to resubmit
# N += 1
# break
if out["status"] == "COMPLETE":
if TIME < TIME_ESTIMATE:
pbar.update(n=(TIME_ESTIMATE-TIME))
REDO = False
# Download results
download(ID, tar_gz_file)
# prep list of a3m files
a3m_files = [f"{path}/uniref.a3m"]
if use_env: a3m_files.append(f"{path}/bfd.mgnify30.metaeuk30.smag30.a3m")
# extract a3m files
if not os.path.isfile(a3m_files[0]):
with tarfile.open(tar_gz_file) as tar_gz:
tar_gz.extractall(path)
# templates
if use_templates:
templates = {}
print("seq\tpdb\tcid\tevalue")
for line in open(f"{path}/pdb70.m8","r"):
p = line.rstrip().split()
M,pdb,qid,e_value = p[0],p[1],p[2],p[10]
M = int(M)
if M not in templates: templates[M] = []
templates[M].append(pdb)
if len(templates[M]) <= 20:
print(f"{int(M)-N}\t{pdb}\t{qid}\t{e_value}")
template_paths = {}
for k,TMPL in templates.items():
TMPL_PATH = f"{prefix}_{mode}/templates_{k}"
if not os.path.isdir(TMPL_PATH):
os.mkdir(TMPL_PATH)
TMPL_LINE = ",".join(TMPL[:20])
os.system(f"curl -s https://a3m-templates.mmseqs.com/template/{TMPL_LINE} | tar xzf - -C {TMPL_PATH}/")
os.system(f"cp {TMPL_PATH}/pdb70_a3m.ffindex {TMPL_PATH}/pdb70_cs219.ffindex")
os.system(f"touch {TMPL_PATH}/pdb70_cs219.ffdata")
template_paths[k] = TMPL_PATH
# gather a3m lines
a3m_lines = {}
for a3m_file in a3m_files:
update_M,M = True,None
for line in open(a3m_file,"r"):
if len(line) > 0:
if "\x00" in line:
line = line.replace("\x00","")
update_M = True
if line.startswith(">") and update_M:
M = int(line[1:].rstrip())
update_M = False
if M not in a3m_lines: a3m_lines[M] = []
a3m_lines[M].append(line)
# return results
a3m_lines = ["".join(a3m_lines[n]) for n in Ms]
if use_templates:
template_paths_ = []
for n in Ms:
if n not in template_paths:
template_paths_.append(None)
print(f"{n-N}\tno_templates_found")
else:
template_paths_.append(template_paths[n])
template_paths = template_paths_
if isinstance(x, str):
return (a3m_lines[0], template_paths[0]) if use_templates else a3m_lines[0]
else:
return (a3m_lines, template_paths) if use_templates else a3m_lines
#########################################################################
# utils
#########################################################################
def get_hash(x):
return hashlib.sha1(x.encode()).hexdigest()
def homooligomerize(msas, deletion_matrices, homooligomer=1):
if homooligomer == 1:
return msas, deletion_matrices
else:
new_msas = []
new_mtxs = []
for o in range(homooligomer):
for msa,mtx in zip(msas, deletion_matrices):
num_res = len(msa[0])
L = num_res * o
R = num_res * (homooligomer-(o+1))
new_msas.append(["-"*L+s+"-"*R for s in msa])
new_mtxs.append([[0]*L+m+[0]*R for m in mtx])
return new_msas, new_mtxs
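# Worked example (assumed toy inputs): each homooligomeric copy gets the sequence
# in one block of positions and gaps elsewhere.
#
#   msas = [["AC", "A-"]]
#   mtxs = [[[0, 0], [0, 0]]]
#   new_msas, new_mtxs = homooligomerize(msas, mtxs, homooligomer=2)
#   # new_msas == [["AC--", "A---"], ["--AC", "--A-"]]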
# keeping typo for cross-compatibility
def homooliomerize(msas, deletion_matrices, homooligomer=1):
return homooligomerize(msas, deletion_matrices, homooligomer=homooligomer)
def homooligomerize_heterooligomer(msas, deletion_matrices, lengths, homooligomers):
'''
----- inputs -----
msas: list of msas
deletion_matrices: list of deletion matrices
lengths: list of lengths for each component in complex
homooligomers: list of number of homooligomeric copies for each component
----- outputs -----
(msas, deletion_matrices)
'''
if max(homooligomers) == 1:
return msas, deletion_matrices
elif len(homooligomers) == 1:
return homooligomerize(msas, deletion_matrices, homooligomers[0])
else:
frag_ij = [[0,lengths[0]]]
for length in lengths[1:]:
j = frag_ij[-1][-1]
frag_ij.append([j,j+length])
# for every msa
mod_msas, mod_mtxs = [],[]
for msa, mtx in zip(msas, deletion_matrices):
mod_msa, mod_mtx = [],[]
# for every sequence
for n,(s,m) in enumerate(zip(msa,mtx)):
# split sequence
_s,_m,_ok = [],[],[]
for i,j in frag_ij:
_s.append(s[i:j]); _m.append(m[i:j])
_ok.append(max([o != "-" for o in _s[-1]]))
if n == 0:
# if first query sequence
mod_msa.append("".join([x*h for x,h in zip(_s,homooligomers)]))
mod_mtx.append(sum([x*h for x,h in zip(_m,homooligomers)],[]))
elif sum(_ok) == 1:
# elif one fragment: copy each fragment to every homooligomeric copy
a = _ok.index(True)
for h_a in range(homooligomers[a]):
_blank_seq = [["-"*l]*h for l,h in zip(lengths,homooligomers)]
_blank_mtx = [[[0]*l]*h for l,h in zip(lengths,homooligomers)]
_blank_seq[a][h_a] = _s[a]
_blank_mtx[a][h_a] = _m[a]
mod_msa.append("".join(["".join(x) for x in _blank_seq]))
mod_mtx.append(sum([sum(x,[]) for x in _blank_mtx],[]))
else:
# else: copy fragment pair to every homooligomeric copy pair
for a in range(len(lengths)-1):
if _ok[a]:
for b in range(a+1,len(lengths)):
if _ok[b]:
for h_a in range(homooligomers[a]):
for h_b in range(homooligomers[b]):
_blank_seq = [["-"*l]*h for l,h in zip(lengths,homooligomers)]
_blank_mtx = [[[0]*l]*h for l,h in zip(lengths,homooligomers)]
for c,h_c in zip([a,b],[h_a,h_b]):
_blank_seq[c][h_c] = _s[c]
_blank_mtx[c][h_c] = _m[c]
mod_msa.append("".join(["".join(x) for x in _blank_seq]))
mod_mtx.append(sum([sum(x,[]) for x in _blank_mtx],[]))
mod_msas.append(mod_msa)
mod_mtxs.append(mod_mtx)
return mod_msas, mod_mtxs
def chain_break(idx_res, Ls, length=200):
# Minkyung's code
# add big enough number to residue index to indicate chain breaks
L_prev = 0
for L_i in Ls[:-1]:
idx_res[L_prev+L_i:] += length
L_prev += L_i
return idx_res
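# Worked example (assumed toy input): with the default length=200 and Ls=[3, 2],
# the residue index jumps by 200 at the chain boundary.
#
#   idx_res = np.arange(5)            # [0, 1, 2, 3, 4]
#   chain_break(idx_res, [3, 2])      # -> [0, 1, 2, 203, 204]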
##################################################
# plotting
##################################################
def plot_plddt_legend(dpi=100):
thresh = ['plDDT:','Very low (<50)','Low (60)','OK (70)','Confident (80)','Very high (>90)']
plt.figure(figsize=(1,0.1),dpi=dpi)
########################################
for c in ["#FFFFFF","#FF0000","#FFFF00","#00FF00","#00FFFF","#0000FF"]:
plt.bar(0, 0, color=c)
plt.legend(thresh, frameon=False,
loc='center', ncol=6,
handletextpad=1,
columnspacing=1,
markerscale=0.5,)
plt.axis(False)
return plt
def plot_ticks(Ls):
Ln = sum(Ls)
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([0,Ln],[L,L],color="black")
plt.plot([L,L],[0,Ln],color="black")
ticks = np.cumsum([0]+Ls)
ticks = (ticks[1:] + ticks[:-1])/2
plt.yticks(ticks,alphabet_list[:len(ticks)])
def plot_confidence(plddt, pae=None, Ls=None, dpi=100):
use_ptm = False if pae is None else True
if use_ptm:
plt.figure(figsize=(10,3), dpi=dpi)
plt.subplot(1,2,1);
else:
plt.figure(figsize=(5,3), dpi=dpi)
plt.title('Predicted lDDT')
plt.plot(plddt)
if Ls is not None:
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([L,L],[0,100],color="black")
plt.ylim(0,100)
plt.ylabel('plDDT')
plt.xlabel('position')
if use_ptm:
plt.subplot(1,2,2);plt.title('Predicted Aligned Error')
Ln = pae.shape[0]
plt.imshow(pae,cmap="bwr",vmin=0,vmax=30,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
plt.xlabel('Scored residue')
plt.ylabel('Aligned residue')
return plt
def plot_msas(msas, ori_seq=None, sort_by_seqid=True, deduplicate=True, dpi=100, return_plt=True):
'''
plot the msas
'''
if ori_seq is None: ori_seq = msas[0][0]
seqs = ori_seq.replace("/","").split(":")
seqs_dash = ori_seq.replace(":","").split("/")
Ln = np.cumsum(np.append(0,[len(seq) for seq in seqs]))
Ln_dash = np.cumsum(np.append(0,[len(seq) for seq in seqs_dash]))
Nn,lines = [],[]
for msa in msas:
msa_ = set(msa) if deduplicate else msa
if len(msa_) > 0:
Nn.append(len(msa_))
msa_ = np.asarray([list(seq) for seq in msa_])
gap_ = msa_ != "-"
qid_ = msa_ == np.array(list("".join(seqs)))
gapid = np.stack([gap_[:,Ln[i]:Ln[i+1]].max(-1) for i in range(len(seqs))],-1)
seqid = np.stack([qid_[:,Ln[i]:Ln[i+1]].mean(-1) for i in range(len(seqs))],-1).sum(-1) / (gapid.sum(-1) + 1e-8)
non_gaps = gap_.astype(float)
non_gaps[non_gaps == 0] = np.nan
if sort_by_seqid:
lines.append(non_gaps[seqid.argsort()]*seqid[seqid.argsort(),None])
else:
lines.append(non_gaps[::-1] * seqid[::-1,None])
Nn = np.cumsum(np.append(0,Nn))
lines = np.concatenate(lines,0)
if return_plt:
plt.figure(figsize=(8,5),dpi=dpi)
plt.title("Sequence coverage")
plt.imshow(lines,
interpolation='nearest', aspect='auto',
cmap="rainbow_r", vmin=0, vmax=1, origin='lower',
extent=(0, lines.shape[1], 0, lines.shape[0]))
for i in Ln[1:-1]:
plt.plot([i,i],[0,lines.shape[0]],color="black")
for i in Ln_dash[1:-1]:
plt.plot([i,i],[0,lines.shape[0]],"--",color="black")
for j in Nn[1:-1]:
plt.plot([0,lines.shape[1]],[j,j],color="black")
plt.plot((np.isnan(lines) == False).sum(0), color='black')
plt.xlim(0,lines.shape[1])
plt.ylim(0,lines.shape[0])
plt.colorbar(label="Sequence identity to query")
plt.xlabel("Positions")
plt.ylabel("Sequences")
if return_plt: return plt
def read_pdb_renum(pdb_filename, Ls=None):
if Ls is not None:
L_init = 0
new_chain = {}
for L,c in zip(Ls, alphabet_list):
new_chain.update({i:c for i in range(L_init,L_init+L)})
L_init += L
n,pdb_out = 1,[]
resnum_,chain_ = 1,"A"
for line in open(pdb_filename,"r"):
if line[:4] == "ATOM":
chain = line[21:22]
resnum = int(line[22:22+5])
if resnum != resnum_ or chain != chain_:
resnum_,chain_ = resnum,chain
n += 1
if Ls is None: pdb_out.append("%s%4i%s" % (line[:22],n,line[26:]))
else: pdb_out.append("%s%s%4i%s" % (line[:21],new_chain[n-1],n,line[26:]))
return "".join(pdb_out)
def show_pdb(pred_output_path, show_sidechains=False, show_mainchains=False,
color="lDDT", chains=None, Ls=None, vmin=50, vmax=90,
color_HP=False, size=(800,480)):
if chains is None:
chains = 1 if Ls is None else len(Ls)
view = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js', width=size[0], height=size[1])
view.addModel(read_pdb_renum(pred_output_path, Ls),'pdb')
if color == "lDDT":
view.setStyle({'cartoon': {'colorscheme': {'prop':'b','gradient': 'roygb','min':vmin,'max':vmax}}})
elif color == "rainbow":
view.setStyle({'cartoon': {'color':'spectrum'}})
elif color == "chain":
for n,chain,color in zip(range(chains),alphabet_list,pymol_color_list):
view.setStyle({'chain':chain},{'cartoon': {'color':color}})
if show_sidechains:
BB = ['C','O','N']
HP = ["ALA","GLY","VAL","ILE","LEU","PHE","MET","PRO","TRP","CYS","TYR"]
if color_HP:
view.addStyle({'and':[{'resn':HP},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':"yellowCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':HP,'invert':True},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':"whiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"GLY"},{'atom':'CA'}]},
{'sphere':{'colorscheme':"yellowCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"PRO"},{'atom':['C','O'],'invert':True}]},
{'stick':{'colorscheme':"yellowCarbon",'radius':0.3}})
else:
view.addStyle({'and':[{'resn':["GLY","PRO"],'invert':True},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"GLY"},{'atom':'CA'}]},
{'sphere':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"PRO"},{'atom':['C','O'],'invert':True}]},
{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
if show_mainchains:
BB = ['C','O','N','CA']
view.addStyle({'atom':BB},{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.zoomTo()
return view
def plot_plddts(plddts, Ls=None, dpi=100, fig=True):
if fig: plt.figure(figsize=(8,5),dpi=100)
plt.title("Predicted lDDT per position")
for n,plddt in enumerate(plddts):
plt.plot(plddt,label=f"rank_{n+1}")
if Ls is not None:
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([L,L],[0,100],color="black")
plt.legend()
plt.ylim(0,100)
plt.ylabel("Predicted lDDT")
plt.xlabel("Positions")
return plt
def plot_paes(paes, Ls=None, dpi=100, fig=True):
num_models = len(paes)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,pae in enumerate(paes):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = pae.shape[0]
plt.imshow(pae,cmap="bwr",vmin=0,vmax=30,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
def plot_adjs(adjs, Ls=None, dpi=100, fig=True):
num_models = len(adjs)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,adj in enumerate(adjs):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = adj.shape[0]
plt.imshow(adj,cmap="binary",vmin=0,vmax=1,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
def plot_dists(dists, Ls=None, dpi=100, fig=True):
num_models = len(dists)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,dist in enumerate(dists):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = dist.shape[0]
plt.imshow(dist,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
##########################################################################
##########################################################################
def kabsch(a, b, weights=None, return_v=False):
a = np.asarray(a)
b = np.asarray(b)
if weights is None: weights = np.ones(len(b))
else: weights = np.asarray(weights)
B = np.einsum('ji,jk->ik', weights[:, None] * a, b)
u, s, vh = np.linalg.svd(B)
if np.linalg.det(u @ vh) < 0: u[:, -1] = -u[:, -1]
if return_v: return u
else: return u @ vh
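# Minimal sketch (added for illustration): kabsch() returns the rotation that best
# superimposes `a` onto `b`, so for b = a @ Q with Q a proper rotation we recover
# a @ kabsch(a, b) ~= b.  The points and rotation below are synthetic.
def _kabsch_example():
  rng = np.random.RandomState(0)
  a = rng.randn(10, 3)
  Q, _ = np.linalg.qr(rng.randn(3, 3))
  if np.linalg.det(Q) < 0: Q[:, 0] = -Q[:, 0]  # keep it a proper rotation
  b = a @ Q
  return np.allclose(a @ kabsch(a, b), b)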
def plot_pseudo_3D(xyz, c=None, ax=None, chainbreak=5,
cmap="gist_rainbow", line_w=2.0,
cmin=None, cmax=None, zmin=None, zmax=None):
def rescale(a,amin=None,amax=None):
a = np.copy(a)
if amin is None: amin = a.min()
if amax is None: amax = a.max()
a[a < amin] = amin
a[a > amax] = amax
return (a - amin)/(amax - amin)
# make segments
xyz = np.asarray(xyz)
seg = np.concatenate([xyz[:-1,None,:],xyz[1:,None,:]],axis=-2)
seg_xy = seg[...,:2]
seg_z = seg[...,2].mean(-1)
ord = seg_z.argsort()
# set colors
if c is None: c = np.arange(len(seg))[::-1]
else: c = (c[1:] + c[:-1])/2
c = rescale(c,cmin,cmax)
if isinstance(cmap, str):
if cmap == "gist_rainbow": c *= 0.75
colors = matplotlib.cm.get_cmap(cmap)(c)
else:
colors = cmap(c)
if chainbreak is not None:
dist = np.linalg.norm(xyz[:-1] - xyz[1:], axis=-1)
    colors[...,3] = (dist < chainbreak).astype(float)
# add shade/tint based on z-dimension
z = rescale(seg_z,zmin,zmax)[:,None]
tint, shade = z/3, (z+2)/3
colors[:,:3] = colors[:,:3] + (1 - colors[:,:3]) * tint
colors[:,:3] = colors[:,:3] * shade
set_lim = False
if ax is None:
fig, ax = plt.subplots()
fig.set_figwidth(5)
fig.set_figheight(5)
set_lim = True
else:
fig = ax.get_figure()
if ax.get_xlim() == (0,1):
set_lim = True
if set_lim:
xy_min = xyz[:,:2].min() - line_w
xy_max = xyz[:,:2].max() + line_w
ax.set_xlim(xy_min,xy_max)
ax.set_ylim(xy_min,xy_max)
ax.set_aspect('equal')
# determine linewidths
width = fig.bbox_inches.width * ax.get_position().width
linewidths = line_w * 72 * width / np.diff(ax.get_xlim())
lines = mcoll.LineCollection(seg_xy[ord], colors=colors[ord], linewidths=linewidths,
path_effects=[matplotlib.patheffects.Stroke(capstyle="round")])
return ax.add_collection(lines)
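# Minimal sketch (added for illustration): plot_pseudo_3D() draws a depth-shaded 2D
# projection of backbone coordinates; real callers pass CA positions, the helix
# below is synthetic.
def _plot_pseudo_3D_example():
  t = np.linspace(0, 8 * np.pi, 200)
  xyz = np.stack([5 * np.cos(t), 5 * np.sin(t), t], axis=-1)
  fig, ax = plt.subplots()
  plot_pseudo_3D(xyz, ax=ax)
  return fig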
def add_text(text, ax):
return plt.text(0.5, 1.01, text, horizontalalignment='center',
verticalalignment='bottom', transform=ax.transAxes)
def plot_protein(protein=None, pos=None, plddt=None, Ls=None, dpi=100, best_view=True, line_w=2.0):
if protein is not None:
pos = np.asarray(protein.atom_positions[:,1,:])
plddt = np.asarray(protein.b_factors[:,0])
# get best view
if best_view:
if plddt is not None:
weights = plddt/100
pos = pos - (pos * weights[:,None]).sum(0,keepdims=True) / weights.sum()
pos = pos @ kabsch(pos, pos, weights, return_v=True)
else:
pos = pos - pos.mean(0,keepdims=True)
pos = pos @ kabsch(pos, pos, return_v=True)
if plddt is not None:
fig, (ax1, ax2) = plt.subplots(1,2)
fig.set_figwidth(6); fig.set_figheight(3)
ax = [ax1, ax2]
else:
fig, ax1 = plt.subplots(1,1)
fig.set_figwidth(3); fig.set_figheight(3)
ax = [ax1]
fig.set_dpi(dpi)
fig.subplots_adjust(top = 0.9, bottom = 0.1, right = 1, left = 0, hspace = 0, wspace = 0)
xy_min = pos[...,:2].min() - line_w
xy_max = pos[...,:2].max() + line_w
for a in ax:
a.set_xlim(xy_min, xy_max)
a.set_ylim(xy_min, xy_max)
a.axis(False)
if Ls is None or len(Ls) == 1:
# color N->C
c = np.arange(len(pos))[::-1]
plot_pseudo_3D(pos, line_w=line_w, ax=ax1)
add_text("colored by N→C", ax1)
else:
# color by chain
c = np.concatenate([[n]*L for n,L in enumerate(Ls)])
if len(Ls) > 40: plot_pseudo_3D(pos, c=c, line_w=line_w, ax=ax1)
else: plot_pseudo_3D(pos, c=c, cmap=pymol_cmap, cmin=0, cmax=39, line_w=line_w, ax=ax1)
add_text("colored by chain", ax1)
if plddt is not None:
# color by pLDDT
plot_pseudo_3D(pos, c=plddt, cmin=50, cmax=90, line_w=line_w, ax=ax2)
add_text("colored by pLDDT", ax2)
return fig
| ColabFold-main | beta/colabfold.py |
import os
from urllib import request
from concurrent import futures
import pickle
import jax
from alphafold.data.tools import jackhmmer
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.common import protein
from alphafold.model import config
from alphafold.model import model
from alphafold.model import data
from alphafold.model.tf import shape_placeholders
import tensorflow as tf
from string import ascii_uppercase
import numpy as np
import matplotlib.pyplot as plt
import re
import colabfold as cf
import pairmsa
try:
from google.colab import files
IN_COLAB = True
except:
IN_COLAB = False
import tqdm.notebook
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
#######################################################################################################################################
# prep_inputs
#######################################################################################################################################
def prep_inputs(sequence, jobname="test", homooligomer="1", output_dir=None, clean=False, verbose=True):
# process inputs
sequence = str(sequence)
sequence = re.sub("[^A-Z:/]", "", sequence.upper())
sequence = re.sub(":+",":",sequence)
sequence = re.sub("/+","/",sequence)
sequence = re.sub("^[:/]+","",sequence)
sequence = re.sub("[:/]+$","",sequence)
jobname = re.sub(r'\W+', '', jobname)
homooligomer = str(homooligomer)
homooligomer = re.sub("[:/]+",":",homooligomer)
homooligomer = re.sub("^[:/]+","",homooligomer)
homooligomer = re.sub("[:/]+$","",homooligomer)
if len(homooligomer) == 0: homooligomer = "1"
homooligomer = re.sub("[^0-9:]", "", homooligomer)
# define inputs
I = {"ori_sequence":sequence,
"sequence":sequence.replace("/","").replace(":",""),
"seqs":sequence.replace("/","").split(":"),
"homooligomer":homooligomer,
"homooligomers":[int(h) for h in homooligomer.split(":")],
"msas":[], "deletion_matrices":[]}
# adjust homooligomer option
if len(I["seqs"]) != len(I["homooligomers"]):
if len(I["homooligomers"]) == 1:
I["homooligomers"] = [I["homooligomers"][0]] * len(I["seqs"])
else:
if verbose:
print("WARNING: Mismatch between number of breaks ':' in 'sequence' and 'homooligomer' definition")
while len(I["seqs"]) > len(I["homooligomers"]):
I["homooligomers"].append(1)
I["homooligomers"] = I["homooligomers"][:len(I["seqs"])]
I["homooligomer"] = ":".join([str(h) for h in I["homooligomers"]])
# define full sequence being modelled
I["full_sequence"] = ''.join([s*h for s,h in zip(I["seqs"],I["homooligomers"])])
I["lengths"] = [len(seq) for seq in I["seqs"]]
# prediction directory
if output_dir is None:
I["output_dir"] = 'prediction_' + jobname + '_' + cf.get_hash(I["full_sequence"])[:5]
else:
I["output_dir"] = output_dir
os.makedirs(I["output_dir"], exist_ok=True)
# delete existing files in working directory
if clean:
for f in os.listdir(I["output_dir"]):
os.remove(os.path.join(I["output_dir"], f))
if verbose and len(I["full_sequence"]) > 1400:
print(f"WARNING: For a typical Google-Colab-GPU (16G) session, the max total length is ~1400 residues. You are at {len(I['full_sequence'])}!")
    print("Running AlphaFold may crash unless you trim the protein(s) to a shorter length (see the trim options below).")
if verbose:
print(f"homooligomer: {I['homooligomer']}")
print(f"total_length: {len(I['full_sequence'])}")
print(f"output_dir: {I['output_dir']}")
return I
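# Illustrative usage sketch (added; sequence and jobname are made up): ":" separates
# different chains, "/" marks a chain break within one chain, and `homooligomer`
# gives the copy number per chain.
#   I = prep_inputs("MKTAYIAKQR:GSHMSTNPKP", jobname="demo", homooligomer="2:1")
#   print(I["full_sequence"], I["lengths"], I["homooligomers"])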
#######################################################################################################################################
# prep_msa
#######################################################################################################################################
def run_jackhmmer(sequence, prefix, jackhmmer_binary_path='jackhmmer', verbose=True):
fasta_path = f"{prefix}.fasta"
with open(fasta_path, 'wt') as f:
f.write(f'>query\n{sequence}')
pickled_msa_path = f"{prefix}.jackhmmer.pickle"
if os.path.isfile(pickled_msa_path):
msas_dict = pickle.load(open(pickled_msa_path,"rb"))
msas, deletion_matrices, names = (msas_dict[k] for k in ['msas', 'deletion_matrices', 'names'])
full_msa = []
for msa in msas:
full_msa += msa
else:
# --- Find the closest source ---
test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'
ex = futures.ThreadPoolExecutor(3)
def fetch(source):
request.urlretrieve(test_url_pattern.format(source))
return source
fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]
source = None
for f in futures.as_completed(fs):
source = f.result()
ex.shutdown()
break
dbs = []
num_jackhmmer_chunks = {'uniref90': 59, 'smallbfd': 17, 'mgnify': 71}
total_jackhmmer_chunks = sum(num_jackhmmer_chunks.values())
disable_tqdm = not verbose
with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT, disable=disable_tqdm) as pbar:
def jackhmmer_chunk_callback(i):
pbar.update(n=1)
pbar.set_description('Searching uniref90')
jackhmmer_uniref90_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/uniref90_2021_03.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['uniref90'],
streaming_callback=jackhmmer_chunk_callback,
z_value=135301051)
dbs.append(('uniref90', jackhmmer_uniref90_runner.query(fasta_path)))
pbar.set_description('Searching smallbfd')
jackhmmer_smallbfd_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/bfd-first_non_consensus_sequences.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['smallbfd'],
streaming_callback=jackhmmer_chunk_callback,
z_value=65984053)
dbs.append(('smallbfd', jackhmmer_smallbfd_runner.query(fasta_path)))
pbar.set_description('Searching mgnify')
jackhmmer_mgnify_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/mgy_clusters_2019_05.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['mgnify'],
streaming_callback=jackhmmer_chunk_callback,
z_value=304820129)
dbs.append(('mgnify', jackhmmer_mgnify_runner.query(fasta_path)))
# --- Extract the MSAs and visualize ---
# Extract the MSAs from the Stockholm files.
# NB: deduplication happens later in pipeline.make_msa_features.
mgnify_max_hits = 501
msas = []
deletion_matrices = []
names = []
for db_name, db_results in dbs:
unsorted_results = []
for i, result in enumerate(db_results):
msa, deletion_matrix, target_names = parsers.parse_stockholm(result['sto'])
e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])
e_values = [e_values_dict[t.split('/')[0]] for t in target_names]
zipped_results = zip(msa, deletion_matrix, target_names, e_values)
if i != 0:
# Only take query from the first chunk
zipped_results = [x for x in zipped_results if x[2] != 'query']
unsorted_results.extend(zipped_results)
sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])
db_msas, db_deletion_matrices, db_names, _ = zip(*sorted_by_evalue)
if db_msas:
if db_name == 'mgnify':
db_msas = db_msas[:mgnify_max_hits]
db_deletion_matrices = db_deletion_matrices[:mgnify_max_hits]
db_names = db_names[:mgnify_max_hits]
msas.append(db_msas)
deletion_matrices.append(db_deletion_matrices)
names.append(db_names)
msa_size = len(set(db_msas))
print(f'{msa_size} Sequences Found in {db_name}')
pickle.dump({"msas":msas,
"deletion_matrices":deletion_matrices,
"names":names}, open(pickled_msa_path,"wb"))
return msas, deletion_matrices, names
def prep_msa(I, msa_method="mmseqs2", add_custom_msa=False, msa_format="fas",
pair_mode="unpaired", pair_cov=50, pair_qid=20,
hhfilter_loc="hhfilter", reformat_loc="reformat.pl", TMP_DIR="tmp",
custom_msa=None, precomputed=None,
mmseqs_host_url="https://a3m.mmseqs.com",
verbose=True):
# make temp directory
os.makedirs(TMP_DIR, exist_ok=True)
# clear previous inputs
I["msas"] = []
I["deletion_matrices"] = []
if add_custom_msa:
if IN_COLAB:
print(f"upload custom msa in '{msa_format}' format")
msa_dict = files.upload()
lines = msa_dict[list(msa_dict.keys())[0]].decode()
input_file = os.path.join(I["output_dir"],f"upload.{msa_format}")
with open(input_file,"w") as tmp_upload:
tmp_upload.write(lines)
else:
input_file = custom_msa
if input_file is None or not os.path.isfile(input_file):
raise ValueError("ERROR: `custom_msa` undefined")
else:
# convert to a3m
output_file = os.path.join(I["output_dir"],f"upload.a3m")
os.system(f"{reformat_loc} {msa_format} a3m {input_file} {output_file}")
# parse
msa, mtx = parsers.parse_a3m(open(output_file,"r").read())
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
if len(I["msas"][0][0]) != len(I["sequence"]):
raise ValueError("ERROR: the length of msa does not match input sequence")
if msa_method == "precomputed":
if IN_COLAB:
print("upload precomputed pickled msa from previous run")
uploaded_dict = files.upload()
uploaded_filename = list(uploaded_dict.keys())[0]
I.update(pickle.loads(uploaded_dict[uploaded_filename]))
elif precomputed is None:
raise ValueError("ERROR: `precomputed` undefined")
else:
I.update(pickle.load(open(precomputed,"rb")))
elif msa_method == "single_sequence":
if len(I["msas"]) == 0:
I["msas"].append([I["sequence"]])
I["deletion_matrices"].append([[0]*len(I["sequence"])])
else:
_blank_seq = ["-" * L for L in I["lengths"]]
_blank_mtx = [[0] * L for L in I["lengths"]]
def _pad(ns,vals,mode):
if mode == "seq": _blank = _blank_seq.copy()
if mode == "mtx": _blank = _blank_mtx.copy()
if isinstance(ns, list):
for n,val in zip(ns,vals): _blank[n] = val
else: _blank[ns] = vals
if mode == "seq": return "".join(_blank)
if mode == "mtx": return sum(_blank,[])
if len(I["seqs"]) == 1 or "unpaired" in pair_mode:
# gather msas
if msa_method == "mmseqs2":
prefix = cf.get_hash(I["sequence"])
prefix = os.path.join(TMP_DIR,prefix)
print(f"running mmseqs2")
A3M_LINES = cf.run_mmseqs2(I["seqs"], prefix, use_filter=True, host_url=mmseqs_host_url)
for n, seq in enumerate(I["seqs"]):
# tmp directory
prefix = cf.get_hash(seq)
prefix = os.path.join(TMP_DIR,prefix)
if msa_method == "mmseqs2":
# run mmseqs2
a3m_lines = A3M_LINES[n]
msa, mtx = parsers.parse_a3m(a3m_lines)
msas_, mtxs_ = [msa],[mtx]
elif msa_method == "jackhmmer":
print(f"running jackhmmer on seq_{n}")
# run jackhmmer
msas_, mtxs_, names_ = ([sum(x,())] for x in run_jackhmmer(seq, prefix))
# pad sequences
for msa_,mtx_ in zip(msas_,mtxs_):
msa,mtx = [I["sequence"]],[[0]*len(I["sequence"])]
for s,m in zip(msa_,mtx_):
msa.append(_pad(n,s,"seq"))
mtx.append(_pad(n,m,"mtx"))
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
# PAIR_MSA
if len(I["seqs"]) > 1 and (pair_mode == "paired" or pair_mode == "unpaired+paired"):
print("attempting to pair some sequences...")
if msa_method == "mmseqs2":
prefix = cf.get_hash(I["sequence"])
prefix = os.path.join(TMP_DIR,prefix)
print(f"running mmseqs2_noenv_nofilter on all seqs")
A3M_LINES = cf.run_mmseqs2(I["seqs"], prefix, use_env=False, use_filter=False, host_url=mmseqs_host_url)
_data = []
for a in range(len(I["seqs"])):
print(f"prepping seq_{a}")
_seq = I["seqs"][a]
_prefix = os.path.join(TMP_DIR,cf.get_hash(_seq))
if msa_method == "mmseqs2":
a3m_lines = A3M_LINES[a]
_msa, _mtx, _lab = pairmsa.parse_a3m(a3m_lines,
filter_qid=pair_qid/100,
filter_cov=pair_cov/100)
elif msa_method == "jackhmmer":
_msas, _mtxs, _names = run_jackhmmer(_seq, _prefix)
_msa, _mtx, _lab = pairmsa.get_uni_jackhmmer(_msas[0], _mtxs[0], _names[0],
filter_qid=pair_qid/100,
filter_cov=pair_cov/100)
if len(_msa) > 1:
_data.append(pairmsa.hash_it(_msa, _lab, _mtx, call_uniprot=False))
else:
_data.append(None)
Ln = len(I["seqs"])
O = [[None for _ in I["seqs"]] for _ in I["seqs"]]
for a in range(Ln):
if _data[a] is not None:
for b in range(a+1,Ln):
if _data[b] is not None:
print(f"attempting pairwise stitch for {a} {b}")
O[a][b] = pairmsa._stitch(_data[a],_data[b])
_seq_a, _seq_b, _mtx_a, _mtx_b = (*O[a][b]["seq"],*O[a][b]["mtx"])
# filter to remove redundant sequences
ok = []
with open(f"{TMP_DIR}/tmp.fas","w") as fas_file:
fas_file.writelines([f">{n}\n{a+b}\n" for n,(a,b) in enumerate(zip(_seq_a,_seq_b))])
os.system(f"{hhfilter_loc} -maxseq 1000000 -i {TMP_DIR}/tmp.fas -o {TMP_DIR}/tmp.id90.fas -id 90")
for line in open(f"{TMP_DIR}/tmp.id90.fas","r"):
if line.startswith(">"): ok.append(int(line[1:]))
if verbose:
print(f"found {len(_seq_a)} pairs ({len(ok)} after filtering)")
if len(_seq_a) > 0:
msa,mtx = [I["sequence"]],[[0]*len(I["sequence"])]
for s_a,s_b,m_a,m_b in zip(_seq_a, _seq_b, _mtx_a, _mtx_b):
msa.append(_pad([a,b],[s_a,s_b],"seq"))
mtx.append(_pad([a,b],[m_a,m_b],"mtx"))
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
# save MSA as pickle
pickle.dump({"msas":I["msas"],"deletion_matrices":I["deletion_matrices"]},
open(os.path.join(I["output_dir"],"msa.pickle"),"wb"))
return I
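# Illustrative usage sketch (added): assumes `I` comes from prep_inputs() above and
# that the public MMseqs2 API endpoint is reachable.
#   I = prep_msa(I, msa_method="mmseqs2", pair_mode="unpaired+paired")
#   print(len(I["msas"]), "alignments collected")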
#######################################################################################################################################
# prep_filter
#######################################################################################################################################
def trim_inputs(trim, msas, deletion_matrices, ori_seq=None, inverse=False):
'''
input: trim, msas, deletion_matrices, ori_seq
output: msas, deletion_matrices, ori_seq
'''
if ori_seq is None: ori_seq = msas[0][0]
seqs = ori_seq.replace("/","").split(":")
L_ini = 0
chain_idx = {}
idx_chain = []
for chain,seq in zip(ascii_uppercase,seqs):
L = len(seq)
chain_idx[chain] = dict(zip(range(L),range(L_ini,L_ini+L)))
idx_chain += [f"{chain}{i+1}" for i in range(L)]
L_ini += L
global_idx = dict(zip(range(L_ini),range(L_ini)))
mode = "keeping" if inverse else "trimming"
trim_set = []
for idx in trim.split(","):
i,j = idx.split("-") if "-" in idx else (idx,"")
# set index reference frame
trim_idx_i = trim_idx_j = global_idx
if i != "" and i[0] in ascii_uppercase:
trim_idx_i,i = chain_idx[i[0]], i[1:]
if j != "" and j[0] in ascii_uppercase:
trim_idx_j,j = chain_idx[j[0]], j[1:]
# set which positions to trim
if "-" in idx:
i = trim_idx_i[int(i)-1] if i != "" else trim_idx_i[0]
j = trim_idx_j[int(j)-1] if j != "" else trim_idx_j[len(trim_idx_j) - 1]
trim_set += list(range(i,j+1))
print(f"{mode} positions: {idx_chain[i]}-{idx_chain[j]}")
else:
i = trim_idx_i[int(i)-1]
trim_set.append(i)
print(f"{mode} position: {idx_chain[i]}")
# deduplicate list
trim_set = set(trim_set)
if inverse:
trim_set = set(range(L_ini)) ^ trim_set
trim_set = sorted(list(trim_set))
# trim MSA
mod_msas, mod_mtxs = [],[]
for msa, mtx in zip(msas, deletion_matrices):
mod_msa = np.delete([list(s) for s in msa], trim_set, 1)
ok = (mod_msa != "-").sum(-1) > 0
mod_msas.append(["".join(s) for s in mod_msa[ok]])
mod_mtx = np.asarray(mtx)[ok]
mod_mtxs.append(np.delete(mod_mtx, trim_set, 1).tolist())
# trim original sequence
mod_idx = []
mod_chain = []
mod_ori_seq = []
for n,a in enumerate(ori_seq.replace("/","").replace(":","")):
if n not in trim_set:
mod_ori_seq.append(a)
mod_idx.append(n)
mod_chain.append(idx_chain[n][0])
if len(mod_idx) > 1:
if mod_chain[-1] != mod_chain[-2]:
mod_ori_seq[-1] = ":"
mod_ori_seq.append(a)
elif (mod_idx[-1] - mod_idx[-2]) > 1:
mod_ori_seq[-1] = "/"
mod_ori_seq.append(a)
mod_ori_seq = "".join(mod_ori_seq)
chains = sorted([ascii_uppercase.index(a) for a in set(mod_chain)])
return {"msas":mod_msas, "deletion_matrices":mod_mtxs,
"ori_sequence":mod_ori_seq, "chains":chains}
def cov_qid_filter(msas, deletion_matrices, ori_seq=None, cov=0, qid=0):
if ori_seq is None: ori_seq = msas[0][0]
seqs = ori_seq.replace("/","").split(":")
ref_seq_ = np.array(list("".join(seqs)))
new_msas,new_mtxs = [],[]
L = np.asarray([len(seq) for seq in seqs])
Ln = np.cumsum(np.append(0,L))
for msa, mtx in zip(msas, deletion_matrices):
msa_ = np.asarray([list(seq) for seq in msa])
# coverage (non-gap characters)
cov_ = msa_ != "-"
# sequence identity to query
qid_ = msa_ == ref_seq_
# split by protein (for protein complexes)
cov__ = np.stack([cov_[:,Ln[i]:Ln[i+1]].sum(-1) for i in range(len(seqs))],-1)
qid__ = np.stack([qid_[:,Ln[i]:Ln[i+1]].sum(-1) for i in range(len(seqs))],-1)
not_empty__ = cov__ > 0
ok = []
for n in range(len(msa)):
m = not_empty__[n]
if m.sum() > 0:
q = qid__[n][m].sum() / cov__[n][m].sum()
c = cov__[n][m].sum() / L[m].sum()
if q > qid and c > cov:
ok.append(n)
new_msas.append([msa[n] for n in ok])
new_mtxs.append([mtx[n] for n in ok])
return {"msas":new_msas, "deletion_matrices":new_mtxs}
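# Minimal sketch (added for illustration) of cov_qid_filter() on a toy alignment:
# a sequence is kept only if, over the chains it covers, its identity to the query
# exceeds `qid` and its coverage exceeds `cov` (both fractions between 0 and 1).
def _cov_qid_filter_example():
  msas = [["AAAA", "AA--", "CCCC"]]
  mtxs = [[[0, 0, 0, 0]] * 3]
  out = cov_qid_filter(msas, mtxs, ori_seq="AAAA", cov=0.6, qid=0.6)
  return out["msas"][0]  # -> ["AAAA"]; the gapped and the non-identical rows are dropped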
def prep_filter(I, trim="", trim_inverse=False, cov=0, qid=0, verbose=True):
trim = re.sub("[^0-9A-Z,-]", "", trim.upper())
trim = re.sub(",+",",",trim)
trim = re.sub("^[,]+","",trim)
trim = re.sub("[,]+$","",trim)
if trim != "" or cov > 0 or qid > 0:
mod_I = dict(I)
if trim != "":
mod_I.update(trim_inputs(trim, mod_I["msas"], mod_I["deletion_matrices"],
mod_I["ori_sequence"], inverse=trim_inverse))
mod_I["homooligomers"] = [mod_I["homooligomers"][c] for c in mod_I["chains"]]
mod_I["sequence"] = mod_I["ori_sequence"].replace("/","").replace(":","")
mod_I["seqs"] = mod_I["ori_sequence"].replace("/","").split(":")
mod_I["full_sequence"] = "".join([s*h for s,h in zip(mod_I["seqs"], mod_I["homooligomers"])])
new_length = len(mod_I["full_sequence"])
if verbose:
print(f"total_length: '{new_length}' after trimming")
if cov > 0 or qid > 0:
mod_I.update(cov_qid_filter(mod_I["msas"], mod_I["deletion_matrices"],
mod_I["ori_sequence"], cov=cov/100, qid=qid/100))
return mod_I
else:
return I
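# Minimal sketch (added for illustration) of the `trim` syntax accepted by
# prep_filter()/trim_inputs() above: bare numbers index the concatenated sequence,
# a leading chain letter (A, B, ...) switches to per-chain numbering, and "-"
# denotes ranges, e.g. "A1-A10,B5".  The toy alignment below is made up.
def _trim_inputs_example():
  msas = [["ACDEFGHIKL", "ACDEFG-IKL"]]
  mtxs = [[[0] * 10, [0] * 10]]
  out = trim_inputs("1-3", msas, mtxs, ori_seq="ACDEFGHIKL")
  return out["msas"][0][0]  # -> "EFGHIKL" (positions 1-3 removed)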
#######################################################################################################################################
# prep features
#######################################################################################################################################
def prep_feats(I, clean=False):
def _placeholder_template_feats(num_templates_, num_res_):
return {
'template_aatype': np.zeros([num_templates_, num_res_, 22], np.float32),
'template_all_atom_masks': np.zeros([num_templates_, num_res_, 37], np.float32),
'template_all_atom_positions': np.zeros([num_templates_, num_res_, 37, 3], np.float32),
'template_domain_names': np.zeros([num_templates_], np.float32),
'template_sum_probs': np.zeros([num_templates_], np.float32),
}
# delete old files
if clean:
for f in os.listdir(I["output_dir"]):
if "rank_" in f: os.remove(os.path.join(I["output_dir"], f))
if len(I["msas"]) == 0:
print("WARNING: no MSA found, switching to 'single_sequence' mode")
I["msas"].append([I["sequence"]])
I["deletion_matrices"].append([[0]*len(I["sequence"])])
# homooligomerize
lengths = [len(seq) for seq in I["seqs"]]
msas_mod, deletion_matrices_mod = cf.homooligomerize_heterooligomer(I["msas"], I["deletion_matrices"],
lengths, I["homooligomers"])
# define input features
num_res = len(I["full_sequence"])
feature_dict = {}
feature_dict.update(pipeline.make_sequence_features(I["full_sequence"], 'test', num_res))
feature_dict.update(pipeline.make_msa_features(msas_mod, deletion_matrices=deletion_matrices_mod))
feature_dict.update(_placeholder_template_feats(0, num_res))
# set chainbreaks
Ls = []
for seq,h in zip(I["ori_sequence"].split(":"), I["homooligomers"]):
Ls += [len(s) for s in seq.split("/")] * h
Ls_plot = []
for seq,h in zip(I["seqs"], I["homooligomers"]):
Ls_plot += [len(seq)] * h
feature_dict['residue_index'] = cf.chain_break(feature_dict['residue_index'], Ls)
feature_dict['Ls'] = Ls_plot
feature_dict['output_dir'] = I["output_dir"]
return feature_dict
def make_fixed_size(feat, runner):
'''pad input features'''
opt = runner["opt"]
cfg = runner["model"].config
shape_schema = {k:[None]+v for k,v in dict(cfg.data.eval.feat).items()}
pad_size_map = {
shape_placeholders.NUM_RES: opt["L"],
shape_placeholders.NUM_MSA_SEQ: cfg.data.eval.max_msa_clusters,
shape_placeholders.NUM_EXTRA_SEQ: cfg.data.common.max_extra_msa,
shape_placeholders.NUM_TEMPLATES: 0,
}
for k, v in feat.items():
# Don't transfer this to the accelerator.
if k == 'extra_cluster_assignment':
continue
shape = list(v.shape)
schema = shape_schema[k]
assert len(shape) == len(schema), (
f'Rank mismatch between shape and shape schema for {k}: '
f'{shape} vs {schema}')
pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
padding = [(0, p - tf.shape(v)[i]) for i, p in enumerate(pad_size)]
if padding:
feat[k] = tf.pad(v, padding, name=f'pad_to_fixed_{k}')
feat[k].set_shape(pad_size)
return {k:np.asarray(v) for k,v in feat.items()}
#######################################################################################################################################
# run alphafold
#######################################################################################################################################
def clear_mem(device=None):
'''remove all data from device'''
backend = jax.lib.xla_bridge.get_backend(device)
if hasattr(backend,'live_buffers'):
for buf in backend.live_buffers():
buf.delete()
OPT_DEFAULT = {"N":None, "L":None,
"use_ptm":True, "use_turbo":True,
"max_recycles":3, "tol":0, "num_ensemble":1,
"max_msa_clusters":512, "max_extra_msa":1024,
"is_training":False}
def prep_model_runner(opt=None, model_name="model_5", old_runner=None, params_loc='./alphafold/data'):
# setup the [opt]ions
if opt is None:
opt = OPT_DEFAULT.copy()
else:
for k in OPT_DEFAULT:
if k not in opt: opt[k] = OPT_DEFAULT[k]
# if old_runner not defined or [opt]ions changed, start new runner
if old_runner is None or old_runner["opt"] != opt:
clear_mem()
name = f"{model_name}_ptm" if opt["use_ptm"] else model_name
cfg = config.model_config(name)
if opt["use_turbo"]:
if opt["N"] is None:
cfg.data.eval.max_msa_clusters = opt["max_msa_clusters"]
cfg.data.common.max_extra_msa = opt["max_extra_msa"]
else:
msa_clusters = min(opt["N"], opt["max_msa_clusters"])
cfg.data.eval.max_msa_clusters = msa_clusters
cfg.data.common.max_extra_msa = max(min(opt["N"] - msa_clusters, opt["max_extra_msa"]),1)
cfg.data.common.num_recycle = opt["max_recycles"]
cfg.model.num_recycle = opt["max_recycles"]
cfg.model.recycle_tol = opt["tol"]
cfg.data.eval.num_ensemble = opt["num_ensemble"]
params = data.get_model_haiku_params(name, params_loc)
return {"model":model.RunModel(cfg, params, is_training=opt["is_training"]), "opt":opt}
else:
return old_runner
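# Illustrative usage sketch (added): assumes the AlphaFold parameter files already
# exist under `params_loc`; the option values below are examples only.
#   runner = prep_model_runner({"max_recycles": 3, "use_ptm": True},
#                              model_name="model_1", params_loc="./alphafold/data")
#   # run_alphafold() below reuses one runner and only swaps in new haiku params
#   # between models when use_turbo is enabled.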
def run_alphafold(feature_dict, opt=None, runner=None, num_models=5, num_samples=1, subsample_msa=True,
pad_feats=False, rank_by="pLDDT", show_images=True, params_loc='./alphafold/data', verbose=True):
def do_subsample_msa(F, random_seed=0):
'''subsample msa to avoid running out of memory'''
N = len(F["msa"])
L = len(F["residue_index"])
N_ = int(3E7/L)
if N > N_:
if verbose:
print(f"whhhaaa... too many sequences ({N}) subsampling to {N_}")
np.random.seed(random_seed)
idx = np.append(0,np.random.permutation(np.arange(1,N)))[:N_]
F_ = {}
F_["msa"] = F["msa"][idx]
F_["deletion_matrix_int"] = F["deletion_matrix_int"][idx]
F_["num_alignments"] = np.full_like(F["num_alignments"],N_)
for k in F.keys():
if k not in F_: F_[k] = F[k]
return F_
else:
return F
def parse_results(prediction_result, processed_feature_dict, r, t, num_res):
'''parse results and convert to numpy arrays'''
to_np = lambda a: np.asarray(a)
def class_to_np(c):
class dict2obj():
def __init__(self, d):
for k,v in d.items(): setattr(self, k, to_np(v))
return dict2obj(c.__dict__)
dist_bins = jax.numpy.append(0,prediction_result["distogram"]["bin_edges"])
dist_logits = prediction_result["distogram"]["logits"][:num_res,:][:,:num_res]
dist_mtx = dist_bins[dist_logits.argmax(-1)]
contact_mtx = jax.nn.softmax(dist_logits)[:,:,dist_bins < 8].sum(-1)
b_factors = prediction_result['plddt'][:,None] * prediction_result['structure_module']['final_atom_mask']
p = protein.from_prediction(processed_feature_dict, prediction_result, b_factors=b_factors)
plddt = prediction_result['plddt'][:num_res]
out = {"unrelaxed_protein": class_to_np(p),
"plddt": to_np(plddt),
"pLDDT": to_np(plddt.mean()),
"dists": to_np(dist_mtx),
"adj": to_np(contact_mtx),
"recycles":to_np(r),
"tol":to_np(t)}
if "ptm" in prediction_result:
out["pae"] = to_np(prediction_result['predicted_aligned_error'][:num_res,:][:,:num_res])
out["pTMscore"] = to_np(prediction_result['ptm'])
return out
num_res = len(feature_dict["residue_index"])
# if [opt]ions not defined
if opt is None:
opt = OPT_DEFAULT.copy()
opt["N"] = len(feature_dict["msa"])
opt["L"] = num_res
else:
for k in OPT_DEFAULT.keys():
if k not in opt: opt[k] = OPT_DEFAULT[k]
model_names = ['model_1', 'model_2', 'model_3', 'model_4', 'model_5'][:num_models]
total = len(model_names) * num_samples
outs = {}
def do_report(key):
o = outs[key]
if verbose:
line = f"{key} recycles:{o['recycles']} tol:{o['tol']:.2f} pLDDT:{o['pLDDT']:.2f}"
if 'pTMscore' in o:
line += f" pTMscore:{o['pTMscore']:.2f}"
print(line)
if show_images:
fig = cf.plot_protein(o['unrelaxed_protein'], Ls=feature_dict["Ls"], dpi=100)
plt.show()
tmp_pdb_path = os.path.join(feature_dict["output_dir"],f'unranked_{key}_unrelaxed.pdb')
pdb_lines = protein.to_pdb(o['unrelaxed_protein'])
with open(tmp_pdb_path, 'w') as f: f.write(pdb_lines)
disable_tqdm = not verbose
with tqdm.notebook.tqdm(total=total, bar_format=TQDM_BAR_FORMAT, disable=disable_tqdm) as pbar:
if opt["use_turbo"]:
if runner is None:
runner = prep_model_runner(opt,params_loc=params_loc)
# go through each random_seed
for seed in range(num_samples):
# prep input features
feat = do_subsample_msa(feature_dict, random_seed=seed) if subsample_msa else feature_dict
processed_feature_dict = runner["model"].process_features(feat, random_seed=seed)
if pad_feats:
processed_feature_dict = make_fixed_size(processed_feature_dict, runner)
# go through each model
for num, model_name in enumerate(model_names):
name = model_name+"_ptm" if opt["use_ptm"] else model_name
key = f"{name}_seed_{seed}"
pbar.set_description(f'Running {key}')
# replace model parameters
params = data.get_model_haiku_params(name, params_loc)
for k in runner["model"].params.keys():
runner["model"].params[k] = params[k]
# predict
prediction_result, (r, t) = runner["model"].predict(processed_feature_dict, random_seed=seed)
outs[key] = parse_results(prediction_result, processed_feature_dict, r=r, t=t, num_res=num_res)
# cleanup
del prediction_result, params, r, t
# report
do_report(key)
pbar.update(n=1)
# cleanup
del processed_feature_dict
if subsample_msa: del feat
else:
# go through each model
for num, model_name in enumerate(model_names):
name = model_name+"_ptm" if opt["use_ptm"] else model_name
model_runner = prep_model_runner(opt, model_name=model_name, params_loc=params_loc)["model"]
# go through each random_seed
for seed in range(num_samples):
key = f"{name}_seed_{seed}"
pbar.set_description(f'Running {key}')
processed_feature_dict = model_runner.process_features(feature_dict, random_seed=seed)
# predict
prediction_result, (r, t) = model_runner.predict(processed_feature_dict, random_seed=seed)
outs[key] = parse_results(prediction_result, processed_feature_dict, r=r, t=t, num_res=num_res)
# cleanup
del processed_feature_dict, prediction_result, r, t
# report
do_report(key)
pbar.update(n=1)
# cleanup
del model_runner
# Find the best model according to the mean pLDDT.
model_rank = list(outs.keys())
model_rank = [model_rank[i] for i in np.argsort([outs[x][rank_by] for x in model_rank])[::-1]]
# Write out the prediction
for n,key in enumerate(model_rank):
prefix = f"rank_{n+1}_{key}"
pred_output_path = os.path.join(feature_dict["output_dir"],f'{prefix}_unrelaxed.pdb')
fig = cf.plot_protein(outs[key]["unrelaxed_protein"], Ls=feature_dict["Ls"], dpi=200)
plt.savefig(os.path.join(feature_dict["output_dir"],f'{prefix}.png'), bbox_inches = 'tight')
plt.close(fig)
pdb_lines = protein.to_pdb(outs[key]["unrelaxed_protein"])
with open(pred_output_path, 'w') as f:
f.write(pdb_lines)
tmp_pdb_path = os.path.join(feature_dict["output_dir"],f'unranked_{key}_unrelaxed.pdb')
if os.path.isfile(tmp_pdb_path):
os.remove(tmp_pdb_path)
############################################################
if verbose:
print(f"model rank based on {rank_by}")
for n,key in enumerate(model_rank):
print(f"rank_{n+1}_{key} {rank_by}:{outs[key][rank_by]:.2f}")
return outs, model_rank
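# Illustrative end-to-end sketch (added; toy single-sequence run, all names are examples):
#   I = prep_inputs("MKTAYIAKQR", jobname="demo")
#   I = prep_msa(I, msa_method="single_sequence")
#   feature_dict = prep_feats(I)
#   outs, model_rank = run_alphafold(feature_dict, num_models=1, num_samples=1)
#   best = outs[model_rank[0]]  # highest-ranked prediction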
| ColabFold-main | beta/colabfold_alphafold.py |
import pytest
from colabfold.batch import get_queries
def test_get_queries_fasta_dir(pytestconfig, caplog):
dir_path = pytestconfig.rootpath.joinpath("test-data/batch/input")
queries, is_complex = get_queries(dir_path)
assert queries == [("5AWL_1", "YYDPETGTWY", None), ("6A5J", "IKKILSKIKKLLK", None)]
assert not is_complex
assert caplog.messages == [f"{dir_path}/empty.fasta is empty"]
def test_get_queries_empty_a3m(pytestconfig, caplog):
with pytest.raises(ValueError, match="a3m/empty.a3m is empty"):
get_queries(pytestconfig.rootpath.joinpath("test-data/a3m/empty.a3m"))
assert caplog.messages == []
def test_get_queries_csv(pytestconfig, caplog, tmp_path):
queries, is_complex = get_queries(
pytestconfig.rootpath.joinpath("test-data/complex/input.csv")
)
assert queries == [
(
"3G5O_A_3G5O_B",
[
"MRILPISTIKGKLNEFVDAVSSTQDQITITKNGAPAAVLVGADEWESLQETLYWLAQPGIRESIAEADADIASGRTYGEDEIRAEFGVPRRPH",
"MPYTVRFTTTARRDLHKLPPRILAAVVEFAFGDLSREPLRVGKPLRRELAGTFSARRGTYRLLYRIDDEHTTVVILRVDHRADIYRR",
],
None,
),
("5AWL_1", "YYDPETGTWY", None),
]
assert is_complex
assert caplog.messages == []
def test_a3m_input(pytestconfig, caplog, tmp_path):
queries, is_complex = get_queries(pytestconfig.rootpath.joinpath("test-data/a3m"))
assert queries == [
("5AWL1", "YYDPETGTWY", [">101\nYYDPETGTWY"]),
("6A5J", "IKKILSKIKKLLK", [">101\nIKKILSKIKKLLK\n>101\nIKKILSKIKKLLK"]),
]
assert not is_complex
queries, is_complex = get_queries(
pytestconfig.rootpath.joinpath("test-data/a3m/6A5J.a3m")
)
assert queries == [
("6A5J", "IKKILSKIKKLLK", [">101\nIKKILSKIKKLLK\n>101\nIKKILSKIKKLLK"])
]
assert not is_complex
assert caplog.messages == [
f"{pytestconfig.rootpath}/test-data/a3m/empty.a3m is empty"
]
| ColabFold-main | tests/test_utils.py |
import json
from pathlib import Path
if __name__ == "__main__":
for notebook in Path(".").rglob("*.ipynb"):
print(notebook)
data = json.loads(open(notebook).read())
open(notebook, "w").write(json.dumps(data, indent=2, ensure_ascii=False))
| ColabFold-main | tests/reindent_ipynb.py |
from unittest import mock
import haiku
import logging
import pytest
import re
from absl import logging as absl_logging
from functools import lru_cache
from zipfile import ZipFile
from alphafold.model.data import get_model_haiku_params
from alphafold.model.tf import utils
from colabfold.batch import msa_to_str, unserialize_msa, get_queries
from colabfold.batch import run
from colabfold.download import download_alphafold_params
from tests.mock import MockRunModel, MMseqs2Mock
# Without this, we're reading the params each time again which is slow
@lru_cache(maxsize=None)
def get_model_haiku_params_cached(model_name: str, data_dir: str) -> haiku.Params:
return get_model_haiku_params(model_name, data_dir)
@pytest.fixture
def prediction_test(caplog):
caplog.set_level(logging.INFO)
# otherwise jax will tell us about its search for devices
absl_logging.set_verbosity("error")
# We'll also want to mock that out later
download_alphafold_params("AlphaFold2-multimer-v1")
download_alphafold_params("AlphaFold2-ptm")
# alphafold uses a method called `make_random_seed`, which deterministically starts with a seed
# of zero and increases it by one for each protein. This means the input features would become
# dependent on the number and order of tests. Here we just reset the seed to 0
utils.seed_maker = utils.SeedMaker()
# This works because it's used as `data.get_model_haiku_params`
with mock.patch(
"alphafold.model.data.get_model_haiku_params", get_model_haiku_params_cached
):
yield
def test_batch(pytestconfig, caplog, tmp_path, prediction_test):
queries = [("5AWL_1", "YYDPETGTWY", None), ("6A5J", "IKKILSKIKKLLK", None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/batch"), ["5AWL_1", "6A5J"]
)
mock_run_mmseqs = MMseqs2Mock(pytestconfig.rootpath, "batch").mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs):
run(
queries,
tmp_path,
num_models=1,
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=False,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 5 citations for tools or databases",
"Query 1/2: 5AWL_1 (length 10)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 94.3",
"reranking models by plddt",
"Query 2/2: 6A5J (length 13)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 90.8",
"reranking models by plddt",
]
# Very simple test, it would be better to check coordinates
assert (
len(
tmp_path.joinpath("5AWL_1_unrelaxed_rank_1_model_1.pdb")
.read_text()
.splitlines()
)
== 96
)
assert (
len(
tmp_path.joinpath("6A5J_unrelaxed_rank_1_model_1.pdb")
.read_text()
.splitlines()
)
== 112
)
assert tmp_path.joinpath("config.json").is_file()
def test_zip(pytestconfig, caplog, tmp_path, prediction_test):
queries = [("5AWL_1", "YYDPETGTWY", None), ("6A5J", "IKKILSKIKKLLK", None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/batch"), ["5AWL_1", "6A5J"]
)
mock_run_mmseqs = MMseqs2Mock(pytestconfig.rootpath, "batch").mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs):
run(
queries,
tmp_path,
num_models=1,
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=False,
zip_results=True,
)
# Ensure that the correct files are packaged and that they do not contain the dir prefix
expect_zip = [
"cite.bibtex",
"config.json",
"5AWL_1_predicted_aligned_error_v1.json",
"5AWL_1.a3m",
"5AWL_1_PAE.png",
"5AWL_1_coverage.png",
"5AWL_1_plddt.png",
"5AWL_1_unrelaxed_rank_1_model_1.pdb",
"5AWL_1_unrelaxed_rank_1_model_1_scores.json",
]
with ZipFile(tmp_path.joinpath("5AWL_1.result.zip")) as result_zip:
actual_zip = [i.filename for i in result_zip.infolist()]
assert expect_zip == actual_zip
def test_single_sequence(pytestconfig, caplog, tmp_path, prediction_test):
queries = [("5AWL_1", "YYDPETGTWY", None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/batch"), ["5AWL_1"]
)
mock_run_mmseqs = MMseqs2Mock(pytestconfig.rootpath, "batch").mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs):
run(
queries,
tmp_path,
msa_mode="single_sequence",
num_models=1,
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=False,
stop_at_score=100,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 2 citations for tools or databases",
"Query 1/1: 5AWL_1 (length 10)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 94.3",
"reranking models by plddt",
]
# Very simple test, it would be better to check coordinates
assert (
len(
tmp_path.joinpath("5AWL_1_unrelaxed_rank_1_model_1.pdb")
.read_text()
.splitlines()
)
== 96
)
assert tmp_path.joinpath("config.json").is_file()
def test_complex(pytestconfig, caplog, tmp_path, prediction_test):
pdb_3g50_A = "MRILPISTIKGKLNEFVDAVSSTQDQITITKNGAPAAVLVGADEWESLQETLYWLAQPGIRESIAEADADIASGRTYGEDEIRAEFGVPRRPH"
pdb_3g50_B = "MPYTVRFTTTARRDLHKLPPRILAAVVEFAFGDLSREPLRVGKPLRRELAGTFSARRGTYRLLYRIDDEHTTVVILRVDHRADIYRR"
queries = [("3G5O_A_3G5O_B", [pdb_3g50_A, pdb_3g50_B], None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/complex"), ["3G5O_A_3G5O_B"]
)
mock_run_mmseqs2 = MMseqs2Mock(pytestconfig.rootpath, "complex").mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs2):
run(
queries,
tmp_path,
num_models=1,
model_type="AlphaFold2-multimer-v1",
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=True,
stop_at_score=100,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 5 citations for tools or databases",
"Query 1/1: 3G5O_A_3G5O_B (length 180)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 94.4 and ptmscore 0.884",
"reranking models by multimer",
]
def test_complex_ptm(pytestconfig, caplog, tmp_path, prediction_test):
pdb_3g50_A = "MRILPISTIKGKLNEFVDAVSSTQDQITITKNGAPAAVLVGADEWESLQETLYWLAQPGIRESIAEADADIASGRTYGEDEIRAEFGVPRRPH"
pdb_3g50_B = "MPYTVRFTTTARRDLHKLPPRILAAVVEFAFGDLSREPLRVGKPLRRELAGTFSARRGTYRLLYRIDDEHTTVVILRVDHRADIYRR"
queries = [("3G5O_A_3G5O_B", [pdb_3g50_A, pdb_3g50_B], None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/complex_ptm"), ["3G5O_A_3G5O_B"]
)
mock_run_mmseqs2 = MMseqs2Mock(pytestconfig.rootpath, "complex").mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs2):
run(
queries,
tmp_path,
model_type="AlphaFold2-ptm",
num_models=1,
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=True,
stop_at_score=100,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 5 citations for tools or databases",
"Query 1/1: 3G5O_A_3G5O_B (length 180)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 91.9 and ptmscore 0.846",
"reranking models by ptmscore",
]
def test_complex_monomer_ptm(pytestconfig, caplog, tmp_path, prediction_test):
A = "PIAQIHILEGRSDEQKETLIREVSEAISRSLDAPLTSVRVIITEMAKGHFGIGGELASK"
queries = [("A_A", [A, A], None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/complex_monomer_ptm"), ["A_A"]
)
mock_run_mmseqs2 = MMseqs2Mock(
pytestconfig.rootpath, "complex_monomer"
).mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs2):
run(
queries,
tmp_path,
model_type="AlphaFold2-ptm",
num_models=1,
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=True,
stop_at_score=100,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 5 citations for tools or databases",
"Query 1/1: A_A (length 118)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 95.5 and ptmscore 0.867",
"reranking models by ptmscore",
]
def test_complex_monomer(pytestconfig, caplog, tmp_path, prediction_test):
A = "PIAQIHILEGRSDEQKETLIREVSEAISRSLDAPLTSVRVIITEMAKGHFGIGGELASK"
queries = [("A_A", [A, A], None)]
mock_run_model = MockRunModel(
pytestconfig.rootpath.joinpath("test-data/complex_monomer"), ["A_A"]
)
mock_run_mmseqs2 = MMseqs2Mock(
pytestconfig.rootpath, "complex_monomer"
).mock_run_mmseqs2
with mock.patch(
"alphafold.model.model.RunModel.predict",
lambda model_runner, feat: mock_run_model.predict(model_runner, feat),
), mock.patch("colabfold.batch.run_mmseqs2", mock_run_mmseqs2):
run(
queries,
tmp_path,
num_models=1,
model_type="AlphaFold2-multimer-v1",
num_recycles=3,
model_order=[1, 2, 3, 4, 5],
is_complex=True,
stop_at_score=100,
)
messages = [re.sub(r"\d+\.\d+s", "0.0s", i) for i in caplog.messages]
assert messages[1:-1] == [
"Found 5 citations for tools or databases",
"Query 1/1: A_A (length 118)",
"Running model_1",
"model_1 took 0.0s (3 recycles) with pLDDT 95.3 and ptmscore 0.865",
"reranking models by multimer",
]
def test_msa_serialization(pytestconfig):
# heteromer
unpaired_alignment = [
">101\nAAAAAAAA\n>UP1\nAACCcccVVAA\n",
">102\nCCCC\n>UP1\nCCCC\n>UP2\nCaCaCC\n",
]
paired_alignment = [">101\nAAAAAAAA\n>UP1\nVVaVVAAAA\n", ">102\nCCCC\n>UP2\nGGGG\n"]
query_sequence = ["AAAAAAAA", "AAAAAAAA", "CCCC"]
query_sequence_unique = ["AAAAAAAA", "CCCC"]
query_sequence_cardinality = [2, 1]
msa = msa_to_str(
unpaired_alignment,
paired_alignment,
query_sequence_unique,
query_sequence_cardinality,
)
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], query_sequence)
assert unpaired_alignment_ret == unpaired_alignment
assert paired_alignment_ret == paired_alignment
assert query_sequence_unique_ret == query_sequence_unique
assert query_sequence_cardinality == query_sequence_cardinality_ret
# heteromer three complex
unpaired_alignment = [
">101\nAAAAAAAA\n>UP1\nAACCcccVVAA\n",
">102\nCCCC\n>UP1\nCCCC\n>UP2\nCaCaCC\n",
">103\nGGGG\n>UP1\nR--R\n",
">104\nW\n",
]
paired_alignment = [
">101\nAAAAAAAA\n>UP1\nVVaVVAAAA\n",
">102\nCCCC\n>UP2\nGGGG\n",
">103\nGGGG\n>UP3\nGGgGG\n",
">104\nW\n>UP4\nW\n",
]
query_sequence = ["AAAAAAAA", "CCCC", "GGGG", "W", "W"]
query_sequence_unique = ["AAAAAAAA", "CCCC", "GGGG", "W"]
query_sequence_cardinality = [1, 1, 1, 2]
msa = msa_to_str(
unpaired_alignment,
paired_alignment,
query_sequence_unique,
query_sequence_cardinality,
)
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], query_sequence)
assert unpaired_alignment_ret == unpaired_alignment
assert paired_alignment_ret == paired_alignment
assert query_sequence_unique_ret == query_sequence_unique
assert query_sequence_cardinality == query_sequence_cardinality_ret
# heteromer with unpaired
unpaired_alignment = [
">101\nAAAAAAAA\n>UP1\nAACCcccVVAA\n",
">102\nCCCC\n>UP1\nCCCC\n>UP2\nCaCaCC\n",
]
paired_alignment = [">101\nAAAAAAAA\n", ">102\nCCCC\n"]
query_sequence = ["AAAAAAAA", "CCCC", "CCCC"]
query_sequence_unique = ["AAAAAAAA", "CCCC"]
query_sequence_cardinality = [1, 2]
msa = msa_to_str(
unpaired_alignment,
paired_alignment,
query_sequence_unique,
query_sequence_cardinality,
)
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], query_sequence)
assert unpaired_alignment_ret == unpaired_alignment
assert paired_alignment_ret == paired_alignment
assert query_sequence_unique_ret == query_sequence_unique
assert query_sequence_cardinality == query_sequence_cardinality_ret
# homooligomer
unpaired_alignment = [">101\nAAAAAAAA\n>UP2\nAAAVVAAA\n>UP1\nA-CCcccVV-A\n"]
paired_alignment = [">101\nAAAAAAAA\n", ">102\nAAAAAAAA\n"]
query_sequence = ["AAAAAAAA", "AAAAAAAA"]
query_sequence_unique = ["AAAAAAAA"]
query_sequence_cardinality = [2]
msa = msa_to_str(
unpaired_alignment,
paired_alignment,
query_sequence_unique,
query_sequence_cardinality,
)
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], query_sequence)
assert unpaired_alignment_ret == unpaired_alignment
assert paired_alignment_ret == paired_alignment
assert query_sequence_unique_ret == query_sequence_unique
assert query_sequence_cardinality == query_sequence_cardinality_ret
# a3m without header
unpaired_alignment = ">101\nAAAAAAAA\n>UP2\nAAAVVAAA\n>UP1\nA-CCcccVV-A"
paired_alignment = None
query_sequence = "AAAAAAAA"
query_sequence_unique = ["AAAAAAAA"]
query_sequence_cardinality = [1]
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([unpaired_alignment], query_sequence)
assert unpaired_alignment_ret == [unpaired_alignment]
assert paired_alignment_ret is None
assert query_sequence_unique_ret == query_sequence_unique
assert query_sequence_cardinality == query_sequence_cardinality_ret
msa = "#10\t1\n>101\nYYDPETGTWY"
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], "YYDPETGTWY")
assert unpaired_alignment_ret == [">101\nYYDPETGTWY\n"]
assert paired_alignment_ret is None
assert query_sequence_unique_ret == ["YYDPETGTWY"]
    assert query_sequence_cardinality_ret == [1]
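    # Note (added for clarity): the serialized MSA header "#<lengths>\t<cardinalities>"
    # lists the unique query lengths and their copy numbers, e.g. "#10\t1" above is a
    # 10-residue monomer, while a heterodimer of a 94- and an 88-residue chain would
    # start with "#94,88\t1,1".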
# non-complex a3m files
a3m_file = pytestconfig.rootpath.joinpath("test-data/a3m/5AWL1.a3m")
[(_, query_sequence, _)], is_complex = get_queries(a3m_file)
assert not is_complex
msa = a3m_file.read_text()
(
unpaired_alignment_ret,
paired_alignment_ret,
query_sequence_unique_ret,
query_sequence_cardinality_ret,
template,
) = unserialize_msa([msa], query_sequence)
assert unpaired_alignment_ret
    assert not paired_alignment_ret
assert query_sequence_unique_ret == [query_sequence]
assert query_sequence_cardinality_ret == [1]
| ColabFold-main | tests/test_colabfold.py |
from unittest import mock
from colabfold.batch import get_msa_and_templates
from tests.mock import MMseqs2Mock
def test_get_msa_and_templates(pytestconfig, caplog, tmp_path):
Q60262 = "MEIIALLIEEGIIIIKDKKVAERFLKDLESSQGMDWKEIRERAERAKKQLEEGIEWAKKTKL"
for msa_mode, tag, lines in [
("MMseqs2 (UniRef+Environmental)", "uniref_env", 12),
("MMseqs2 (UniRef only)", "uniref", 8),
("single_sequence", "single_sequence", 2),
]:
mmseqs2mock = MMseqs2Mock(pytestconfig.rootpath, f"get_msa_{tag}")
with mock.patch("colabfold.batch.run_mmseqs2", mmseqs2mock.mock_run_mmseqs2):
(
unpaired_msa,
paired_msa,
query_seqs_unique,
query_seqs_cardinality,
template_features,
) = get_msa_and_templates(
"test",
Q60262,
tmp_path,
msa_mode,
False,
None,
"unpaired+paired",
)
assert len(unpaired_msa[0].splitlines()) == lines
assert paired_msa is None
assert query_seqs_unique == [Q60262]
assert query_seqs_cardinality == [1]
assert caplog.messages == []
| ColabFold-main | tests/test_msa.py |
| ColabFold-main | tests/__init__.py |
import json
import lzma
import os
import pickle
from pathlib import Path
from typing import List, Tuple, Mapping, Any, Dict
import numpy
from alphafold.model.features import FeatureDict
from alphafold.model.model import RunModel
from colabfold.colabfold import run_mmseqs2
# Copy the original method before mocking
original_run_model = RunModel.predict
class MockRunModel:
"""Mocks FeatureDict -> prediction
The class is stateful, i.e. predictions need to be done in the given order
msa_feat is a) large and b) has some variance between machines, so we ignore it
"""
fixture_dir: Path
predictions: List[str]
pos: int
def __init__(self, fixture_dir: Path, predictions: List[str]):
self.fixture_dir = fixture_dir
self.predictions = predictions
self.pos = 0
def predict(
self, model_runner: RunModel, feat: FeatureDict
) -> Tuple[Mapping[str, Any], Tuple[Any, Any]]:
"""feat["msa"] or feat["msa_feat"] for normal/complexes is non-deterministic, so we remove it before storing,
but we keep it for predicting or returning, where we need it for plotting"""
feat_no_msa = dict(feat)
if "msa_feat" in feat_no_msa.keys():
del feat_no_msa["msa_feat"]
elif "msa" in feat_no_msa.keys():
del feat_no_msa["msa"]
else:
raise AssertionError("neither msa nor msa_feat in feat")
prediction_file = self.fixture_dir.joinpath(
self.predictions[self.pos]
).joinpath("model_prediction_result.pkl.xz")
input_fix_file = self.fixture_dir.joinpath(self.predictions[self.pos]).joinpath(
"model_input_fix.pkl.xz"
)
self.pos += 1
if (
not prediction_file.is_file() or not input_fix_file.is_file()
) and os.environ.get("UPDATE_SNAPSHOTS"):
print("Running new prediction")
            with lzma.open(input_fix_file, "wb") as fp:
pickle.dump(feat_no_msa, fp)
prediction, (_, _) = original_run_model(model_runner, feat)
del prediction["distogram"]
del prediction["experimentally_resolved"]
del prediction["masked_msa"]
del prediction["aligned_confidence_probs"]
            with lzma.open(prediction_file, "wb") as fp:
pickle.dump(prediction, fp)
with lzma.open(input_fix_file) as input_fix_fp:
input_fix = pickle.load(input_fix_fp)
with lzma.open(prediction_file) as prediction_fp:
prediction = pickle.load(prediction_fp)
is_same = True
for key in input_fix:
if (
key not in feat_no_msa
or feat_no_msa[key].shape != input_fix[key].shape
or not numpy.allclose(feat_no_msa[key], input_fix[key])
):
is_same = False
break
if is_same:
return prediction, (3, 0)
if os.environ.get("UPDATE_SNAPSHOTS"):
print("Running new prediction")
with lzma.open(input_fix_file, "wb") as fp:
pickle.dump(feat_no_msa, fp)
prediction, (_, _) = original_run_model(model_runner, feat)
with lzma.open(prediction_file, "wb") as fp:
pickle.dump(prediction, fp)
return prediction, (None, None)
else:
for key in input_fix:
# Generate a more helpful error message
                assert feat_no_msa[key].shape == input_fix[
                    key
                ].shape and numpy.allclose(feat_no_msa[key], input_fix[key]), key
class MMseqs2Mock:
"""Mocks out the call to the mmseqs2 api
Each test has its own json file which contains the run_mmseqs2 input data in the
config field and the saved response. To update responses or to add new tests,
    set the UPDATE_SNAPSHOTS env var (e.g. `UPDATE_SNAPSHOTS=1 pytest`)
"""
data_file: Path
saved_responses: List[Dict[str, Any]]
def __init__(self, rootpath: Path, name: str):
self.data_file = (
rootpath.joinpath("test-data/mmseqs-api-reponses")
.joinpath(name)
.with_suffix(".json")
)
if os.environ.get("UPDATE_SNAPSHOTS") and not self.data_file.is_file():
self.data_file.write_text("[]")
with self.data_file.open() as fp:
self.saved_responses = []
for saved_response in json.load(fp):
# Join lines we've split before
response = join_lines(saved_response["response"])
self.saved_responses.append(
{"config": saved_response["config"], "response": response}
)
def mock_run_mmseqs2(
self,
query,
prefix,
use_env=True,
use_filter=True,
use_templates=False,
filter=None,
use_pairing=False,
host_url="https://a3m.mmseqs.com",
):
assert prefix
config = {
"query": query,
"use_env": use_env,
"use_filter": use_filter,
"use_templates": use_templates,
"filter": filter,
"use_pairing": use_pairing,
}
for saved_response in self.saved_responses:
if saved_response["config"] == config:
return saved_response["response"]
if os.environ.get("UPDATE_SNAPSHOTS"):
print(f"\nrun_mmseqs2 with {config}")
response = run_mmseqs2(
x=config["query"],
prefix=prefix,
use_env=config["use_env"],
use_filter=config["use_filter"],
use_templates=config["use_templates"],
filter=config["filter"],
use_pairing=config["use_pairing"],
host_url=host_url,
)
# Split lines so we get a readable json file
response = split_lines(response)
self.saved_responses.append({"config": config, "response": response})
            self.data_file.write_text(json.dumps(self.saved_responses, indent=2))
            return join_lines(response)
else:
assert False, config
def split_lines(x):
"""Split each files into a list of lines"""
if isinstance(x, list):
return [split_lines(i) for i in x]
elif isinstance(x, str):
return x.splitlines()
else:
raise TypeError(f"{type(x)} {str(x)[:20]}")
def join_lines(x):
"""Inverse of split_lines"""
if all(isinstance(i, str) for i in x):
return "\n".join(x)
elif all(isinstance(i, list) for i in x):
return [join_lines(i) for i in x]
else:
raise TypeError(f"{[type(i) for i in x]} {str(x)[:20]}")
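# Minimal sketch (added for illustration): split_lines()/join_lines() round-trip,
# which is what allows MMseqs2 responses to be stored as readable JSON snapshots.
def _split_join_round_trip_example():
    original = [">query\nYYDPETGTWY", [">query\nIKKILSKIKKLLK"]]
    return join_lines(split_lines(original)) == original  # -> True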
| ColabFold-main | tests/mock.py |
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
citations = {
"Mirdita2021": """@article{Mirdita2021,
author= {Mirdita, Milot and Schütze, Konstantin and Moriwaki, Yoshitaka and Heo, Lim and Ovchinnikov, Sergey and Steinegger, Martin },
doi = {10.1101/2021.08.15.456425v2},
journal = {bioRxiv},
title = {{ColabFold - Making Protein folding accessible to all}},
year = {2021},
comment = {ColabFold including MMseqs2 MSA server}
}""",
"Mitchell2019": """@article{Mitchell2019,
author = {Mitchell, Alex L and Almeida, Alexandre and Beracochea, Martin and Boland, Miguel and Burgin, Josephine and Cochrane, Guy and Crusoe, Michael R and Kale, Varsha and Potter, Simon C and Richardson, Lorna J and Sakharova, Ekaterina and Scheremetjew, Maxim and Korobeynikov, Anton and Shlemov, Alex and Kunyavskaya, Olga and Lapidus, Alla and Finn, Robert D},
doi = {10.1093/nar/gkz1035},
journal = {Nucleic Acids Res.},
title = {{MGnify: the microbiome analysis resource in 2020}},
year = {2019},
comment = {MGnify database}
}""",
"Eastman2017": """@article{Eastman2017,
author = {Eastman, Peter and Swails, Jason and Chodera, John D. and McGibbon, Robert T. and Zhao, Yutong and Beauchamp, Kyle A. and Wang, Lee-Ping and Simmonett, Andrew C. and Harrigan, Matthew P. and Stern, Chaya D. and Wiewiora, Rafal P. and Brooks, Bernard R. and Pande, Vijay S.},
doi = {10.1371/journal.pcbi.1005659},
journal = {PLOS Comput. Biol.},
number = {7},
title = {{OpenMM 7: Rapid development of high performance algorithms for molecular dynamics}},
volume = {13},
year = {2017},
comment = {Amber relaxation}
}""",
"Jumper2021": """@article{Jumper2021,
author = {Jumper, John and Evans, Richard and Pritzel, Alexander and Green, Tim and Figurnov, Michael and Ronneberger, Olaf and Tunyasuvunakool, Kathryn and Bates, Russ and {\v{Z}}{\'{i}}dek, Augustin and Potapenko, Anna and Bridgland, Alex and Meyer, Clemens and Kohl, Simon A. A. and Ballard, Andrew J. and Cowie, Andrew and Romera-Paredes, Bernardino and Nikolov, Stanislav and Jain, Rishub and Adler, Jonas and Back, Trevor and Petersen, Stig and Reiman, David and Clancy, Ellen and Zielinski, Michal and Steinegger, Martin and Pacholska, Michalina and Berghammer, Tamas and Bodenstein, Sebastian and Silver, David and Vinyals, Oriol and Senior, Andrew W. and Kavukcuoglu, Koray and Kohli, Pushmeet and Hassabis, Demis},
doi = {10.1038/s41586-021-03819-2},
journal = {Nature},
pmid = {34265844},
title = {{Highly accurate protein structure prediction with AlphaFold.}},
year = {2021},
comment = {AlphaFold2 + BFD Database}
}""",
"Evans2021": """@article{Evans2021,
author = {Evans, Richard and O'Neill, Michael and Pritzel, Alexander and Antropova, Natasha and Senior, Andrew and Green, Tim and Zidek, Augustin and Bates, Russ and Blackwell, Sam and Yim, Jason and Ronneberger, Olaf and Bodenstein, Sebastian and Zielinski, Michal and Bridgland, Alex and Potapenko, Anna and Cowie, Andrew and Tunyasuvunakool, Kathryn and Jain, Rishub and Clancy, Ellen and Kohli, Pushmeet and Jumper, John and Hassabis, Demis},
doi = {10.1101/2021.10.04.463034v1},
journal = {bioRxiv},
title = {{Protein complex prediction with AlphaFold-Multimer}},
year = {2021},
comment = {AlphaFold2-multimer}
}""",
"Mirdita2019": """@article{Mirdita2019,
author = {Mirdita, Milot and Steinegger, Martin and S{\"{o}}ding, Johannes},
doi = {10.1093/bioinformatics/bty1057},
journal = {Bioinformatics},
number = {16},
pages = {2856--2858},
pmid = {30615063},
title = {{MMseqs2 desktop and local web server app for fast, interactive sequence searches}},
volume = {35},
year = {2019},
comment = {MMseqs2 search server}
}""",
"Steinegger2019": """@article{Steinegger2019,
author = {Steinegger, Martin and Meier, Markus and Mirdita, Milot and V{\"{o}}hringer, Harald and Haunsberger, Stephan J. and S{\"{o}}ding, Johannes},
doi = {10.1186/s12859-019-3019-7},
journal = {BMC Bioinform.},
number = {1},
pages = {473},
pmid = {31521110},
title = {{HH-suite3 for fast remote homology detection and deep protein annotation}},
volume = {20},
year = {2019},
comment = {PDB70 database}
}""",
"Mirdita2017": """@article{Mirdita2017,
author = {Mirdita, Milot and von den Driesch, Lars and Galiez, Clovis and Martin, Maria J. and S{\"{o}}ding, Johannes and Steinegger, Martin},
doi = {10.1093/nar/gkw1081},
journal = {Nucleic Acids Res.},
number = {D1},
pages = {D170--D176},
pmid = {27899574},
title = {{Uniclust databases of clustered and deeply annotated protein sequences and alignments}},
volume = {45},
year = {2017},
comment = {Uniclust30/UniRef30 database},
}""",
"Berman2003": """@misc{Berman2003,
author = {Berman, Helen and Henrick, Kim and Nakamura, Haruki},
booktitle = {Nat. Struct. Biol.},
doi = {10.1038/nsb1203-980},
number = {12},
pages = {980},
pmid = {14634627},
title = {{Announcing the worldwide Protein Data Bank}},
volume = {10},
year = {2003},
comment = {templates downloaded from wwPDB server}
}""",
}
def write_bibtex(
model: str,
use_msa: bool,
use_env: bool,
use_templates: bool,
use_amber: bool,
result_dir: Path,
bibtex_file: str = "cite.bibtex",
) -> Path:
to_cite = ["Mirdita2021"]
if model == "AlphaFold2-ptm":
to_cite += ["Jumper2021"]
if model.startswith("AlphaFold2-multimer"):
to_cite += ["Evans2021"]
if use_msa:
to_cite += ["Mirdita2019"]
if use_msa:
to_cite += ["Mirdita2017"]
if use_env:
to_cite += ["Mitchell2019"]
if use_templates:
to_cite += ["Steinegger2019"]
if use_templates:
to_cite += ["Berman2003"]
if use_amber:
to_cite += ["Eastman2017"]
bibtex_file = result_dir.joinpath(bibtex_file)
with bibtex_file.open("w") as writer:
for i in to_cite:
writer.write(citations[i])
writer.write("\n")
logger.info(f"Found {len(to_cite)} citations for tools or databases")
return bibtex_file
| ColabFold-main | colabfold/citations.py |
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
def plot_predicted_alignment_error(
jobname: str, num_models: int, outs: dict, result_dir: Path, show: bool = False
):
plt.figure(figsize=(3 * num_models, 2), dpi=100)
for n, (model_name, value) in enumerate(outs.items()):
plt.subplot(1, num_models, n + 1)
plt.title(model_name)
plt.imshow(value["pae"], label=model_name, cmap="bwr", vmin=0, vmax=30)
plt.colorbar()
plt.savefig(result_dir.joinpath(jobname + "_PAE.png"))
if show:
plt.show()
plt.close()
def plot_msa(msa, query_sequence, seq_len_list, total_seq_len, dpi=100):
# gather MSA info
prev_pos = 0
msa_parts = []
    Ln = np.cumsum(np.append(0, seq_len_list))
for id, l in enumerate(seq_len_list):
chain_seq = np.array(query_sequence[prev_pos : prev_pos + l])
chain_msa = np.array(msa[:, prev_pos : prev_pos + l])
seqid = np.array(
[
np.count_nonzero(chain_seq == msa_line[prev_pos : prev_pos + l])
/ len(chain_seq)
for msa_line in msa
]
)
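        # Gap positions (encoded as 21) become NaN so they render as blank pixels;
        # every other position is colored by its sequence identity to the query chain.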
non_gaps = (chain_msa != 21).astype(float)
non_gaps[non_gaps == 0] = np.nan
msa_parts.append((non_gaps[:] * seqid[:, None]).tolist())
prev_pos += l
lines = []
lines_to_sort = []
prev_has_seq = [True] * len(seq_len_list)
for line_num in range(len(msa_parts[0])):
has_seq = [True] * len(seq_len_list)
for id in range(len(seq_len_list)):
if np.sum(~np.isnan(msa_parts[id][line_num])) == 0:
has_seq[id] = False
if has_seq == prev_has_seq:
line = []
for id in range(len(seq_len_list)):
line += msa_parts[id][line_num]
lines_to_sort.append(np.array(line))
else:
lines_to_sort = np.array(lines_to_sort)
lines_to_sort = lines_to_sort[np.argsort(-np.nanmax(lines_to_sort, axis=1))]
lines += lines_to_sort.tolist()
lines_to_sort = []
line = []
for id in range(len(seq_len_list)):
line += msa_parts[id][line_num]
lines_to_sort.append(line)
prev_has_seq = has_seq
lines_to_sort = np.array(lines_to_sort)
lines_to_sort = lines_to_sort[np.argsort(-np.nanmax(lines_to_sort, axis=1))]
lines += lines_to_sort.tolist()
# Nn = np.cumsum(np.append(0, Nn))
# lines = np.concatenate(lines, 1)
xaxis_size = len(lines[0])
yaxis_size = len(lines)
plt.figure(figsize=(8, 5), dpi=dpi)
plt.title("Sequence coverage")
plt.imshow(
lines[::-1],
interpolation="nearest",
aspect="auto",
cmap="rainbow_r",
vmin=0,
vmax=1,
origin="lower",
extent=(0, xaxis_size, 0, yaxis_size),
)
for i in Ln[1:-1]:
plt.plot([i, i], [0, yaxis_size], color="black")
# for i in Ln_dash[1:-1]:
# plt.plot([i, i], [0, lines.shape[0]], "--", color="black")
# for j in Nn[1:-1]:
# plt.plot([0, lines.shape[1]], [j, j], color="black")
plt.plot((np.isnan(lines) == False).sum(0), color="black")
plt.xlim(0, xaxis_size)
plt.ylim(0, yaxis_size)
plt.colorbar(label="Sequence identity to query")
plt.xlabel("Positions")
plt.ylabel("Sequences")
return plt
| ColabFold-main | colabfold/plot.py |
import logging
import tarfile
from pathlib import Path
import appdirs
import requests
import tqdm
logger = logging.getLogger(__name__)
# The data dir location logic switches between a version with and one without "params" because alphafold
# always internally joins "params". (We should probably patch alphafold)
default_data_dir = Path(appdirs.user_cache_dir(__package__ or "colabfold"))
def download_alphafold_params(model_type: str, data_dir: Path = default_data_dir):
params_dir = data_dir.joinpath("params")
if model_type == "AlphaFold2-multimer-v2":
url = "https://storage.googleapis.com/alphafold/alphafold_params_colab_2022-03-02.tar"
success_marker = params_dir.joinpath(
"download_complexes_multimer-v2_finished.txt"
)
elif model_type == "AlphaFold2-multimer-v1":
url = "https://storage.googleapis.com/alphafold/alphafold_params_colab_2021-10-27.tar"
success_marker = params_dir.joinpath(
"download_complexes_multimer-v1_finished.txt"
)
else:
url = "https://storage.googleapis.com/alphafold/alphafold_params_2021-07-14.tar"
success_marker = params_dir.joinpath("download_finished.txt")
if success_marker.is_file():
return
params_dir.mkdir(parents=True, exist_ok=True)
response = requests.get(url, stream=True)
file_size = int(response.headers.get("Content-Length", 0))
with tqdm.tqdm.wrapattr(
response.raw,
"read",
total=file_size,
desc=f"Downloading alphafold2 weights to {data_dir}",
) as response_raw:
# Open in stream mode ("r|"), as our requests response doesn't support seeking)
file = tarfile.open(fileobj=response_raw, mode="r|")
file.extractall(path=params_dir)
success_marker.touch()
if __name__ == "__main__":
# TODO: Arg to select which one
download_alphafold_params("AlphaFold2-multimer-v2")
download_alphafold_params("AlphaFold2-ptm")
| ColabFold-main | colabfold/download.py |
import os
from Bio.PDB import MMCIFParser
os.environ["TF_FORCE_UNIFIED_MEMORY"] = "1"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "2.0"
import json
import logging
import math
import random
import sys
import time
import zipfile
import io
from argparse import ArgumentParser
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import haiku
import importlib_metadata
import numpy as np
import pandas
from alphafold.notebooks.notebook_utils import get_pae_json
from jax.lib import xla_bridge
from numpy import ndarray
try:
import alphafold
except ModuleNotFoundError:
raise RuntimeError(
"\n\nalphafold is not installed. Please run `pip install colabfold[alphafold]`\n"
)
from alphafold.common import protein, residue_constants
from alphafold.common.protein import Protein
from alphafold.data import (
feature_processing,
msa_pairing,
pipeline,
pipeline_multimer,
templates,
)
from alphafold.data.tools import hhsearch
from alphafold.model import model
from colabfold.alphafold.models import load_models_and_params
from colabfold.alphafold.msa import make_fixed_size
from colabfold.citations import write_bibtex
from colabfold.colabfold import chain_break, plot_paes, plot_plddts, run_mmseqs2
from colabfold.download import default_data_dir, download_alphafold_params
from colabfold.plot import plot_msa
from colabfold.utils import (
ACCEPT_DEFAULT_TERMS,
DEFAULT_API_SERVER,
NO_GPU_FOUND,
get_commit,
safe_filename,
setup_logging,
)
logger = logging.getLogger(__name__)
def mk_mock_template(
query_sequence: Union[List[str], str], num_temp: int = 1
) -> Dict[str, Any]:
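    # Build an all-zero poly-alanine placeholder template so template features are
    # always present; it should carry no real structural signal for the model.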
ln = (
len(query_sequence)
if isinstance(query_sequence, str)
else sum(len(s) for s in query_sequence)
)
output_templates_sequence = "A" * ln
output_confidence_scores = np.full(ln, 1.0)
templates_all_atom_positions = np.zeros(
(ln, templates.residue_constants.atom_type_num, 3)
)
templates_all_atom_masks = np.zeros((ln, templates.residue_constants.atom_type_num))
templates_aatype = templates.residue_constants.sequence_to_onehot(
output_templates_sequence, templates.residue_constants.HHBLITS_AA_TO_ID
)
template_features = {
"template_all_atom_positions": np.tile(
templates_all_atom_positions[None], [num_temp, 1, 1, 1]
),
"template_all_atom_masks": np.tile(
templates_all_atom_masks[None], [num_temp, 1, 1]
),
"template_sequence": [f"none".encode()] * num_temp,
"template_aatype": np.tile(np.array(templates_aatype)[None], [num_temp, 1, 1]),
"template_confidence_scores": np.tile(
output_confidence_scores[None], [num_temp, 1]
),
"template_domain_names": [f"none".encode()] * num_temp,
"template_release_date": [f"none".encode()] * num_temp,
"template_sum_probs": np.zeros([num_temp], dtype=np.float32),
}
return template_features
def mk_template(
a3m_lines: str, template_path: str, query_sequence: str
) -> Dict[str, Any]:
template_featurizer = templates.HhsearchHitFeaturizer(
mmcif_dir=template_path,
max_template_date="2100-01-01",
max_hits=20,
kalign_binary_path="kalign",
release_dates_path=None,
obsolete_pdbs_path=None,
)
hhsearch_pdb70_runner = hhsearch.HHSearch(
binary_path="hhsearch", databases=[f"{template_path}/pdb70"]
)
hhsearch_result = hhsearch_pdb70_runner.query(a3m_lines)
hhsearch_hits = pipeline.parsers.parse_hhr(hhsearch_result)
templates_result = template_featurizer.get_templates(
query_sequence=query_sequence, hits=hhsearch_hits
)
return dict(templates_result.features)
def mk_hhsearch_db(template_dir: str):
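    # Build a minimal pdb70-style ffindex/ffdata database from the *.cif files in
    # template_dir so hhsearch can be pointed at user-provided custom templates.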
template_path = Path(template_dir)
cif_files = template_path.glob("*.cif")
pdb70_db_files = template_path.glob("pdb70*")
for f in pdb70_db_files:
os.remove(f)
with open(template_path.joinpath("pdb70_a3m.ffdata"), "w") as a3m, open(
template_path.joinpath("pdb70_cs219.ffindex"), "w"
) as cs219_index, open(
template_path.joinpath("pdb70_a3m.ffindex"), "w"
) as a3m_index, open(
template_path.joinpath("pdb70_cs219.ffdata"), "w"
) as cs219:
id = 1000000
index_offset = 0
for cif_file in cif_files:
with open(cif_file) as f:
cif_string = f.read()
cif_fh = io.StringIO(cif_string)
parser = MMCIFParser(QUIET=True)
structure = parser.get_structure("none", cif_fh)
models = list(structure.get_models())
if len(models) != 1:
raise ValueError(
f"Only single model PDBs are supported. Found {len(models)} models."
)
model = models[0]
for chain in model:
amino_acid_res = []
for res in chain:
if res.id[2] != " ":
raise ValueError(
f"PDB contains an insertion code at chain {chain.id} and residue "
f"index {res.id[1]}. These are not supported."
)
amino_acid_res.append(
residue_constants.restype_3to1.get(res.resname, "X")
)
protein_str = "".join(amino_acid_res)
a3m_str = f">{cif_file.stem}_{chain.id}\n{protein_str}\n\0"
a3m_str_len = len(a3m_str)
a3m_index.write(f"{id}\t{index_offset}\t{a3m_str_len}\n")
cs219_index.write(f"{id}\t{index_offset}\t{len(protein_str)}\n")
index_offset += a3m_str_len
a3m.write(a3m_str)
cs219.write("\n\0")
id += 1
def batch_input(
input_features: model.features.FeatureDict,
model_runner: model.RunModel,
model_name: str,
crop_len: int,
use_templates: bool,
) -> model.features.FeatureDict:
model_config = model_runner.config
eval_cfg = model_config.data.eval
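    # crop_feats maps each feature name to its shape schema with an extra leading
    # placeholder dimension, so make_fixed_size below can pad everything
    # (MSA clusters, extra MSA, residues, templates) to fixed, compile-friendly sizes.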
crop_feats = {k: [None] + v for k, v in dict(eval_cfg.feat).items()}
# templates models
if (model_name == "model_1" or model_name == "model_2") and use_templates:
pad_msa_clusters = eval_cfg.max_msa_clusters - eval_cfg.max_templates
else:
pad_msa_clusters = eval_cfg.max_msa_clusters
max_msa_clusters = pad_msa_clusters
# let's try pad (num_res + X)
input_fix = make_fixed_size(
input_features,
crop_feats,
msa_cluster_size=max_msa_clusters, # true_msa (4, 512, 68)
extra_msa_size=5120, # extra_msa (4, 5120, 68)
num_res=crop_len, # aatype (4, 68)
num_templates=4,
) # template_mask (4, 4) second value
return input_fix
def predict_structure(
prefix: str,
result_dir: Path,
feature_dict: Dict[str, Any],
is_complex: bool,
use_templates: bool,
sequences_lengths: List[int],
crop_len: int,
model_type: str,
model_runner_and_params: List[Tuple[str, model.RunModel, haiku.Params]],
do_relax: bool = False,
rank_by: str = "auto",
random_seed: int = 0,
stop_at_score: float = 100,
prediction_callback: Callable[[Any, Any, Any, Any], Any] = None,
):
"""Predicts structure using AlphaFold for the given sequence."""
plddts, paes, ptmscore, iptmscore = [], [], [], []
max_paes = []
unrelaxed_pdb_lines = []
relaxed_pdb_lines = []
prediction_times = []
representations = []
seq_len = sum(sequences_lengths)
model_names = []
for (model_name, model_runner, params) in model_runner_and_params:
logger.info(f"Running {model_name}")
model_names.append(model_name)
# swap params to avoid recompiling
# note: models 1,2 have diff number of params compared to models 3,4,5 (this was handled on construction)
model_runner.params = params
processed_feature_dict = model_runner.process_features(
feature_dict, random_seed=random_seed
)
if not is_complex:
input_features = batch_input(
processed_feature_dict,
model_runner,
model_name,
crop_len,
use_templates,
)
else:
input_features = processed_feature_dict
start = time.time()
# The original alphafold only returns the prediction_result,
# but our patched alphafold also returns a tuple (recycles,tol)
prediction_result, recycles = model_runner.predict(input_features)
prediction_time = time.time() - start
prediction_times.append(prediction_time)
mean_plddt = np.mean(prediction_result["plddt"][:seq_len])
mean_ptm = prediction_result["ptm"]
if rank_by == "plddt":
mean_score = mean_plddt
else:
mean_score = mean_ptm
if is_complex:
logger.info(
f"{model_name} took {prediction_time:.1f}s ({recycles[0]} recycles) "
f"with pLDDT {mean_plddt:.3g} and ptmscore {mean_ptm:.3g}"
)
else:
logger.info(
f"{model_name} took {prediction_time:.1f}s ({recycles[0]} recycles) "
f"with pLDDT {mean_plddt:.3g}"
)
final_atom_mask = prediction_result["structure_module"]["final_atom_mask"]
b_factors = prediction_result["plddt"][:, None] * final_atom_mask
if is_complex and model_type == "AlphaFold2-ptm":
input_features["asym_id"] = feature_dict["asym_id"]
input_features["aatype"] = input_features["aatype"][0]
input_features["residue_index"] = input_features["residue_index"][0]
curr_residue_index = 1
res_index_array = input_features["residue_index"].copy()
res_index_array[0] = 0
for i in range(1, input_features["aatype"].shape[0]):
if (
input_features["residue_index"][i]
- input_features["residue_index"][i - 1]
) > 1:
curr_residue_index = 0
res_index_array[i] = curr_residue_index
curr_residue_index += 1
input_features["residue_index"] = res_index_array
unrelaxed_protein = protein.from_prediction(
features=input_features,
result=prediction_result,
b_factors=b_factors,
remove_leading_feature_dimension=not is_complex,
)
if prediction_callback is not None:
prediction_callback(
unrelaxed_protein, sequences_lengths, prediction_result, input_features
)
representations.append(prediction_result.get("representations", None))
unrelaxed_pdb_lines.append(protein.to_pdb(unrelaxed_protein))
plddts.append(prediction_result["plddt"][:seq_len])
ptmscore.append(prediction_result["ptm"])
if model_type.startswith("AlphaFold2-multimer"):
iptmscore.append(prediction_result["iptm"])
max_paes.append(prediction_result["max_predicted_aligned_error"].item())
paes_res = []
for i in range(seq_len):
paes_res.append(prediction_result["predicted_aligned_error"][i][:seq_len])
paes.append(paes_res)
if do_relax:
from alphafold.common import residue_constants
from alphafold.relax import relax
# Hack so that we don't need to download into the alphafold package itself
residue_constants.stereo_chemical_props_path = "stereo_chemical_props.txt"
# Remove the padding because unlike to_pdb() amber doesn't handle that
remove_padding_mask = np.array(unrelaxed_protein.atom_mask.sum(axis=-1) > 0)
unrelaxed_protein = Protein(
atom_mask=unrelaxed_protein.atom_mask[remove_padding_mask],
atom_positions=unrelaxed_protein.atom_positions[remove_padding_mask],
aatype=unrelaxed_protein.aatype[remove_padding_mask],
residue_index=unrelaxed_protein.residue_index[remove_padding_mask],
b_factors=unrelaxed_protein.b_factors[remove_padding_mask],
chain_index=unrelaxed_protein.chain_index[remove_padding_mask],
)
# Relax the prediction.
amber_relaxer = relax.AmberRelaxation(
max_iterations=0,
tolerance=2.39,
stiffness=10.0,
exclude_residues=[],
max_outer_iterations=20,
)
relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
# TODO: Those aren't actually used in batch
relaxed_pdb_lines.append(relaxed_pdb_str)
# early stop criteria fulfilled
if mean_score > stop_at_score:
break
# rerank models based on predicted lddt
if rank_by == "ptmscore":
model_rank = np.array(ptmscore).argsort()[::-1]
elif rank_by == "multimer":
rank_array = np.array(iptmscore) * 0.8 + np.array(ptmscore) * 0.2
model_rank = rank_array.argsort()[::-1]
else:
model_rank = np.mean(plddts, -1).argsort()[::-1]
out = {}
logger.info(f"reranking models by {rank_by}")
for n, key in enumerate(model_rank):
unrelaxed_pdb_path = result_dir.joinpath(
f"{prefix}_unrelaxed_rank_{n + 1}_{model_names[key]}.pdb"
)
unrelaxed_pdb_path.write_text(unrelaxed_pdb_lines[key])
if do_relax:
relaxed_pdb_path = result_dir.joinpath(
f"{prefix}_relaxed_rank_{n + 1}_{model_names[key]}.pdb"
)
relaxed_pdb_path.write_text(relaxed_pdb_lines[key])
# Write an easy-to-use format (PAE and plDDT)
scores_file = result_dir.joinpath(
f"{prefix}_unrelaxed_rank_{n + 1}_{model_names[key]}_scores.json"
)
with scores_file.open("w") as fp:
# We use astype(np.float64) to prevent very long stringified floats from float imprecision
scores = {
"max_pae": max_paes[key],
"pae": np.around(np.asarray(paes[key]).astype(np.float64), 2).tolist(),
"plddt": np.around(np.asarray(plddts[key]), 2).tolist(),
"ptm": np.around(ptmscore[key], 2).item(),
}
json.dump(scores, fp)
out[key] = {
"plddt": np.asarray(plddts[key]),
"pae": np.asarray(paes[key]),
"max_pae": max_paes[key],
"pTMscore": ptmscore[key],
"model_name": model_names[key],
"representations": representations[key],
}
return out, model_rank
def parse_fasta(fasta_string: str) -> Tuple[List[str], List[str]]:
"""Parses FASTA string and returns list of strings with amino-acid sequences.
Arguments:
fasta_string: The string contents of a FASTA file.
Returns:
A tuple of two lists:
* A list of sequences.
* A list of sequence descriptions taken from the comment lines. In the
same order as the sequences.
"""
sequences = []
descriptions = []
index = -1
for line in fasta_string.splitlines():
line = line.strip()
if line.startswith("#"):
continue
if line.startswith(">"):
index += 1
descriptions.append(line[1:]) # Remove the '>' at the beginning.
sequences.append("")
continue
elif not line:
continue # Skip blank lines.
sequences[index] += line
return sequences, descriptions
def get_queries(
input_path: Union[str, Path], sort_queries_by: str = "length"
) -> Tuple[List[Tuple[str, str, Optional[List[str]]]], bool]:
"""Reads a directory of fasta files, a single fasta file or a csv file and returns a tuple
of job name, sequence and the optional a3m lines"""
input_path = Path(input_path)
if not input_path.exists():
raise OSError(f"{input_path} could not be found")
if input_path.is_file():
if input_path.suffix == ".csv" or input_path.suffix == ".tsv":
sep = "\t" if input_path.suffix == ".tsv" else ","
df = pandas.read_csv(input_path, sep=sep)
assert "id" in df.columns and "sequence" in df.columns
queries = [
(seq_id, sequence.upper().split(":"), None)
for seq_id, sequence in df[["id", "sequence"]].itertuples(index=False)
]
for i in range(len(queries)):
if len(queries[i][1]) == 1:
queries[i] = (queries[i][0], queries[i][1][0], None)
elif input_path.suffix == ".a3m":
(seqs, header) = parse_fasta(input_path.read_text())
if len(seqs) == 0:
raise ValueError(f"{input_path} is empty")
query_sequence = seqs[0]
# Use a list so we can easily extend this to multiple msas later
a3m_lines = [input_path.read_text()]
queries = [(input_path.stem, query_sequence, a3m_lines)]
elif input_path.suffix in [".fasta", ".faa", ".fa"]:
(sequences, headers) = parse_fasta(input_path.read_text())
queries = []
for sequence, header in zip(sequences, headers):
sequence = sequence.upper()
if sequence.count(":") == 0:
# Single sequence
queries.append((header, sequence, None))
else:
# Complex mode
queries.append((header, sequence.upper().split(":"), None))
else:
raise ValueError(f"Unknown file format {input_path.suffix}")
else:
assert input_path.is_dir(), "Expected either an input file or a input directory"
queries = []
for file in sorted(input_path.iterdir()):
if not file.is_file():
continue
if file.suffix.lower() not in [".a3m", ".fasta", ".faa"]:
logger.warning(f"non-fasta/a3m file in input directory: {file}")
continue
(seqs, header) = parse_fasta(file.read_text())
if len(seqs) == 0:
logger.error(f"{file} is empty")
continue
query_sequence = seqs[0]
if len(seqs) > 1 and file.suffix in [".fasta", ".faa", ".fa"]:
logger.warning(
f"More than one sequence in {file}, ignoring all but the first sequence"
)
if file.suffix.lower() == ".a3m":
a3m_lines = [file.read_text()]
queries.append((file.stem, query_sequence.upper(), a3m_lines))
else:
if query_sequence.count(":") == 0:
# Single sequence
queries.append((file.stem, query_sequence, None))
else:
# Complex mode
queries.append((file.stem, query_sequence.upper().split(":"), None))
# sort by seq. len
if sort_queries_by == "length":
queries.sort(key=lambda t: len(t[1]))
elif sort_queries_by == "random":
random.shuffle(queries)
is_complex = False
for job_number, (raw_jobname, query_sequence, a3m_lines) in enumerate(queries):
if isinstance(query_sequence, list):
is_complex = True
break
if a3m_lines is not None and a3m_lines[0].startswith("#"):
a3m_line = a3m_lines[0].splitlines()[0]
tab_sep_entries = a3m_line[1:].split("\t")
if len(tab_sep_entries) == 2:
query_seq_len = tab_sep_entries[0].split(",")
query_seq_len = list(map(int, query_seq_len))
query_seqs_cardinality = tab_sep_entries[1].split(",")
query_seqs_cardinality = list(map(int, query_seqs_cardinality))
is_single_protein = (
True
if len(query_seq_len) == 1 and query_seqs_cardinality[0] == 1
else False
)
if not is_single_protein:
is_complex = True
break
return queries, is_complex
def pair_sequences(
a3m_lines: List[str], query_sequences: List[str], query_cardinality: List[int]
) -> str:
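    # Concatenate the per-chain alignments row by row: line i of each chain's a3m is
    # assumed to describe the same paired hit, so headers are joined with tabs and
    # sequences are concatenated (repeated for chains with cardinality > 1).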
a3m_line_paired = [""] * len(a3m_lines[0].splitlines())
for n, seq in enumerate(query_sequences):
lines = a3m_lines[n].splitlines()
for i, line in enumerate(lines):
if line.startswith(">"):
if n != 0:
line = line.replace(">", "\t", 1)
a3m_line_paired[i] = a3m_line_paired[i] + line
else:
a3m_line_paired[i] = a3m_line_paired[i] + line * query_cardinality[n]
return "\n".join(a3m_line_paired)
def pad_sequences(
a3m_lines: List[str], query_sequences: List[str], query_cardinality: List[int]
) -> str:
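    # Build a block-diagonal alignment: each chain's sequences are flanked by all-gap
    # segments for every other chain, so unpaired hits only cover their own chain.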
_blank_seq = [
("-" * len(seq))
for n, seq in enumerate(query_sequences)
for _ in range(query_cardinality[n])
]
a3m_lines_combined = []
pos = 0
for n, seq in enumerate(query_sequences):
for j in range(0, query_cardinality[n]):
lines = a3m_lines[n].split("\n")
for a3m_line in lines:
if len(a3m_line) == 0:
continue
if a3m_line.startswith(">"):
a3m_lines_combined.append(a3m_line)
else:
a3m_lines_combined.append(
"".join(_blank_seq[:pos] + [a3m_line] + _blank_seq[pos + 1 :])
)
pos += 1
return "\n".join(a3m_lines_combined)
def get_msa_and_templates(
jobname: str,
query_sequences: Union[str, List[str]],
result_dir: Path,
msa_mode: str,
use_templates: bool,
custom_template_path: str,
pair_mode: str,
host_url: str = DEFAULT_API_SERVER,
) -> Tuple[
Optional[List[str]], Optional[List[str]], List[str], List[int], List[Dict[str, Any]]
]:
use_env = msa_mode == "MMseqs2 (UniRef+Environmental)"
# remove duplicates before searching
query_sequences = (
[query_sequences] if isinstance(query_sequences, str) else query_sequences
)
query_seqs_unique = []
for x in query_sequences:
if x not in query_seqs_unique:
query_seqs_unique.append(x)
query_seqs_cardinality = [0] * len(query_seqs_unique)
for seq in query_sequences:
seq_idx = query_seqs_unique.index(seq)
query_seqs_cardinality[seq_idx] += 1
template_features = []
if use_templates:
a3m_lines_mmseqs2, template_paths = run_mmseqs2(
query_seqs_unique,
str(result_dir.joinpath(jobname)),
use_env,
use_templates=True,
host_url=host_url,
)
if custom_template_path is not None:
template_paths = {}
for index in range(0, len(query_seqs_unique)):
template_paths[index] = custom_template_path
if template_paths is None:
logger.info("No template detected")
for index in range(0, len(query_seqs_unique)):
template_feature = mk_mock_template(query_seqs_unique[index])
template_features.append(template_feature)
else:
for index in range(0, len(query_seqs_unique)):
if template_paths[index] is not None:
template_feature = mk_template(
a3m_lines_mmseqs2[index],
template_paths[index],
query_seqs_unique[index],
)
logger.info(
f"Sequence {index} found templates: {template_feature['template_domain_names']}"
)
else:
template_feature = mk_mock_template(query_seqs_unique[index])
logger.info(f"Sequence {index} found no templates")
template_features.append(template_feature)
else:
for index in range(0, len(query_seqs_unique)):
template_feature = mk_mock_template(query_seqs_unique[index])
template_features.append(template_feature)
if len(query_sequences) == 1:
pair_mode = "none"
if pair_mode == "none" or pair_mode == "unpaired" or pair_mode == "unpaired+paired":
if msa_mode == "single_sequence":
a3m_lines = []
num = 101
for i, seq in enumerate(query_seqs_unique):
a3m_lines.append(">" + str(num + i) + "\n" + seq)
else:
# find normal a3ms
a3m_lines = run_mmseqs2(
query_seqs_unique,
str(result_dir.joinpath(jobname)),
use_env,
use_pairing=False,
host_url=host_url,
)
else:
a3m_lines = None
if pair_mode == "paired" or pair_mode == "unpaired+paired":
# find paired a3m if not a homooligomers
if len(query_seqs_unique) > 1:
paired_a3m_lines = run_mmseqs2(
query_seqs_unique,
str(result_dir.joinpath(jobname)),
use_env,
use_pairing=True,
host_url=host_url,
)
else:
# homooligomers
num = 101
paired_a3m_lines = []
for i in range(0, query_seqs_cardinality[0]):
paired_a3m_lines.append(
">" + str(num + i) + "\n" + query_seqs_unique[0] + "\n"
)
else:
paired_a3m_lines = None
return (
a3m_lines,
paired_a3m_lines,
query_seqs_unique,
query_seqs_cardinality,
template_features,
)
def build_monomer_feature(
sequence: str, unpaired_msa: str, template_features: Dict[str, Any]
):
msa = pipeline.parsers.parse_a3m(unpaired_msa)
# gather features
return {
**pipeline.make_sequence_features(
sequence=sequence, description="none", num_res=len(sequence)
),
**pipeline.make_msa_features([msa]),
**template_features,
}
def build_multimer_feature(paired_msa: str) -> Dict[str, ndarray]:
parsed_paired_msa = pipeline.parsers.parse_a3m(paired_msa)
return {
f"{k}_all_seq": v
for k, v in pipeline.make_msa_features([parsed_paired_msa]).items()
}
def process_multimer_features(
features_for_chain: Dict[str, Dict[str, ndarray]]
) -> Dict[str, ndarray]:
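    # Turn per-chain features into AlphaFold-multimer input: assign assembly ids,
    # pad the *_all_seq (paired MSA) features, crop and merge the chains, and pad
    # the MSA so extra_msa is never empty.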
all_chain_features = {}
for chain_id, chain_features in features_for_chain.items():
all_chain_features[chain_id] = pipeline_multimer.convert_monomer_features(
chain_features, chain_id
)
all_chain_features = pipeline_multimer.add_assembly_features(all_chain_features)
# np_example = feature_processing.pair_and_merge(
# all_chain_features=all_chain_features, is_prokaryote=is_prokaryote)
feature_processing.process_unmerged_features(all_chain_features)
np_chains_list = list(all_chain_features.values())
# noinspection PyProtectedMember
pair_msa_sequences = not feature_processing._is_homomer_or_monomer(np_chains_list)
chains = list(np_chains_list)
chain_keys = chains[0].keys()
updated_chains = []
for chain_num, chain in enumerate(chains):
new_chain = {k: v for k, v in chain.items() if "_all_seq" not in k}
for feature_name in chain_keys:
if feature_name.endswith("_all_seq"):
feats_padded = msa_pairing.pad_features(
chain[feature_name], feature_name
)
new_chain[feature_name] = feats_padded
new_chain["num_alignments_all_seq"] = np.asarray(
len(np_chains_list[chain_num]["msa_all_seq"])
)
updated_chains.append(new_chain)
np_chains_list = updated_chains
np_chains_list = feature_processing.crop_chains(
np_chains_list,
msa_crop_size=feature_processing.MSA_CROP_SIZE,
pair_msa_sequences=pair_msa_sequences,
max_templates=feature_processing.MAX_TEMPLATES,
)
np_example = feature_processing.msa_pairing.merge_chain_features(
np_chains_list=np_chains_list,
pair_msa_sequences=pair_msa_sequences,
max_templates=feature_processing.MAX_TEMPLATES,
)
np_example = feature_processing.process_final(np_example)
# Pad MSA to avoid zero-sized extra_msa.
np_example = pipeline_multimer.pad_msa(np_example, min_num_seq=512)
return np_example
def pair_msa(
query_seqs_unique: List[str],
query_seqs_cardinality: List[int],
paired_msa: Optional[List[str]],
unpaired_msa: Optional[List[str]],
) -> str:
if paired_msa is None and unpaired_msa is not None:
a3m_lines = pad_sequences(
unpaired_msa, query_seqs_unique, query_seqs_cardinality
)
elif paired_msa is not None and unpaired_msa is not None:
a3m_lines = (
pair_sequences(paired_msa, query_seqs_unique, query_seqs_cardinality)
+ "\n"
+ pad_sequences(unpaired_msa, query_seqs_unique, query_seqs_cardinality)
)
elif paired_msa is not None and unpaired_msa is None:
a3m_lines = pair_sequences(
paired_msa, query_seqs_unique, query_seqs_cardinality
)
else:
raise ValueError(f"Invalid pairing")
return a3m_lines
def generate_input_feature(
query_seqs_unique: List[str],
query_seqs_cardinality: List[int],
unpaired_msa: List[str],
paired_msa: List[str],
template_features: List[Dict[str, Any]],
is_complex: bool,
model_type: str,
) -> Dict[str, Any]:
input_feature = {}
if is_complex and model_type == "AlphaFold2-ptm":
a3m_lines = pair_msa(
query_seqs_unique, query_seqs_cardinality, paired_msa, unpaired_msa
)
total_sequence = ""
Ls = []
for sequence_index, sequence in enumerate(query_seqs_unique):
for cardinality in range(0, query_seqs_cardinality[sequence_index]):
total_sequence += sequence
Ls.append(len(sequence))
input_feature = build_monomer_feature(
total_sequence, a3m_lines, mk_mock_template(total_sequence)
)
input_feature["residue_index"] = chain_break(input_feature["residue_index"], Ls)
input_feature["asym_id"] = np.array(
[int(n) for n, l in enumerate(Ls) for _ in range(0, l)]
)
else:
features_for_chain = {}
chain_cnt = 0
for sequence_index, sequence in enumerate(query_seqs_unique):
for cardinality in range(0, query_seqs_cardinality[sequence_index]):
if unpaired_msa is None:
input_msa = ">" + str(101 + sequence_index) + "\n" + sequence
else:
input_msa = unpaired_msa[sequence_index]
feature_dict = build_monomer_feature(
sequence, input_msa, template_features[sequence_index]
)
if is_complex:
all_seq_features = build_multimer_feature(
paired_msa[sequence_index]
)
feature_dict.update(all_seq_features)
features_for_chain[protein.PDB_CHAIN_IDS[chain_cnt]] = feature_dict
chain_cnt += 1
# Do further feature post-processing depending on the model type.
if not is_complex:
input_feature = features_for_chain[protein.PDB_CHAIN_IDS[0]]
elif model_type.startswith("AlphaFold2-multimer"):
input_feature = process_multimer_features(features_for_chain)
return input_feature
def unserialize_msa(
a3m_lines: List[str], query_sequence: Union[List[str], str]
) -> Tuple[
Optional[List[str]],
Optional[List[str]],
List[str],
List[int],
List[Dict[str, Any]],
]:
a3m_lines = a3m_lines[0].replace("\x00", "").splitlines()
if not a3m_lines[0].startswith("#") or len(a3m_lines[0][1:].split("\t")) != 2:
assert isinstance(query_sequence, str)
return (
["\n".join(a3m_lines)],
None,
[query_sequence],
[1],
[mk_mock_template(query_sequence)],
)
if len(a3m_lines) < 3:
raise ValueError(f"Unknown file format a3m")
tab_sep_entries = a3m_lines[0][1:].split("\t")
query_seq_len = tab_sep_entries[0].split(",")
query_seq_len = list(map(int, query_seq_len))
query_seqs_cardinality = tab_sep_entries[1].split(",")
query_seqs_cardinality = list(map(int, query_seqs_cardinality))
is_homooligomer = (
True if len(query_seq_len) == 1 and query_seqs_cardinality[0] > 1 else False
)
is_single_protein = (
True if len(query_seq_len) == 1 and query_seqs_cardinality[0] == 1 else False
)
query_seqs_unique = []
prev_query_start = 0
# we store the a3m with cardinality of 1
for n, query_len in enumerate(query_seq_len):
query_seqs_unique.append(
a3m_lines[2][prev_query_start : prev_query_start + query_len]
)
prev_query_start += query_len
paired_msa = [""] * len(query_seq_len)
unpaired_msa = [""] * len(query_seq_len)
already_in = dict()
for i in range(1, len(a3m_lines), 2):
header = a3m_lines[i]
seq = a3m_lines[i + 1]
if (header, seq) in already_in:
continue
already_in[(header, seq)] = 1
has_amino_acid = [False] * len(query_seq_len)
seqs_line = []
prev_pos = 0
for n, query_len in enumerate(query_seq_len):
paired_seq = ""
curr_seq_len = 0
for pos in range(prev_pos, len(seq)):
if curr_seq_len == query_len:
prev_pos = pos
break
paired_seq += seq[pos]
if seq[pos].islower():
continue
if seq[pos] != "-":
has_amino_acid[n] = True
curr_seq_len += 1
seqs_line.append(paired_seq)
        # if the sequence is paired across all chains, add it to the paired output
if (
not is_single_protein
and not is_homooligomer
and sum(has_amino_acid) == len(query_seq_len)
):
header_no_faster = header.replace(">", "")
header_no_faster_split = header_no_faster.split("\t")
for j in range(0, len(seqs_line)):
paired_msa[j] += ">" + header_no_faster_split[j] + "\n"
paired_msa[j] += seqs_line[j] + "\n"
else:
for j, seq in enumerate(seqs_line):
if has_amino_acid[j]:
unpaired_msa[j] += header + "\n"
unpaired_msa[j] += seq + "\n"
if is_homooligomer:
# homooligomers
num = 101
paired_msa = [""] * query_seqs_cardinality[0]
for i in range(0, query_seqs_cardinality[0]):
paired_msa[i] = ">" + str(num + i) + "\n" + query_seqs_unique[0] + "\n"
if is_single_protein:
paired_msa = None
template_features = []
for query_seq in query_seqs_unique:
template_feature = mk_mock_template(query_seq)
template_features.append(template_feature)
return (
unpaired_msa,
paired_msa,
query_seqs_unique,
query_seqs_cardinality,
template_features,
)
def msa_to_str(
unpaired_msa: List[str],
paired_msa: List[str],
query_seqs_unique: List[str],
query_seqs_cardinality: List[int],
) -> str:
msa = "#" + ",".join(map(str, map(len, query_seqs_unique))) + "\t"
msa += ",".join(map(str, query_seqs_cardinality)) + "\n"
# build msa with cardinality of 1, it makes it easier to parse and manipulate
query_seqs_cardinality = [1 for _ in query_seqs_cardinality]
msa += pair_msa(query_seqs_unique, query_seqs_cardinality, paired_msa, unpaired_msa)
return msa
def run(
queries: List[Tuple[str, Union[str, List[str]], Optional[List[str]]]],
result_dir: Union[str, Path],
num_models: int,
num_recycles: int,
model_order: List[int],
is_complex: bool,
model_type: str = "auto",
msa_mode: str = "MMseqs2 (UniRef+Environmental)",
use_templates: bool = False,
custom_template_path: str = None,
use_amber: bool = False,
keep_existing_results: bool = True,
rank_by: str = "auto",
pair_mode: str = "unpaired+paired",
data_dir: Union[str, Path] = default_data_dir,
host_url: str = DEFAULT_API_SERVER,
stop_at_score: float = 100,
recompile_padding: float = 1.1,
recompile_all_models: bool = False,
zip_results: bool = False,
prediction_callback: Callable[[Any, Any, Any, Any], Any] = None,
save_single_representations: bool = False,
save_pair_representations: bool = False,
):
version = importlib_metadata.version("colabfold")
commit = get_commit()
if commit:
version += f" ({commit})"
logger.info(f"Running colabfold {version}")
data_dir = Path(data_dir)
result_dir = Path(result_dir)
result_dir.mkdir(exist_ok=True)
model_type = set_model_type(is_complex, model_type)
if model_type == "AlphaFold2-multimer-v1":
model_extension = "_multimer"
elif model_type == "AlphaFold2-multimer-v2":
model_extension = "_multimer_v2"
elif model_type == "AlphaFold2-ptm":
model_extension = "_ptm"
else:
raise ValueError(f"Unknown model_type {model_type}")
if rank_by == "auto":
# score complexes by ptmscore and sequences by plddt
rank_by = "plddt" if not is_complex else "ptmscore"
rank_by = (
"multimer"
if is_complex and model_type.startswith("AlphaFold2-multimer")
else rank_by
)
# Record the parameters of this run
config = {
"num_queries": len(queries),
"use_templates": use_templates,
"use_amber": use_amber,
"msa_mode": msa_mode,
"model_type": model_type,
"num_models": num_models,
"num_recycles": num_recycles,
"model_order": model_order,
"keep_existing_results": keep_existing_results,
"rank_by": rank_by,
"pair_mode": pair_mode,
"host_url": host_url,
"stop_at_score": stop_at_score,
"recompile_padding": recompile_padding,
"recompile_all_models": recompile_all_models,
"commit": get_commit(),
"version": importlib_metadata.version("colabfold"),
}
config_out_file = result_dir.joinpath("config.json")
config_out_file.write_text(json.dumps(config, indent=4))
use_env = msa_mode == "MMseqs2 (UniRef+Environmental)"
use_msa = (
msa_mode == "MMseqs2 (UniRef only)"
or msa_mode == "MMseqs2 (UniRef+Environmental)"
)
bibtex_file = write_bibtex(
model_type, use_msa, use_env, use_templates, use_amber, result_dir
)
save_representations = save_single_representations or save_pair_representations
model_runner_and_params = load_models_and_params(
num_models,
use_templates,
num_recycles,
model_order,
model_extension,
data_dir,
recompile_all_models,
stop_at_score=stop_at_score,
rank_by=rank_by,
return_representations=save_representations,
)
if custom_template_path is not None:
mk_hhsearch_db(custom_template_path)
crop_len = 0
for job_number, (raw_jobname, query_sequence, a3m_lines) in enumerate(queries):
jobname = safe_filename(raw_jobname)
# In the colab version and with --zip we know we're done when a zip file has been written
result_zip = result_dir.joinpath(jobname).with_suffix(".result.zip")
if keep_existing_results and result_zip.is_file():
logger.info(f"Skipping {jobname} (result.zip)")
continue
# In the local version we use a marker file
is_done_marker = result_dir.joinpath(jobname + ".done.txt")
if keep_existing_results and is_done_marker.is_file():
logger.info(f"Skipping {jobname} (already done)")
continue
query_sequence_len = (
len(query_sequence)
if isinstance(query_sequence, str)
else sum(len(s) for s in query_sequence)
)
logger.info(
f"Query {job_number + 1}/{len(queries)}: {jobname} (length {query_sequence_len})"
)
try:
if a3m_lines is not None:
(
unpaired_msa,
paired_msa,
query_seqs_unique,
query_seqs_cardinality,
template_features,
) = unserialize_msa(a3m_lines, query_sequence)
else:
(
unpaired_msa,
paired_msa,
query_seqs_unique,
query_seqs_cardinality,
template_features,
) = get_msa_and_templates(
jobname,
query_sequence,
result_dir,
msa_mode,
use_templates,
custom_template_path,
pair_mode,
host_url,
)
msa = msa_to_str(
unpaired_msa, paired_msa, query_seqs_unique, query_seqs_cardinality
)
result_dir.joinpath(jobname + ".a3m").write_text(msa)
except Exception as e:
logger.exception(f"Could not get MSA/templates for {jobname}: {e}")
continue
try:
input_features = generate_input_feature(
query_seqs_unique,
query_seqs_cardinality,
unpaired_msa,
paired_msa,
template_features,
is_complex,
model_type,
)
except Exception as e:
logger.exception(f"Could not generate input features {jobname}: {e}")
continue
try:
query_sequence_len_array = [
len(query_seqs_unique[i])
for i, cardinality in enumerate(query_seqs_cardinality)
for _ in range(0, cardinality)
]
if sum(query_sequence_len_array) > crop_len:
crop_len = math.ceil(sum(query_sequence_len_array) * recompile_padding)
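            # crop_len only grows, and queries are sorted by length by default, so the
            # padded input size can be reused across queries and the model should only
            # rarely need to be recompiled.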
outs, model_rank = predict_structure(
jobname,
result_dir,
input_features,
is_complex,
use_templates,
sequences_lengths=query_sequence_len_array,
crop_len=crop_len,
model_type=model_type,
model_runner_and_params=model_runner_and_params,
do_relax=use_amber,
rank_by=rank_by,
stop_at_score=stop_at_score,
prediction_callback=prediction_callback,
)
except RuntimeError as e:
# This normally happens on OOM. TODO: Filter for the specific OOM error message
logger.error(f"Could not predict {jobname}. Not Enough GPU memory? {e}")
continue
# Write representations if needed
representation_files = []
if save_representations:
for i, key in enumerate(model_rank):
out = outs[key]
model_id = i + 1
model_name = out["model_name"]
representations = out["representations"]
if save_single_representations:
single_representation = np.asarray(representations["single"])
single_filename = result_dir.joinpath(
f"{jobname}_single_repr_{model_id}_{model_name}"
)
np.save(single_filename, single_representation)
if save_pair_representations:
pair_representation = np.asarray(representations["pair"])
pair_filename = result_dir.joinpath(
f"{jobname}_pair_repr_{model_id}_{model_name}"
)
np.save(pair_filename, pair_representation)
# Write alphafold-db format (PAE)
alphafold_pae_file = result_dir.joinpath(
jobname + "_predicted_aligned_error_v1.json"
)
alphafold_pae_file.write_text(get_pae_json(outs[0]["pae"], outs[0]["max_pae"]))
num_alignment = (
int(input_features["num_alignments"])
if model_type.startswith("AlphaFold2-multimer")
else input_features["num_alignments"][0]
)
msa_plot = plot_msa(
input_features["msa"][0:num_alignment],
input_features["msa"][0],
query_sequence_len_array,
query_sequence_len,
)
coverage_png = result_dir.joinpath(jobname + "_coverage.png")
msa_plot.savefig(str(coverage_png))
msa_plot.close()
paes_plot = plot_paes(
[outs[k]["pae"] for k in model_rank], Ls=query_sequence_len_array, dpi=200
)
pae_png = result_dir.joinpath(jobname + "_PAE.png")
paes_plot.savefig(str(pae_png))
paes_plot.close()
plddt_plot = plot_plddts(
[outs[k]["plddt"] for k in model_rank], Ls=query_sequence_len_array, dpi=200
)
plddt_png = result_dir.joinpath(jobname + "_plddt.png")
plddt_plot.savefig(str(plddt_png))
plddt_plot.close()
result_files = [
bibtex_file,
config_out_file,
alphafold_pae_file,
result_dir.joinpath(jobname + ".a3m"),
pae_png,
coverage_png,
plddt_png,
*representation_files,
]
for i, key in enumerate(model_rank):
result_files.append(
result_dir.joinpath(
f"{jobname}_unrelaxed_rank_{i + 1}_{outs[key]['model_name']}.pdb"
)
)
result_files.append(
result_dir.joinpath(
f"{jobname}_unrelaxed_rank_{i + 1}_{outs[key]['model_name']}_scores.json"
)
)
if use_amber:
result_files.append(
result_dir.joinpath(
f"{jobname}_relaxed_rank_{i + 1}_{outs[key]['model_name']}.pdb"
)
)
if zip_results:
with zipfile.ZipFile(result_zip, "w") as result_zip:
for file in result_files:
result_zip.write(file, arcname=file.name)
# Delete only after the zip was successful, and also not the bibtex and config because we need those again
for file in result_files[2:]:
file.unlink()
else:
is_done_marker.touch()
logger.info("Done")
def set_model_type(is_complex: bool, model_type: str) -> str:
if model_type == "auto" and is_complex:
model_type = "AlphaFold2-multimer-v2"
elif model_type == "auto" and not is_complex:
model_type = "AlphaFold2-ptm"
return model_type
def main():
parser = ArgumentParser()
parser.add_argument(
"input",
default="input",
help="Can be one of the following: "
"Directory with fasta/a3m files, a csv/tsv file, a fasta file or an a3m file",
)
parser.add_argument("results", help="Directory to write the results to")
# Main performance parameter
parser.add_argument(
"--stop-at-score",
help="Compute models until plddt or ptmscore > threshold is reached. "
"This can make colabfold much faster by only running the first model for easy queries.",
type=float,
default=100,
)
parser.add_argument(
"--num-recycle",
help="Number of prediction cycles."
"Increasing recycles can improve the quality but slows down the prediction.",
type=int,
default=3,
)
parser.add_argument("--num-models", type=int, default=5, choices=[1, 2, 3, 4, 5])
parser.add_argument(
"--recompile-padding",
type=float,
default=1.1,
help="Whenever the input length changes, the model needs to be recompiled, which is slow. "
"We pad sequences by this factor, so we can e.g. compute sequence from length 100 to 110 without recompiling. "
"The prediction will become marginally slower for the longer input, "
"but overall performance increases due to not recompiling. "
"Set to 1 to disable.",
)
parser.add_argument("--model-order", default="3,4,5,1,2", type=str)
parser.add_argument("--host-url", default=DEFAULT_API_SERVER)
parser.add_argument("--data")
# TODO: This currently isn't actually used
parser.add_argument(
"--msa-mode",
default="MMseqs2 (UniRef+Environmental)",
choices=[
"MMseqs2 (UniRef+Environmental)",
"MMseqs2 (UniRef only)",
"single_sequence",
],
help="Using an a3m file as input overwrites this option",
)
parser.add_argument(
"--model-type",
help="predict strucutre/complex using the following model."
'Auto will pick "AlphaFold2" (ptm) for structure predictions and "AlphaFold2-multimer-v2" for complexes.',
type=str,
default="auto",
choices=[
"auto",
"AlphaFold2-ptm",
"AlphaFold2-multimer-v1",
"AlphaFold2-multimer-v2",
],
)
parser.add_argument(
"--amber",
default=False,
action="store_true",
help="Use amber for structure refinement",
)
parser.add_argument(
"--templates", default=False, action="store_true", help="Use templates from pdb"
)
parser.add_argument(
"--custom-template-path",
type=str,
default=None,
help="Directory with pdb files to be used as input",
)
parser.add_argument("--env", default=False, action="store_true")
parser.add_argument(
"--cpu",
default=False,
action="store_true",
help="Allow running on the cpu, which is very slow",
)
parser.add_argument(
"--rank",
help="rank models by auto, plddt or ptmscore",
type=str,
default="auto",
choices=["auto", "plddt", "ptmscore", "multimer"],
)
parser.add_argument(
"--pair-mode",
help="rank models by auto, unpaired, paired, unpaired+paired",
type=str,
default="unpaired+paired",
choices=["unpaired", "paired", "unpaired+paired"],
)
parser.add_argument(
"--recompile-all-models",
help="recompile all models instead of just model 1 ane 3",
default=False,
action="store_true",
)
parser.add_argument(
"--sort-queries-by",
help="sort queries by: none, length, random",
type=str,
default="length",
choices=["none", "length", "random"],
)
parser.add_argument(
"--save-single-representations",
default=False,
action="store_true",
help="saves the single representation embeddings of all models",
)
parser.add_argument(
"--save-pair-representations",
default=False,
action="store_true",
help="saves the pair representation embeddings of all models",
)
parser.add_argument(
"--zip",
default=False,
action="store_true",
help="zip all results into one <jobname>.result.zip and delete the original files",
)
parser.add_argument(
"--overwrite-existing-results", default=False, action="store_true"
)
args = parser.parse_args()
setup_logging(Path(args.results).joinpath("log.txt"))
data_dir = Path(args.data or default_data_dir)
# Prevent people from accidentally running on the cpu, which is really slow
if not args.cpu and xla_bridge.get_backend().platform == "cpu":
print(NO_GPU_FOUND, file=sys.stderr)
sys.exit(1)
queries, is_complex = get_queries(args.input, args.sort_queries_by)
model_type = set_model_type(is_complex, args.model_type)
download_alphafold_params(model_type, data_dir)
uses_api = any((query[2] is None for query in queries))
if uses_api and args.host_url == DEFAULT_API_SERVER:
print(ACCEPT_DEFAULT_TERMS, file=sys.stderr)
model_order = [int(i) for i in args.model_order.split(",")]
assert 1 <= args.recompile_padding, "Can't apply negative padding"
run(
queries=queries,
result_dir=args.results,
use_templates=args.templates,
custom_template_path=args.custom_template_path,
use_amber=args.amber,
msa_mode=args.msa_mode,
model_type=model_type,
num_models=args.num_models,
num_recycles=args.num_recycle,
model_order=model_order,
is_complex=is_complex,
keep_existing_results=not args.overwrite_existing_results,
rank_by=args.rank,
pair_mode=args.pair_mode,
data_dir=data_dir,
host_url=args.host_url,
stop_at_score=args.stop_at_score,
recompile_padding=args.recompile_padding,
recompile_all_models=args.recompile_all_models,
zip_results=args.zip,
save_single_representations=args.save_single_representations,
save_pair_representations=args.save_pair_representations,
)
if __name__ == "__main__":
main()
| ColabFold-main | colabfold/batch.py |
| ColabFold-main | colabfold/__init__.py |
def show_pdb(
use_amber: bool,
jobname: str,
homooligomer,
model_num=1,
show_sidechains=False,
show_mainchains=False,
color="lDDT",
):
import py3Dmol
model_name = f"model_{model_num}"
if use_amber:
pdb_filename = f"{jobname}_relaxed_{model_name}.pdb"
else:
pdb_filename = f"{jobname}_unrelaxed_{model_name}.pdb"
view = py3Dmol.view(js="https://3dmol.org/build/3Dmol.js")
view.addModel(open(pdb_filename, "r").read(), "pdb")
if color == "lDDT":
view.setStyle(
{
"cartoon": {
"colorscheme": {
"prop": "b",
"gradient": "roygb",
"min": 50,
"max": 90,
}
}
}
)
elif color == "rainbow":
view.setStyle({"cartoon": {"color": "spectrum"}})
elif color == "chain":
for n, chain, color in zip(
range(homooligomer),
list("ABCDEFGH"),
["lime", "cyan", "magenta", "yellow", "salmon", "white", "blue", "orange"],
):
view.setStyle({"chain": chain}, {"cartoon": {"color": color}})
if show_sidechains:
BB = ["C", "O", "N"]
view.addStyle(
{
"and": [
{"resn": ["GLY", "PRO"], "invert": True},
{"atom": BB, "invert": True},
]
},
{"stick": {"colorscheme": f"WhiteCarbon", "radius": 0.3}},
)
view.addStyle(
{"and": [{"resn": "GLY"}, {"atom": "CA"}]},
{"sphere": {"colorscheme": f"WhiteCarbon", "radius": 0.3}},
)
view.addStyle(
{"and": [{"resn": "PRO"}, {"atom": ["C", "O"], "invert": True}]},
{"stick": {"colorscheme": f"WhiteCarbon", "radius": 0.3}},
)
if show_mainchains:
BB = ["C", "O", "N", "CA"]
view.addStyle(
{"atom": BB}, {"stick": {"colorscheme": f"WhiteCarbon", "radius": 0.3}}
)
view.zoomTo()
return view
| ColabFold-main | colabfold/pdb.py |
# fmt: off
# @formatter:off
############################################
# imports
############################################
import jax
import requests
import hashlib
import tarfile
import time
import os
from typing import Tuple, List
import random
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patheffects
from matplotlib import collections as mcoll
import logging
logger = logging.getLogger(__name__)
try:
import py3Dmol
except ImportError:
pass
from string import ascii_uppercase,ascii_lowercase
pymol_color_list = ["#33ff33","#00ffff","#ff33cc","#ffff00","#ff9999","#e5e5e5","#7f7fff","#ff7f00",
"#7fff7f","#199999","#ff007f","#ffdd5e","#8c3f99","#b2b2b2","#007fff","#c4b200",
"#8cb266","#00bfbf","#b27f7f","#fcd1a5","#ff7f7f","#ffbfdd","#7fffff","#ffff7f",
"#00ff7f","#337fcc","#d8337f","#bfff3f","#ff7fff","#d8d8ff","#3fffbf","#b78c4c",
"#339933","#66b2b2","#ba8c84","#84bf00","#b24c66","#7f7f7f","#3f3fa5","#a5512b"]
pymol_cmap = matplotlib.colors.ListedColormap(pymol_color_list)
alphabet_list = list(ascii_uppercase+ascii_lowercase)
aatypes = set('ACDEFGHIKLMNPQRSTVWY')
###########################################
# control gpu/cpu memory usage
###########################################
def rm(x):
'''remove data from device'''
jax.tree_util.tree_map(lambda y: y.device_buffer.delete(), x)
def to(x,device="cpu"):
'''move data to device'''
d = jax.devices(device)[0]
return jax.tree_util.tree_map(lambda y:jax.device_put(y,d), x)
def clear_mem(device="gpu"):
'''remove all data from device'''
backend = jax.lib.xla_bridge.get_backend(device)
for buf in backend.live_buffers(): buf.delete()
##########################################
# call mmseqs2
##########################################
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
def run_mmseqs2(x, prefix, use_env=True, use_filter=True,
use_templates=False, filter=None, use_pairing=False,
host_url="https://a3m.mmseqs.com") -> Tuple[List[str], List[str]]:
submission_endpoint = "ticket/pair" if use_pairing else "ticket/msa"
def submit(seqs, mode, N=101):
n, query = N, ""
for seq in seqs:
query += f">{n}\n{seq}\n"
n += 1
res = requests.post(f'{host_url}/{submission_endpoint}', data={'q':query,'mode': mode})
try:
out = res.json()
except ValueError:
logger.error(f"Server didn't reply with json: {res.text}")
out = {"status":"ERROR"}
return out
def status(ID):
res = requests.get(f'{host_url}/ticket/{ID}')
try:
out = res.json()
except ValueError:
logger.error(f"Server didn't reply with json: {res.text}")
out = {"status":"ERROR"}
return out
def download(ID, path):
res = requests.get(f'{host_url}/result/download/{ID}')
with open(path,"wb") as out: out.write(res.content)
# process input x
seqs = [x] if isinstance(x, str) else x
# compatibility to old option
if filter is not None:
use_filter = filter
# setup mode
if use_filter:
mode = "env" if use_env else "all"
else:
mode = "env-nofilter" if use_env else "nofilter"
if use_pairing:
mode = ""
use_templates = False
use_env = False
# define path
path = f"{prefix}_{mode}"
if not os.path.isdir(path): os.mkdir(path)
# call mmseqs2 api
tar_gz_file = f'{path}/out.tar.gz'
N,REDO = 101,True
# deduplicate and keep track of order
seqs_unique = []
#TODO this might be slow for large sets
  for seq in seqs:
    if seq not in seqs_unique:
      seqs_unique.append(seq)
Ms = [N + seqs_unique.index(seq) for seq in seqs]
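  # Ms maps every input sequence (duplicates included) to the numeric id its unique
  # representative was submitted under, so results come back in the original input order.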
# lets do it!
if not os.path.isfile(tar_gz_file):
TIME_ESTIMATE = 150 * len(seqs_unique)
with tqdm(total=TIME_ESTIMATE, bar_format=TQDM_BAR_FORMAT) as pbar:
while REDO:
pbar.set_description("SUBMIT")
# Resubmit job until it goes through
out = submit(seqs_unique, mode, N)
while out["status"] in ["UNKNOWN", "RATELIMIT"]:
sleep_time = 5 + random.randint(0, 5)
logger.error(f"Sleeping for {sleep_time}s. Reason: {out['status']}")
# resubmit
time.sleep(sleep_time)
out = submit(seqs_unique, mode, N)
if out["status"] == "ERROR":
raise Exception(f'MMseqs2 API is giving errors. Please confirm your input is a valid protein sequence. If error persists, please try again an hour later.')
if out["status"] == "MAINTENANCE":
raise Exception(f'MMseqs2 API is undergoing maintenance. Please try again in a few minutes.')
# wait for job to finish
ID,TIME = out["id"],0
pbar.set_description(out["status"])
while out["status"] in ["UNKNOWN","RUNNING","PENDING"]:
t = 5 + random.randint(0,5)
logger.error(f"Sleeping for {t}s. Reason: {out['status']}")
time.sleep(t)
out = status(ID)
pbar.set_description(out["status"])
if out["status"] == "RUNNING":
TIME += t
pbar.update(n=t)
#if TIME > 900 and out["status"] != "COMPLETE":
# # something failed on the server side, need to resubmit
# N += 1
# break
if out["status"] == "COMPLETE":
if TIME < TIME_ESTIMATE:
pbar.update(n=(TIME_ESTIMATE-TIME))
REDO = False
if out["status"] == "ERROR":
REDO = False
raise Exception(f'MMseqs2 API is giving errors. Please confirm your input is a valid protein sequence. If error persists, please try again an hour later.')
# Download results
download(ID, tar_gz_file)
# prep list of a3m files
if use_pairing:
a3m_files = [f"{path}/pair.a3m"]
else:
a3m_files = [f"{path}/uniref.a3m"]
if use_env: a3m_files.append(f"{path}/bfd.mgnify30.metaeuk30.smag30.a3m")
# extract a3m files
if any(not os.path.isfile(a3m_file) for a3m_file in a3m_files):
with tarfile.open(tar_gz_file) as tar_gz:
tar_gz.extractall(path)
# templates
if use_templates:
templates = {}
#print("seq\tpdb\tcid\tevalue")
for line in open(f"{path}/pdb70.m8","r"):
p = line.rstrip().split()
M,pdb,qid,e_value = p[0],p[1],p[2],p[10]
M = int(M)
if M not in templates: templates[M] = []
templates[M].append(pdb)
#if len(templates[M]) <= 20:
# print(f"{int(M)-N}\t{pdb}\t{qid}\t{e_value}")
template_paths = {}
for k,TMPL in templates.items():
TMPL_PATH = f"{prefix}_{mode}/templates_{k}"
if not os.path.isdir(TMPL_PATH):
os.mkdir(TMPL_PATH)
TMPL_LINE = ",".join(TMPL[:20])
os.system(f"curl -s https://a3m-templates.mmseqs.com/template/{TMPL_LINE} | tar xzf - -C {TMPL_PATH}/")
os.system(f"cp {TMPL_PATH}/pdb70_a3m.ffindex {TMPL_PATH}/pdb70_cs219.ffindex")
os.system(f"touch {TMPL_PATH}/pdb70_cs219.ffdata")
template_paths[k] = TMPL_PATH
# gather a3m lines
a3m_lines = {}
for a3m_file in a3m_files:
update_M,M = True,None
for line in open(a3m_file,"r"):
if len(line) > 0:
if "\x00" in line:
line = line.replace("\x00","")
update_M = True
if line.startswith(">") and update_M:
M = int(line[1:].rstrip())
update_M = False
if M not in a3m_lines: a3m_lines[M] = []
a3m_lines[M].append(line)
# return results
a3m_lines = ["".join(a3m_lines[n]) for n in Ms]
if use_templates:
template_paths_ = []
for n in Ms:
if n not in template_paths:
template_paths_.append(None)
#print(f"{n-N}\tno_templates_found")
else:
template_paths_.append(template_paths[n])
template_paths = template_paths_
return (a3m_lines, template_paths) if use_templates else a3m_lines
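# usage sketch (editor's addition, not part of the original module): how
# run_mmseqs2 might be called for a single query. The sequence and the
# "tmp/example" prefix are hypothetical placeholders; the call needs network
# access to the MMseqs2 API server, so it is wrapped in a function that is
# never executed on import.
def _example_run_mmseqs2():
    seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # hypothetical query sequence
    a3m_lines = run_mmseqs2([seq], prefix="tmp/example", use_env=True, use_filter=True)
    return a3m_lines[0]  # a3m-formatted MSA for the first (and only) query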
#########################################################################
# utils
#########################################################################
def get_hash(x):
return hashlib.sha1(x.encode()).hexdigest()
def homooligomerize(msas, deletion_matrices, homooligomer=1):
if homooligomer == 1:
return msas, deletion_matrices
else:
new_msas = []
new_mtxs = []
for o in range(homooligomer):
for msa,mtx in zip(msas, deletion_matrices):
num_res = len(msa[0])
L = num_res * o
R = num_res * (homooligomer-(o+1))
new_msas.append(["-"*L+s+"-"*R for s in msa])
new_mtxs.append([[0]*L+m+[0]*R for m in mtx])
return new_msas, new_mtxs
# keeping typo for cross-compatibility
def homooliomerize(msas, deletion_matrices, homooligomer=1):
return homooligomerize(msas, deletion_matrices, homooligomer=homooligomer)
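# minimal sketch (editor's addition): homooligomerize pads each copy of the
# monomer MSA with gaps so the copies tile the full homodimer length. The toy
# MSA below is hypothetical.
def _example_homooligomerize():
    msas = [["MKVL", "MK-L"]]
    mtxs = [[[0, 0, 0, 0], [0, 0, 0, 0]]]
    new_msas, new_mtxs = homooligomerize(msas, mtxs, homooligomer=2)
    # new_msas[0][0] == "MKVL----" and new_msas[1][0] == "----MKVL"
    return new_msas, new_mtxs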
def homooligomerize_heterooligomer(msas, deletion_matrices, lengths, homooligomers):
'''
----- inputs -----
msas: list of msas
deletion_matrices: list of deletion matrices
lengths: list of lengths for each component in complex
homooligomers: list of number of homooligomeric copies for each component
----- outputs -----
(msas, deletion_matrices)
'''
if max(homooligomers) == 1:
return msas, deletion_matrices
elif len(homooligomers) == 1:
return homooligomerize(msas, deletion_matrices, homooligomers[0])
else:
frag_ij = [[0,lengths[0]]]
for length in lengths[1:]:
j = frag_ij[-1][-1]
frag_ij.append([j,j+length])
# for every msa
mod_msas, mod_mtxs = [],[]
for msa, mtx in zip(msas, deletion_matrices):
mod_msa, mod_mtx = [],[]
# for every sequence
for n,(s,m) in enumerate(zip(msa,mtx)):
# split sequence
_s,_m,_ok = [],[],[]
for i,j in frag_ij:
_s.append(s[i:j]); _m.append(m[i:j])
                    _ok.append(any(o != "-" for o in _s[-1]))
if n == 0:
# if first query sequence
mod_msa.append("".join([x*h for x,h in zip(_s,homooligomers)]))
mod_mtx.append(sum([x*h for x,h in zip(_m,homooligomers)],[]))
elif sum(_ok) == 1:
# elif one fragment: copy each fragment to every homooligomeric copy
a = _ok.index(True)
for h_a in range(homooligomers[a]):
_blank_seq = [["-"*l]*h for l,h in zip(lengths,homooligomers)]
_blank_mtx = [[[0]*l]*h for l,h in zip(lengths,homooligomers)]
_blank_seq[a][h_a] = _s[a]
_blank_mtx[a][h_a] = _m[a]
mod_msa.append("".join(["".join(x) for x in _blank_seq]))
mod_mtx.append(sum([sum(x,[]) for x in _blank_mtx],[]))
else:
# else: copy fragment pair to every homooligomeric copy pair
for a in range(len(lengths)-1):
if _ok[a]:
for b in range(a+1,len(lengths)):
if _ok[b]:
for h_a in range(homooligomers[a]):
for h_b in range(homooligomers[b]):
_blank_seq = [["-"*l]*h for l,h in zip(lengths,homooligomers)]
_blank_mtx = [[[0]*l]*h for l,h in zip(lengths,homooligomers)]
for c,h_c in zip([a,b],[h_a,h_b]):
_blank_seq[c][h_c] = _s[c]
_blank_mtx[c][h_c] = _m[c]
mod_msa.append("".join(["".join(x) for x in _blank_seq]))
mod_mtx.append(sum([sum(x,[]) for x in _blank_mtx],[]))
mod_msas.append(mod_msa)
mod_mtxs.append(mod_mtx)
return mod_msas, mod_mtxs
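# minimal sketch (editor's addition): an A+B complex with lengths [2, 3] where
# component A is modelled as a homodimer. Per-component hits are gap-padded to
# the full A-A-B arrangement, so every output sequence has length 2*2 + 3 == 7.
# The toy MSAs below are hypothetical.
def _example_homooligomerize_heterooligomer():
    msas = [["MKLLL", "MK---"], ["MKLLL", "--LLL"]]
    mtxs = [[[0] * 5, [0] * 5], [[0] * 5, [0] * 5]]
    new_msas, new_mtxs = homooligomerize_heterooligomer(
        msas, mtxs, lengths=[2, 3], homooligomers=[2, 1])
    return new_msas, new_mtxs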
def chain_break(idx_res, Ls, length=200):
# Minkyung's code
# add big enough number to residue index to indicate chain breaks
L_prev = 0
for L_i in Ls[:-1]:
idx_res[L_prev+L_i:] += length
L_prev += L_i
return idx_res
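# minimal sketch (editor's addition): chain_break offsets the residue index of
# every chain after the first so AlphaFold treats them as separate chains.
def _example_chain_break():
    idx = np.arange(5)              # residue index for a 3+2 residue complex
    idx = chain_break(idx, [3, 2])  # -> array([0, 1, 2, 203, 204])
    return idx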
##################################################
# plotting
##################################################
def plot_plddt_legend(dpi=100):
thresh = ['plDDT:','Very low (<50)','Low (60)','OK (70)','Confident (80)','Very high (>90)']
plt.figure(figsize=(1,0.1),dpi=dpi)
########################################
for c in ["#FFFFFF","#FF0000","#FFFF00","#00FF00","#00FFFF","#0000FF"]:
plt.bar(0, 0, color=c)
plt.legend(thresh, frameon=False,
loc='center', ncol=6,
handletextpad=1,
columnspacing=1,
markerscale=0.5,)
plt.axis(False)
return plt
def plot_ticks(Ls):
Ln = sum(Ls)
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([0,Ln],[L,L],color="black")
plt.plot([L,L],[0,Ln],color="black")
ticks = np.cumsum([0]+Ls)
ticks = (ticks[1:] + ticks[:-1])/2
plt.yticks(ticks,alphabet_list[:len(ticks)])
def plot_confidence(plddt, pae=None, Ls=None, dpi=100):
    use_ptm = pae is not None
if use_ptm:
plt.figure(figsize=(10,3), dpi=dpi)
plt.subplot(1,2,1);
else:
plt.figure(figsize=(5,3), dpi=dpi)
plt.title('Predicted lDDT')
plt.plot(plddt)
if Ls is not None:
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([L,L],[0,100],color="black")
plt.ylim(0,100)
plt.ylabel('plDDT')
plt.xlabel('position')
if use_ptm:
plt.subplot(1,2,2);plt.title('Predicted Aligned Error')
Ln = pae.shape[0]
plt.imshow(pae,cmap="bwr",vmin=0,vmax=30,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
plt.xlabel('Scored residue')
plt.ylabel('Aligned residue')
return plt
def plot_msas(msa, ori_seq=None, sort_by_seqid=True, deduplicate=True, dpi=100, return_plt=True):
'''
plot the msas
'''
if ori_seq is None: ori_seq = msa[0]
seqs = ori_seq.replace("/","").split(":")
seqs_dash = ori_seq.replace(":","").split("/")
Ln = np.cumsum(np.append(0,[len(seq) for seq in seqs]))
Ln_dash = np.cumsum(np.append(0,[len(seq) for seq in seqs_dash]))
Nn,lines = [],[]
#for msa in msas:
#msa_ = set(msa) if deduplicate else msa
msa_ = msa
if len(msa_) > 0:
Nn.append(len(msa_))
msa_ = np.asarray([list(seq) for seq in msa_])
gap_ = msa_ != "-"
qid_ = msa_ == np.array(list("".join(seqs)))
gapid = np.stack([gap_[:,Ln[i]:Ln[i+1]].max(-1) for i in range(len(seqs))],-1)
seqid = np.stack([qid_[:,Ln[i]:Ln[i+1]].mean(-1) for i in range(len(seqs))],-1).sum(-1) / (gapid.sum(-1) + 1e-8)
        non_gaps = gap_.astype(float)
non_gaps[non_gaps == 0] = np.nan
if sort_by_seqid:
lines.append(non_gaps[seqid.argsort()]*seqid[seqid.argsort(),None])
else:
lines.append(non_gaps[::-1] * seqid[::-1,None])
Nn = np.cumsum(np.append(0,Nn))
lines = np.concatenate(lines,0)
if return_plt:
plt.figure(figsize=(8,5),dpi=dpi)
plt.title("Sequence coverage")
plt.imshow(lines,
interpolation='nearest', aspect='auto',
cmap="rainbow_r", vmin=0, vmax=1, origin='lower',
extent=(0, lines.shape[1], 0, lines.shape[0]))
for i in Ln[1:-1]:
plt.plot([i,i],[0,lines.shape[0]],color="black")
for i in Ln_dash[1:-1]:
plt.plot([i,i],[0,lines.shape[0]],"--",color="black")
for j in Nn[1:-1]:
plt.plot([0,lines.shape[1]],[j,j],color="black")
plt.plot((np.isnan(lines) == False).sum(0), color='black')
plt.xlim(0,lines.shape[1])
plt.ylim(0,lines.shape[0])
plt.colorbar(label="Sequence identity to query")
plt.xlabel("Positions")
plt.ylabel("Sequences")
if return_plt: return plt
def read_pdb_renum(pdb_filename, Ls=None):
if Ls is not None:
L_init = 0
new_chain = {}
for L,c in zip(Ls, alphabet_list):
new_chain.update({i:c for i in range(L_init,L_init+L)})
L_init += L
n,pdb_out = 1,[]
resnum_,chain_ = 1,"A"
for line in open(pdb_filename,"r"):
if line[:4] == "ATOM":
chain = line[21:22]
resnum = int(line[22:22+5])
if resnum != resnum_ or chain != chain_:
resnum_,chain_ = resnum,chain
n += 1
if Ls is None: pdb_out.append("%s%4i%s" % (line[:22],n,line[26:]))
else: pdb_out.append("%s%s%4i%s" % (line[:21],new_chain[n-1],n,line[26:]))
return "".join(pdb_out)
def show_pdb(pred_output_path, show_sidechains=False, show_mainchains=False,
color="lDDT", chains=None, Ls=None, vmin=50, vmax=90,
color_HP=False, size=(800,480)):
if chains is None:
chains = 1 if Ls is None else len(Ls)
view = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js', width=size[0], height=size[1])
view.addModel(read_pdb_renum(pred_output_path, Ls),'pdb')
if color == "lDDT":
view.setStyle({'cartoon': {'colorscheme': {'prop':'b','gradient': 'roygb','min':vmin,'max':vmax}}})
elif color == "rainbow":
view.setStyle({'cartoon': {'color':'spectrum'}})
elif color == "chain":
for n,chain,color in zip(range(chains),alphabet_list,pymol_color_list):
view.setStyle({'chain':chain},{'cartoon': {'color':color}})
if show_sidechains:
BB = ['C','O','N']
HP = ["ALA","GLY","VAL","ILE","LEU","PHE","MET","PRO","TRP","CYS","TYR"]
if color_HP:
view.addStyle({'and':[{'resn':HP},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':"yellowCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':HP,'invert':True},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':"whiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"GLY"},{'atom':'CA'}]},
{'sphere':{'colorscheme':"yellowCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"PRO"},{'atom':['C','O'],'invert':True}]},
{'stick':{'colorscheme':"yellowCarbon",'radius':0.3}})
else:
view.addStyle({'and':[{'resn':["GLY","PRO"],'invert':True},{'atom':BB,'invert':True}]},
{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"GLY"},{'atom':'CA'}]},
{'sphere':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.addStyle({'and':[{'resn':"PRO"},{'atom':['C','O'],'invert':True}]},
{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
if show_mainchains:
BB = ['C','O','N','CA']
view.addStyle({'atom':BB},{'stick':{'colorscheme':f"WhiteCarbon",'radius':0.3}})
view.zoomTo()
return view
def plot_plddts(plddts, Ls=None, dpi=100, fig=True):
    if fig: plt.figure(figsize=(8,5),dpi=dpi)
plt.title("Predicted lDDT per position")
for n,plddt in enumerate(plddts):
plt.plot(plddt,label=f"rank_{n+1}")
if Ls is not None:
L_prev = 0
for L_i in Ls[:-1]:
L = L_prev + L_i
L_prev += L_i
plt.plot([L,L],[0,100],color="black")
plt.legend()
plt.ylim(0,100)
plt.ylabel("Predicted lDDT")
plt.xlabel("Positions")
return plt
def plot_paes(paes, Ls=None, dpi=100, fig=True):
num_models = len(paes)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,pae in enumerate(paes):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = pae.shape[0]
plt.imshow(pae,cmap="bwr",vmin=0,vmax=30,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
def plot_adjs(adjs, Ls=None, dpi=100, fig=True):
num_models = len(adjs)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,adj in enumerate(adjs):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = adj.shape[0]
plt.imshow(adj,cmap="binary",vmin=0,vmax=1,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
def plot_dists(dists, Ls=None, dpi=100, fig=True):
num_models = len(dists)
if fig: plt.figure(figsize=(3*num_models,2), dpi=dpi)
for n,dist in enumerate(dists):
plt.subplot(1,num_models,n+1)
plt.title(f"rank_{n+1}")
Ln = dist.shape[0]
plt.imshow(dist,extent=(0, Ln, Ln, 0))
if Ls is not None and len(Ls) > 1: plot_ticks(Ls)
plt.colorbar()
return plt
##########################################################################
##########################################################################
def kabsch(a, b, weights=None, return_v=False):
a = np.asarray(a)
b = np.asarray(b)
if weights is None: weights = np.ones(len(b))
else: weights = np.asarray(weights)
B = np.einsum('ji,jk->ik', weights[:, None] * a, b)
u, s, vh = np.linalg.svd(B)
if np.linalg.det(u @ vh) < 0: u[:, -1] = -u[:, -1]
if return_v: return u
else: return u @ vh
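# minimal sketch (editor's addition): kabsch returns the rotation that best
# superposes coordinates `a` onto `b`. Here `a` is `b` rotated 90 degrees
# about z, so a @ kabsch(a, b) recovers `b`.
def _example_kabsch():
    b = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., 1., 0.]])
    Rz = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    a = b @ Rz.T
    R = kabsch(a, b)
    return np.allclose(a @ R, b)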
def plot_pseudo_3D(xyz, c=None, ax=None, chainbreak=5,
cmap="gist_rainbow", line_w=2.0,
cmin=None, cmax=None, zmin=None, zmax=None):
def rescale(a,amin=None,amax=None):
a = np.copy(a)
if amin is None: amin = a.min()
if amax is None: amax = a.max()
a[a < amin] = amin
a[a > amax] = amax
return (a - amin)/(amax - amin)
# make segments
xyz = np.asarray(xyz)
seg = np.concatenate([xyz[:-1,None,:],xyz[1:,None,:]],axis=-2)
seg_xy = seg[...,:2]
seg_z = seg[...,2].mean(-1)
ord = seg_z.argsort()
# set colors
if c is None: c = np.arange(len(seg))[::-1]
else: c = (c[1:] + c[:-1])/2
c = rescale(c,cmin,cmax)
if isinstance(cmap, str):
if cmap == "gist_rainbow": c *= 0.75
colors = matplotlib.cm.get_cmap(cmap)(c)
else:
colors = cmap(c)
if chainbreak is not None:
dist = np.linalg.norm(xyz[:-1] - xyz[1:], axis=-1)
        colors[...,3] = (dist < chainbreak).astype(float)
# add shade/tint based on z-dimension
z = rescale(seg_z,zmin,zmax)[:,None]
tint, shade = z/3, (z+2)/3
colors[:,:3] = colors[:,:3] + (1 - colors[:,:3]) * tint
colors[:,:3] = colors[:,:3] * shade
set_lim = False
if ax is None:
fig, ax = plt.subplots()
fig.set_figwidth(5)
fig.set_figheight(5)
set_lim = True
else:
fig = ax.get_figure()
if ax.get_xlim() == (0,1):
set_lim = True
if set_lim:
xy_min = xyz[:,:2].min() - line_w
xy_max = xyz[:,:2].max() + line_w
ax.set_xlim(xy_min,xy_max)
ax.set_ylim(xy_min,xy_max)
ax.set_aspect('equal')
# determine linewidths
width = fig.bbox_inches.width * ax.get_position().width
linewidths = line_w * 72 * width / np.diff(ax.get_xlim())
lines = mcoll.LineCollection(seg_xy[ord], colors=colors[ord], linewidths=linewidths,
path_effects=[matplotlib.patheffects.Stroke(capstyle="round")])
return ax.add_collection(lines)
def add_text(text, ax):
return plt.text(0.5, 1.01, text, horizontalalignment='center',
verticalalignment='bottom', transform=ax.transAxes)
def plot_protein(protein=None, pos=None, plddt=None, Ls=None, dpi=100, best_view=True, line_w=2.0):
if protein is not None:
pos = np.asarray(protein.atom_positions[:,1,:])
plddt = np.asarray(protein.b_factors[:,0])
# get best view
if best_view:
if plddt is not None:
weights = plddt/100
pos = pos - (pos * weights[:,None]).sum(0,keepdims=True) / weights.sum()
pos = pos @ kabsch(pos, pos, weights, return_v=True)
else:
pos = pos - pos.mean(0,keepdims=True)
pos = pos @ kabsch(pos, pos, return_v=True)
if plddt is not None:
fig, (ax1, ax2) = plt.subplots(1,2)
fig.set_figwidth(6); fig.set_figheight(3)
ax = [ax1, ax2]
else:
fig, ax1 = plt.subplots(1,1)
fig.set_figwidth(3); fig.set_figheight(3)
ax = [ax1]
fig.set_dpi(dpi)
fig.subplots_adjust(top = 0.9, bottom = 0.1, right = 1, left = 0, hspace = 0, wspace = 0)
xy_min = pos[...,:2].min() - line_w
xy_max = pos[...,:2].max() + line_w
for a in ax:
a.set_xlim(xy_min, xy_max)
a.set_ylim(xy_min, xy_max)
a.axis(False)
if Ls is None or len(Ls) == 1:
# color N->C
c = np.arange(len(pos))[::-1]
plot_pseudo_3D(pos, line_w=line_w, ax=ax1)
add_text("colored by N→C", ax1)
else:
# color by chain
c = np.concatenate([[n]*L for n,L in enumerate(Ls)])
if len(Ls) > 40: plot_pseudo_3D(pos, c=c, line_w=line_w, ax=ax1)
else: plot_pseudo_3D(pos, c=c, cmap=pymol_cmap, cmin=0, cmax=39, line_w=line_w, ax=ax1)
add_text("colored by chain", ax1)
if plddt is not None:
# color by pLDDT
plot_pseudo_3D(pos, c=plddt, cmin=50, cmax=90, line_w=line_w, ax=ax2)
add_text("colored by pLDDT", ax2)
return fig
| ColabFold-main | colabfold/colabfold.py |
import json
import logging
import warnings
from pathlib import Path
from typing import Optional
from absl import logging as absl_logging
from importlib_metadata import distribution
from tqdm import TqdmExperimentalWarning
NO_GPU_FOUND = """ERROR: Jax could not find GPU. This can be either because your machine doesn't have a GPU
or because jax can't find it. You might need to run
pip install --upgrade "jax[cuda]" -f https://storage.googleapis.com/jax-releases/jax_releases.html # Note: wheels only available on linux.
See https://github.com/google/jax/#pip-installation-gpu-cuda for more details.
If you're sure you want to run without a GPU, pass `--cpu`"""
DEFAULT_API_SERVER = "https://api.colabfold.com"
ACCEPT_DEFAULT_TERMS = """WARNING: You are welcome to use the default MSA server, however keep in mind that it's a limited shared resource only capable of processing a few thousand MSAs per day. Please submit jobs only from a single IP address. We reserve the right to limit access to the server case-by-case when usage exceeds fair use.
If you require more MSAs, please host your own API and pass it to `--host-url`"""
class TqdmHandler(logging.StreamHandler):
"""https://stackoverflow.com/a/38895482/3549270"""
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
# We need the native tqdm here
from tqdm import tqdm
msg = self.format(record)
tqdm.write(msg)
def setup_logging(log_file: Path):
log_file.parent.mkdir(exist_ok=True, parents=True)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
handlers=[TqdmHandler(), logging.FileHandler(log_file)],
)
# otherwise jax will tell us about its search for devices
absl_logging.set_verbosity("error")
warnings.simplefilter(action="ignore", category=TqdmExperimentalWarning)
def safe_filename(file: str) -> str:
return "".join([c if c.isalnum() or c in ["_", ".", "-"] else "_" for c in file])
def get_commit() -> Optional[str]:
text = distribution("colabfold").read_text("direct_url.json")
if not text:
return None
direct_url = json.loads(text)
if "vcs_info" not in direct_url:
return None
if "commit_id" not in direct_url["vcs_info"]:
return None
return direct_url["vcs_info"]["commit_id"]
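# usage sketch (editor's addition): sanitize a user-supplied name and log to a
# hypothetical "tmp/log.txt" file; not executed on import.
def _example_utils():
    name = safe_filename("my job/rank #1.pdb")  # -> "my_job_rank__1.pdb"
    setup_logging(Path("tmp/log.txt"))
    logging.info("job name: %s", name)
    return name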
| ColabFold-main | colabfold/utils.py |
# fmt: off
# @formatter:off
import os
from urllib import request
from concurrent import futures
import pickle
import jax
from alphafold.data.tools import jackhmmer
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.common import protein
from alphafold.model import config
from alphafold.model import model
from alphafold.model import data
from alphafold.model.tf import shape_placeholders
import tensorflow as tf
from string import ascii_uppercase
import numpy as np
import matplotlib.pyplot as plt
import re
import colabfold as cf
try:
import pairmsa
except ImportError:
pairmsa=None
try:
from google.colab import files
IN_COLAB = True
except ImportError:
IN_COLAB = False
import tqdm.notebook
TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
#######################################################################################################################################
# prep_inputs
#######################################################################################################################################
def prep_inputs(sequence, jobname="test", homooligomer="1", output_dir=None, clean=False, verbose=True):
# process inputs
sequence = str(sequence)
sequence = re.sub("[^A-Z:/]", "", sequence.upper())
sequence = re.sub(":+",":",sequence)
sequence = re.sub("/+","/",sequence)
sequence = re.sub("^[:/]+","",sequence)
sequence = re.sub("[:/]+$","",sequence)
jobname = re.sub(r'\W+', '', jobname)
homooligomer = str(homooligomer)
homooligomer = re.sub("[:/]+",":",homooligomer)
homooligomer = re.sub("^[:/]+","",homooligomer)
homooligomer = re.sub("[:/]+$","",homooligomer)
if len(homooligomer) == 0: homooligomer = "1"
homooligomer = re.sub("[^0-9:]", "", homooligomer)
# define inputs
I = {"ori_sequence":sequence,
"sequence":sequence.replace("/","").replace(":",""),
"seqs":sequence.replace("/","").split(":"),
"homooligomer":homooligomer,
"homooligomers":[int(h) for h in homooligomer.split(":")],
"msas":[], "deletion_matrices":[]}
# adjust homooligomer option
if len(I["seqs"]) != len(I["homooligomers"]):
if len(I["homooligomers"]) == 1:
I["homooligomers"] = [I["homooligomers"][0]] * len(I["seqs"])
else:
if verbose:
print("WARNING: Mismatch between number of breaks ':' in 'sequence' and 'homooligomer' definition")
while len(I["seqs"]) > len(I["homooligomers"]):
I["homooligomers"].append(1)
I["homooligomers"] = I["homooligomers"][:len(I["seqs"])]
I["homooligomer"] = ":".join([str(h) for h in I["homooligomers"]])
# define full sequence being modelled
I["full_sequence"] = ''.join([s*h for s,h in zip(I["seqs"],I["homooligomers"])])
I["lengths"] = [len(seq) for seq in I["seqs"]]
# prediction directory
if output_dir is None:
I["output_dir"] = 'prediction_' + jobname + '_' + cf.get_hash(I["full_sequence"])[:5]
else:
I["output_dir"] = output_dir
os.makedirs(I["output_dir"], exist_ok=True)
# delete existing files in working directory
if clean:
for f in os.listdir(I["output_dir"]):
os.remove(os.path.join(I["output_dir"], f))
if verbose and len(I["full_sequence"]) > 1400:
print(f"WARNING: For a typical Google-Colab-GPU (16G) session, the max total length is ~1400 residues. You are at {len(I['full_sequence'])}!")
        print("AlphaFold may crash unless you trim the protein(s) to a shorter length. (See trim options below.)")
if verbose:
print(f"homooligomer: {I['homooligomer']}")
print(f"total_length: {len(I['full_sequence'])}")
print(f"output_dir: {I['output_dir']}")
return I
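# usage sketch (editor's addition): a hypothetical two-chain input where the
# second chain is modelled as a homodimer; the output directory is a made-up
# path used only for this example, and the function is not executed on import.
def _example_prep_inputs():
    I = prep_inputs("MKVLAT:GHHHHHH", jobname="example", homooligomer="1:2",
                    output_dir="tmp/example_prep", verbose=False)
    # I["seqs"] == ["MKVLAT", "GHHHHHH"], I["homooligomers"] == [1, 2]
    # I["full_sequence"] == "MKVLAT" + "GHHHHHH" * 2
    return I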
#######################################################################################################################################
# prep_msa
#######################################################################################################################################
def run_jackhmmer(sequence, prefix, jackhmmer_binary_path='jackhmmer', verbose=True):
fasta_path = f"{prefix}.fasta"
with open(fasta_path, 'wt') as f:
f.write(f'>query\n{sequence}')
pickled_msa_path = f"{prefix}.jackhmmer.pickle"
if os.path.isfile(pickled_msa_path):
msas_dict = pickle.load(open(pickled_msa_path,"rb"))
msas, deletion_matrices, names = (msas_dict[k] for k in ['msas', 'deletion_matrices', 'names'])
full_msa = []
for msa in msas:
full_msa += msa
else:
# --- Find the closest source ---
test_url_pattern = 'https://storage.googleapis.com/alphafold-colab{:s}/latest/uniref90_2021_03.fasta.1'
ex = futures.ThreadPoolExecutor(3)
def fetch(source):
request.urlretrieve(test_url_pattern.format(source))
return source
fs = [ex.submit(fetch, source) for source in ['', '-europe', '-asia']]
source = None
for f in futures.as_completed(fs):
source = f.result()
ex.shutdown()
break
dbs = []
num_jackhmmer_chunks = {'uniref90': 59, 'smallbfd': 17, 'mgnify': 71}
total_jackhmmer_chunks = sum(num_jackhmmer_chunks.values())
disable_tqdm = not verbose
with tqdm.notebook.tqdm(total=total_jackhmmer_chunks, bar_format=TQDM_BAR_FORMAT, disable=disable_tqdm) as pbar:
def jackhmmer_chunk_callback(i):
pbar.update(n=1)
pbar.set_description('Searching uniref90')
jackhmmer_uniref90_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/uniref90_2021_03.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['uniref90'],
streaming_callback=jackhmmer_chunk_callback,
z_value=135301051)
dbs.append(('uniref90', jackhmmer_uniref90_runner.query(fasta_path)))
pbar.set_description('Searching smallbfd')
jackhmmer_smallbfd_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/bfd-first_non_consensus_sequences.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['smallbfd'],
streaming_callback=jackhmmer_chunk_callback,
z_value=65984053)
dbs.append(('smallbfd', jackhmmer_smallbfd_runner.query(fasta_path)))
pbar.set_description('Searching mgnify')
jackhmmer_mgnify_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=f'https://storage.googleapis.com/alphafold-colab{source}/latest/mgy_clusters_2019_05.fasta',
get_tblout=True,
num_streamed_chunks=num_jackhmmer_chunks['mgnify'],
streaming_callback=jackhmmer_chunk_callback,
z_value=304820129)
dbs.append(('mgnify', jackhmmer_mgnify_runner.query(fasta_path)))
# --- Extract the MSAs and visualize ---
# Extract the MSAs from the Stockholm files.
# NB: deduplication happens later in pipeline.make_msa_features.
mgnify_max_hits = 501
msas = []
deletion_matrices = []
names = []
for db_name, db_results in dbs:
unsorted_results = []
for i, result in enumerate(db_results):
msa, deletion_matrix, target_names = parsers.parse_stockholm(result['sto'])
e_values_dict = parsers.parse_e_values_from_tblout(result['tbl'])
e_values = [e_values_dict[t.split('/')[0]] for t in target_names]
zipped_results = zip(msa, deletion_matrix, target_names, e_values)
if i != 0:
# Only take query from the first chunk
zipped_results = [x for x in zipped_results if x[2] != 'query']
unsorted_results.extend(zipped_results)
sorted_by_evalue = sorted(unsorted_results, key=lambda x: x[3])
db_msas, db_deletion_matrices, db_names, _ = zip(*sorted_by_evalue)
if db_msas:
if db_name == 'mgnify':
db_msas = db_msas[:mgnify_max_hits]
db_deletion_matrices = db_deletion_matrices[:mgnify_max_hits]
db_names = db_names[:mgnify_max_hits]
msas.append(db_msas)
deletion_matrices.append(db_deletion_matrices)
names.append(db_names)
msa_size = len(set(db_msas))
print(f'{msa_size} Sequences Found in {db_name}')
pickle.dump({"msas":msas,
"deletion_matrices":deletion_matrices,
"names":names}, open(pickled_msa_path,"wb"))
return msas, deletion_matrices, names
def prep_msa(I, msa_method="mmseqs2", add_custom_msa=False, msa_format="fas",
pair_mode="unpaired", pair_cov=50, pair_qid=20,
hhfilter_loc="hhfilter", reformat_loc="reformat.pl", TMP_DIR="tmp",
custom_msa=None, precomputed=None,
mmseqs_host_url="https://a3m.mmseqs.com",
verbose=True):
# make temp directory
os.makedirs(TMP_DIR, exist_ok=True)
# clear previous inputs
I["msas"] = []
I["deletion_matrices"] = []
if add_custom_msa:
if IN_COLAB:
print(f"upload custom msa in '{msa_format}' format")
msa_dict = files.upload()
lines = msa_dict[list(msa_dict.keys())[0]].decode()
input_file = os.path.join(I["output_dir"],f"upload.{msa_format}")
with open(input_file,"w") as tmp_upload:
tmp_upload.write(lines)
else:
input_file = custom_msa
if input_file is None or not os.path.isfile(input_file):
raise ValueError("ERROR: `custom_msa` undefined")
else:
# convert to a3m
output_file = os.path.join(I["output_dir"],f"upload.a3m")
os.system(f"{reformat_loc} {msa_format} a3m {input_file} {output_file}")
# parse
msa, mtx = parsers.parse_a3m(open(output_file,"r").read())
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
if len(I["msas"][0][0]) != len(I["sequence"]):
raise ValueError("ERROR: the length of msa does not match input sequence")
if msa_method == "precomputed":
if IN_COLAB:
print("upload precomputed pickled msa from previous run")
uploaded_dict = files.upload()
uploaded_filename = list(uploaded_dict.keys())[0]
I.update(pickle.loads(uploaded_dict[uploaded_filename]))
elif precomputed is None:
raise ValueError("ERROR: `precomputed` undefined")
else:
I.update(pickle.load(open(precomputed,"rb")))
elif msa_method == "single_sequence":
if len(I["msas"]) == 0:
I["msas"].append([I["sequence"]])
I["deletion_matrices"].append([[0]*len(I["sequence"])])
else:
_blank_seq = ["-" * L for L in I["lengths"]]
_blank_mtx = [[0] * L for L in I["lengths"]]
def _pad(ns,vals,mode):
if mode == "seq": _blank = _blank_seq.copy()
if mode == "mtx": _blank = _blank_mtx.copy()
if isinstance(ns, list):
for n,val in zip(ns,vals): _blank[n] = val
else: _blank[ns] = vals
if mode == "seq": return "".join(_blank)
if mode == "mtx": return sum(_blank,[])
if len(I["seqs"]) == 1 or "unpaired" in pair_mode:
# gather msas
if msa_method == "mmseqs2":
prefix = cf.get_hash(I["sequence"])
prefix = os.path.join(TMP_DIR,prefix)
print(f"running mmseqs2")
A3M_LINES = cf.run_mmseqs2(I["seqs"], prefix, use_filter=True, host_url=mmseqs_host_url)
for n, seq in enumerate(I["seqs"]):
# tmp directory
prefix = cf.get_hash(seq)
prefix = os.path.join(TMP_DIR,prefix)
if msa_method == "mmseqs2":
# run mmseqs2
a3m_lines = A3M_LINES[n]
msa, mtx = parsers.parse_a3m(a3m_lines)
msas_, mtxs_ = [msa],[mtx]
elif msa_method == "jackhmmer":
print(f"running jackhmmer on seq_{n}")
# run jackhmmer
msas_, mtxs_, names_ = ([sum(x,())] for x in run_jackhmmer(seq, prefix))
# pad sequences
for msa_,mtx_ in zip(msas_,mtxs_):
msa,mtx = [I["sequence"]],[[0]*len(I["sequence"])]
for s,m in zip(msa_,mtx_):
msa.append(_pad(n,s,"seq"))
mtx.append(_pad(n,m,"mtx"))
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
# PAIR_MSA
if len(I["seqs"]) > 1 and (pair_mode == "paired" or pair_mode == "unpaired+paired"):
print("attempting to pair some sequences...")
if msa_method == "mmseqs2":
prefix = cf.get_hash(I["sequence"])
prefix = os.path.join(TMP_DIR,prefix)
print(f"running mmseqs2_noenv_nofilter on all seqs")
A3M_LINES = cf.run_mmseqs2(I["seqs"], prefix, use_env=False, use_filter=False, host_url=mmseqs_host_url)
_data = []
for a in range(len(I["seqs"])):
print(f"prepping seq_{a}")
_seq = I["seqs"][a]
_prefix = os.path.join(TMP_DIR,cf.get_hash(_seq))
if msa_method == "mmseqs2":
a3m_lines = A3M_LINES[a]
_msa, _mtx, _lab = pairmsa.parse_a3m(a3m_lines,
filter_qid=pair_qid/100,
filter_cov=pair_cov/100)
elif msa_method == "jackhmmer":
_msas, _mtxs, _names = run_jackhmmer(_seq, _prefix)
_msa, _mtx, _lab = pairmsa.get_uni_jackhmmer(_msas[0], _mtxs[0], _names[0],
filter_qid=pair_qid/100,
filter_cov=pair_cov/100)
if len(_msa) > 1:
_data.append(pairmsa.hash_it(_msa, _lab, _mtx, call_uniprot=False))
else:
_data.append(None)
Ln = len(I["seqs"])
O = [[None for _ in I["seqs"]] for _ in I["seqs"]]
for a in range(Ln):
if _data[a] is not None:
for b in range(a+1,Ln):
if _data[b] is not None:
print(f"attempting pairwise stitch for {a} {b}")
O[a][b] = pairmsa._stitch(_data[a],_data[b])
_seq_a, _seq_b, _mtx_a, _mtx_b = (*O[a][b]["seq"],*O[a][b]["mtx"])
# filter to remove redundant sequences
ok = []
with open(f"{TMP_DIR}/tmp.fas","w") as fas_file:
fas_file.writelines([f">{n}\n{a+b}\n" for n,(a,b) in enumerate(zip(_seq_a,_seq_b))])
os.system(f"{hhfilter_loc} -maxseq 1000000 -i {TMP_DIR}/tmp.fas -o {TMP_DIR}/tmp.id90.fas -id 90")
for line in open(f"{TMP_DIR}/tmp.id90.fas","r"):
if line.startswith(">"): ok.append(int(line[1:]))
if verbose:
print(f"found {len(_seq_a)} pairs ({len(ok)} after filtering)")
if len(_seq_a) > 0:
msa,mtx = [I["sequence"]],[[0]*len(I["sequence"])]
for s_a,s_b,m_a,m_b in zip(_seq_a, _seq_b, _mtx_a, _mtx_b):
msa.append(_pad([a,b],[s_a,s_b],"seq"))
mtx.append(_pad([a,b],[m_a,m_b],"mtx"))
I["msas"].append(msa)
I["deletion_matrices"].append(mtx)
# save MSA as pickle
pickle.dump({"msas":I["msas"],"deletion_matrices":I["deletion_matrices"]},
open(os.path.join(I["output_dir"],"msa.pickle"),"wb"))
return I
#######################################################################################################################################
# prep_filter
#######################################################################################################################################
def trim_inputs(trim, msas, deletion_matrices, ori_seq=None, inverse=False):
'''
input: trim, msas, deletion_matrices, ori_seq
output: msas, deletion_matrices, ori_seq
'''
if ori_seq is None: ori_seq = msas[0][0]
seqs = ori_seq.replace("/","").split(":")
L_ini = 0
chain_idx = {}
idx_chain = []
for chain,seq in zip(ascii_uppercase,seqs):
L = len(seq)
chain_idx[chain] = dict(zip(range(L),range(L_ini,L_ini+L)))
idx_chain += [f"{chain}{i+1}" for i in range(L)]
L_ini += L
global_idx = dict(zip(range(L_ini),range(L_ini)))
mode = "keeping" if inverse else "trimming"
trim_set = []
for idx in trim.split(","):
i,j = idx.split("-") if "-" in idx else (idx,"")
# set index reference frame
trim_idx_i = trim_idx_j = global_idx
if i != "" and i[0] in ascii_uppercase:
trim_idx_i,i = chain_idx[i[0]], i[1:]
if j != "" and j[0] in ascii_uppercase:
trim_idx_j,j = chain_idx[j[0]], j[1:]
# set which positions to trim
if "-" in idx:
i = trim_idx_i[int(i)-1] if i != "" else trim_idx_i[0]
j = trim_idx_j[int(j)-1] if j != "" else trim_idx_j[len(trim_idx_j) - 1]
trim_set += list(range(i,j+1))
print(f"{mode} positions: {idx_chain[i]}-{idx_chain[j]}")
else:
i = trim_idx_i[int(i)-1]
trim_set.append(i)
print(f"{mode} position: {idx_chain[i]}")
# deduplicate list
trim_set = set(trim_set)
if inverse:
trim_set = set(range(L_ini)) ^ trim_set
trim_set = sorted(list(trim_set))
# trim MSA
mod_msas, mod_mtxs = [],[]
for msa, mtx in zip(msas, deletion_matrices):
mod_msa = np.delete([list(s) for s in msa], trim_set, 1)
ok = (mod_msa != "-").sum(-1) > 0
mod_msas.append(["".join(s) for s in mod_msa[ok]])
mod_mtx = np.asarray(mtx)[ok]
mod_mtxs.append(np.delete(mod_mtx, trim_set, 1).tolist())
# trim original sequence
mod_idx = []
mod_chain = []
mod_ori_seq = []
for n,a in enumerate(ori_seq.replace("/","").replace(":","")):
if n not in trim_set:
mod_ori_seq.append(a)
mod_idx.append(n)
mod_chain.append(idx_chain[n][0])
if len(mod_idx) > 1:
if mod_chain[-1] != mod_chain[-2]:
mod_ori_seq[-1] = ":"
mod_ori_seq.append(a)
elif (mod_idx[-1] - mod_idx[-2]) > 1:
mod_ori_seq[-1] = "/"
mod_ori_seq.append(a)
mod_ori_seq = "".join(mod_ori_seq)
chains = sorted([ascii_uppercase.index(a) for a in set(mod_chain)])
return {"msas":mod_msas, "deletion_matrices":mod_mtxs,
"ori_sequence":mod_ori_seq, "chains":chains}
def cov_qid_filter(msas, deletion_matrices, ori_seq=None, cov=0, qid=0):
if ori_seq is None: ori_seq = msas[0][0]
seqs = ori_seq.replace("/","").split(":")
ref_seq_ = np.array(list("".join(seqs)))
new_msas,new_mtxs = [],[]
L = np.asarray([len(seq) for seq in seqs])
Ln = np.cumsum(np.append(0,L))
for msa, mtx in zip(msas, deletion_matrices):
msa_ = np.asarray([list(seq) for seq in msa])
# coverage (non-gap characters)
cov_ = msa_ != "-"
# sequence identity to query
qid_ = msa_ == ref_seq_
# split by protein (for protein complexes)
cov__ = np.stack([cov_[:,Ln[i]:Ln[i+1]].sum(-1) for i in range(len(seqs))],-1)
qid__ = np.stack([qid_[:,Ln[i]:Ln[i+1]].sum(-1) for i in range(len(seqs))],-1)
not_empty__ = cov__ > 0
ok = []
for n in range(len(msa)):
m = not_empty__[n]
if m.sum() > 0:
q = qid__[n][m].sum() / cov__[n][m].sum()
c = cov__[n][m].sum() / L[m].sum()
if q > qid and c > cov:
ok.append(n)
new_msas.append([msa[n] for n in ok])
new_mtxs.append([mtx[n] for n in ok])
return {"msas":new_msas, "deletion_matrices":new_mtxs}
def prep_filter(I, trim="", trim_inverse=False, cov=0, qid=0, verbose=True):
trim = re.sub("[^0-9A-Z,-]", "", trim.upper())
trim = re.sub(",+",",",trim)
trim = re.sub("^[,]+","",trim)
trim = re.sub("[,]+$","",trim)
if trim != "" or cov > 0 or qid > 0:
mod_I = dict(I)
if trim != "":
mod_I.update(trim_inputs(trim, mod_I["msas"], mod_I["deletion_matrices"],
mod_I["ori_sequence"], inverse=trim_inverse))
mod_I["homooligomers"] = [mod_I["homooligomers"][c] for c in mod_I["chains"]]
mod_I["sequence"] = mod_I["ori_sequence"].replace("/","").replace(":","")
mod_I["seqs"] = mod_I["ori_sequence"].replace("/","").split(":")
mod_I["full_sequence"] = "".join([s*h for s,h in zip(mod_I["seqs"], mod_I["homooligomers"])])
new_length = len(mod_I["full_sequence"])
if verbose:
print(f"total_length: '{new_length}' after trimming")
if cov > 0 or qid > 0:
mod_I.update(cov_qid_filter(mod_I["msas"], mod_I["deletion_matrices"],
mod_I["ori_sequence"], cov=cov/100, qid=qid/100))
return mod_I
else:
return I
#######################################################################################################################################
# prep features
#######################################################################################################################################
def prep_feats(I, clean=False):
def _placeholder_template_feats(num_templates_, num_res_):
return {
'template_aatype': np.zeros([num_templates_, num_res_, 22], np.float32),
            'template_all_atom_masks': np.zeros([num_templates_, num_res_, 37], np.float32),
            'template_all_atom_positions': np.zeros([num_templates_, num_res_, 37, 3], np.float32),
'template_domain_names': np.zeros([num_templates_], np.float32),
'template_sum_probs': np.zeros([num_templates_], np.float32),
}
# delete old files
if clean:
for f in os.listdir(I["output_dir"]):
if "rank_" in f: os.remove(os.path.join(I["output_dir"], f))
if len(I["msas"]) == 0:
print("WARNING: no MSA found, switching to 'single_sequence' mode")
I["msas"].append([I["sequence"]])
I["deletion_matrices"].append([[0]*len(I["sequence"])])
# homooligomerize
lengths = [len(seq) for seq in I["seqs"]]
msas_mod, deletion_matrices_mod = cf.homooligomerize_heterooligomer(I["msas"], I["deletion_matrices"],
lengths, I["homooligomers"])
# define input features
num_res = len(I["full_sequence"])
feature_dict = {}
feature_dict.update(pipeline.make_sequence_features(I["full_sequence"], 'test', num_res))
feature_dict.update(pipeline.make_msa_features(msas_mod, deletion_matrices=deletion_matrices_mod))
feature_dict.update(_placeholder_template_feats(0, num_res))
# set chainbreaks
Ls = []
for seq,h in zip(I["ori_sequence"].split(":"), I["homooligomers"]):
Ls += [len(s) for s in seq.split("/")] * h
Ls_plot = []
for seq,h in zip(I["seqs"], I["homooligomers"]):
Ls_plot += [len(seq)] * h
feature_dict['residue_index'] = cf.chain_break(feature_dict['residue_index'], Ls)
feature_dict['Ls'] = Ls_plot
feature_dict['output_dir'] = I["output_dir"]
return feature_dict
def make_fixed_size(feat, runner):
'''pad input features'''
opt = runner["opt"]
cfg = runner["model"].config
shape_schema = {k:[None]+v for k,v in dict(cfg.data.eval.feat).items()}
pad_size_map = {
shape_placeholders.NUM_RES: opt["L"],
shape_placeholders.NUM_MSA_SEQ: cfg.data.eval.max_msa_clusters,
shape_placeholders.NUM_EXTRA_SEQ: cfg.data.common.max_extra_msa,
shape_placeholders.NUM_TEMPLATES: 0,
}
for k, v in feat.items():
# Don't transfer this to the accelerator.
if k == 'extra_cluster_assignment':
continue
shape = list(v.shape)
schema = shape_schema[k]
assert len(shape) == len(schema), (
f'Rank mismatch between shape and shape schema for {k}: '
f'{shape} vs {schema}')
pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
padding = [(0, p - tf.shape(v)[i]) for i, p in enumerate(pad_size)]
if padding:
feat[k] = tf.pad(v, padding, name=f'pad_to_fixed_{k}')
feat[k].set_shape(pad_size)
return {k:np.asarray(v) for k,v in feat.items()}
#######################################################################################################################################
# run alphafold
#######################################################################################################################################
def clear_mem(device=None):
'''remove all data from device'''
backend = jax.lib.xla_bridge.get_backend(device)
if hasattr(backend,'live_buffers'):
for buf in backend.live_buffers():
buf.delete()
OPT_DEFAULT = {"N":None, "L":None,
"use_ptm":True, "use_turbo":True,
"max_recycles":3, "tol":0, "num_ensemble":1,
"max_msa_clusters":512, "max_extra_msa":1024,
"is_training":False}
def prep_model_runner(opt=None, model_name="model_5", old_runner=None, params_loc='./alphafold/data'):
# setup the [opt]ions
if opt is None:
opt = OPT_DEFAULT.copy()
else:
for k in OPT_DEFAULT:
if k not in opt: opt[k] = OPT_DEFAULT[k]
# if old_runner not defined or [opt]ions changed, start new runner
if old_runner is None or old_runner["opt"] != opt:
clear_mem()
name = f"{model_name}_ptm" if opt["use_ptm"] else model_name
cfg = config.model_config(name)
if opt["use_turbo"]:
if opt["N"] is None:
cfg.data.eval.max_msa_clusters = opt["max_msa_clusters"]
cfg.data.common.max_extra_msa = opt["max_extra_msa"]
else:
msa_clusters = min(opt["N"], opt["max_msa_clusters"])
cfg.data.eval.max_msa_clusters = msa_clusters
cfg.data.common.max_extra_msa = max(min(opt["N"] - msa_clusters, opt["max_extra_msa"]),1)
cfg.data.common.num_recycle = opt["max_recycles"]
cfg.model.num_recycle = opt["max_recycles"]
cfg.model.recycle_tol = opt["tol"]
cfg.data.eval.num_ensemble = opt["num_ensemble"]
params = data.get_model_haiku_params(name, params_loc)
return {"model":model.RunModel(cfg, params, is_training=opt["is_training"]), "opt":opt}
else:
return old_runner
def run_alphafold(feature_dict, opt=None, runner=None, num_models=5, num_samples=1, subsample_msa=True,
pad_feats=False, rank_by="pLDDT", show_images=True, params_loc='./alphafold/data', verbose=True):
def do_subsample_msa(F, random_seed=0):
'''subsample msa to avoid running out of memory'''
N = len(F["msa"])
L = len(F["residue_index"])
N_ = int(3E7/L)
if N > N_:
if verbose:
print(f"whhhaaa... too many sequences ({N}) subsampling to {N_}")
np.random.seed(random_seed)
idx = np.append(0,np.random.permutation(np.arange(1,N)))[:N_]
F_ = {}
F_["msa"] = F["msa"][idx]
F_["deletion_matrix_int"] = F["deletion_matrix_int"][idx]
F_["num_alignments"] = np.full_like(F["num_alignments"],N_)
for k in F.keys():
if k not in F_: F_[k] = F[k]
return F_
else:
return F
def parse_results(prediction_result, processed_feature_dict, r, t, num_res):
'''parse results and convert to numpy arrays'''
to_np = lambda a: np.asarray(a)
def class_to_np(c):
class dict2obj():
def __init__(self, d):
for k,v in d.items(): setattr(self, k, to_np(v))
return dict2obj(c.__dict__)
dist_bins = jax.numpy.append(0,prediction_result["distogram"]["bin_edges"])
dist_logits = prediction_result["distogram"]["logits"][:num_res,:][:,:num_res]
dist_mtx = dist_bins[dist_logits.argmax(-1)]
contact_mtx = jax.nn.softmax(dist_logits)[:,:,dist_bins < 8].sum(-1)
b_factors = prediction_result['plddt'][:,None] * prediction_result['structure_module']['final_atom_mask']
p = protein.from_prediction(processed_feature_dict, prediction_result, b_factors=b_factors)
plddt = prediction_result['plddt'][:num_res]
out = {"unrelaxed_protein": class_to_np(p),
"plddt": to_np(plddt),
"pLDDT": to_np(plddt.mean()),
"dists": to_np(dist_mtx),
"adj": to_np(contact_mtx),
"recycles":to_np(r),
"tol":to_np(t)}
if "ptm" in prediction_result:
out["pae"] = to_np(prediction_result['predicted_aligned_error'][:num_res,:][:,:num_res])
out["pTMscore"] = to_np(prediction_result['ptm'])
return out
num_res = len(feature_dict["residue_index"])
# if [opt]ions not defined
if opt is None:
opt = OPT_DEFAULT.copy()
opt["N"] = len(feature_dict["msa"])
opt["L"] = num_res
else:
for k in OPT_DEFAULT.keys():
if k not in opt: opt[k] = OPT_DEFAULT[k]
model_names = ['model_1', 'model_2', 'model_3', 'model_4', 'model_5'][:num_models]
total = len(model_names) * num_samples
outs = {}
def do_report(key):
o = outs[key]
if verbose:
line = f"{key} recycles:{o['recycles']} tol:{o['tol']:.2f} pLDDT:{o['pLDDT']:.2f}"
if 'pTMscore' in o:
line += f" pTMscore:{o['pTMscore']:.2f}"
print(line)
if show_images:
fig = cf.plot_protein(o['unrelaxed_protein'], Ls=feature_dict["Ls"], dpi=100)
plt.show()
tmp_pdb_path = os.path.join(feature_dict["output_dir"],f'unranked_{key}_unrelaxed.pdb')
pdb_lines = protein.to_pdb(o['unrelaxed_protein'])
with open(tmp_pdb_path, 'w') as f: f.write(pdb_lines)
disable_tqdm = not verbose
with tqdm.notebook.tqdm(total=total, bar_format=TQDM_BAR_FORMAT, disable=disable_tqdm) as pbar:
if opt["use_turbo"]:
if runner is None:
runner = prep_model_runner(opt,params_loc=params_loc)
# go through each random_seed
for seed in range(num_samples):
# prep input features
feat = do_subsample_msa(feature_dict, random_seed=seed) if subsample_msa else feature_dict
processed_feature_dict = runner["model"].process_features(feat, random_seed=seed)
if pad_feats:
processed_feature_dict = make_fixed_size(processed_feature_dict, runner)
# go through each model
for num, model_name in enumerate(model_names):
name = model_name+"_ptm" if opt["use_ptm"] else model_name
key = f"{name}_seed_{seed}"
pbar.set_description(f'Running {key}')
# replace model parameters
params = data.get_model_haiku_params(name, params_loc)
for k in runner["model"].params.keys():
runner["model"].params[k] = params[k]
# predict
prediction_result, (r, t) = runner["model"].predict(processed_feature_dict, random_seed=seed)
outs[key] = parse_results(prediction_result, processed_feature_dict, r=r, t=t, num_res=num_res)
# cleanup
del prediction_result, params, r, t
# report
do_report(key)
pbar.update(n=1)
# cleanup
del processed_feature_dict
if subsample_msa: del feat
else:
# go through each model
for num, model_name in enumerate(model_names):
name = model_name+"_ptm" if opt["use_ptm"] else model_name
                model_runner = prep_model_runner(opt, model_name=model_name, params_loc=params_loc)["model"]
# go through each random_seed
for seed in range(num_samples):
key = f"{name}_seed_{seed}"
pbar.set_description(f'Running {key}')
processed_feature_dict = model_runner.process_features(feature_dict, random_seed=seed)
# predict
prediction_result, (r, t) = model_runner.predict(processed_feature_dict, random_seed=seed)
outs[key] = parse_results(prediction_result, processed_feature_dict, r=r, t=t, num_res=num_res)
# cleanup
del processed_feature_dict, prediction_result, r, t
# report
do_report(key)
pbar.update(n=1)
# cleanup
del model_runner
# Find the best model according to the mean pLDDT.
model_rank = list(outs.keys())
model_rank = [model_rank[i] for i in np.argsort([outs[x][rank_by] for x in model_rank])[::-1]]
# Write out the prediction
for n,key in enumerate(model_rank):
prefix = f"rank_{n+1}_{key}"
pred_output_path = os.path.join(feature_dict["output_dir"],f'{prefix}_unrelaxed.pdb')
fig = cf.plot_protein(outs[key]["unrelaxed_protein"], Ls=feature_dict["Ls"], dpi=200)
plt.savefig(os.path.join(feature_dict["output_dir"],f'{prefix}.png'), bbox_inches = 'tight')
plt.close(fig)
pdb_lines = protein.to_pdb(outs[key]["unrelaxed_protein"])
with open(pred_output_path, 'w') as f:
f.write(pdb_lines)
tmp_pdb_path = os.path.join(feature_dict["output_dir"],f'unranked_{key}_unrelaxed.pdb')
if os.path.isfile(tmp_pdb_path):
os.remove(tmp_pdb_path)
############################################################
if verbose:
print(f"model rank based on {rank_by}")
for n,key in enumerate(model_rank):
print(f"rank_{n+1}_{key} {rank_by}:{outs[key][rank_by]:.2f}")
return outs, model_rank
| ColabFold-main | colabfold/colabfold_alphafold.py |
from typing import Mapping, Any
import numpy as np
import tensorflow as tf
from alphafold.model.features import FeatureDict
from alphafold.model.tf import shape_placeholders
NUM_RES = shape_placeholders.NUM_RES
NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ
NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ
NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES
def make_fixed_size(
protein: Mapping[str, Any],
shape_schema,
msa_cluster_size: int,
extra_msa_size: int,
num_res: int,
num_templates: int = 0,
) -> FeatureDict:
"""Guess at the MSA and sequence dimensions to make fixed size."""
pad_size_map = {
NUM_RES: num_res,
NUM_MSA_SEQ: msa_cluster_size,
NUM_EXTRA_SEQ: extra_msa_size,
NUM_TEMPLATES: num_templates,
}
for k, v in protein.items():
# Don't transfer this to the accelerator.
if k == "extra_cluster_assignment":
continue
shape = list(v.shape)
schema = shape_schema[k]
assert len(shape) == len(schema), (
f"Rank mismatch between shape and shape schema for {k}: "
f"{shape} vs {schema}"
)
pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
padding = [(0, p - tf.shape(v)[i]) for i, p in enumerate(pad_size)]
if padding:
# TODO: alphafold's typing is wrong
protein[k] = tf.pad(v, padding, name=f"pad_to_fixed_{k}")
protein[k].set_shape(pad_size)
return {k: np.asarray(v) for k, v in protein.items()}
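# minimal sketch (editor's addition): pad a toy feature dict to fixed sizes
# using a hand-written shape schema. In real use the schema comes from the
# model config (assumption: e.g. cfg.data.eval.feat); the values here are
# placeholders chosen only to illustrate the padding.
def _example_make_fixed_size():
    feat = {"msa": np.zeros((5, 7), dtype=np.int32)}
    schema = {"msa": [NUM_MSA_SEQ, NUM_RES]}
    padded = make_fixed_size(feat, schema, msa_cluster_size=8, extra_msa_size=0, num_res=10)
    # padded["msa"].shape == (8, 10)
    return padded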
| ColabFold-main | colabfold/alphafold/msa.py |
from pathlib import Path
from functools import wraps, partialmethod
from typing import Tuple, List, Optional
import haiku
from alphafold.model import model, config, data
from alphafold.model.modules import AlphaFold
from alphafold.model.modules_multimer import AlphaFold as AlphaFoldMultimer
def load_models_and_params(
num_models: int,
use_templates: bool,
num_recycle: int = 3,
model_order: Optional[List[int]] = None,
model_suffix: str = "_ptm",
data_dir: Path = Path("."),
recompile_all_models: bool = False,
stop_at_score: float = 100,
rank_by: str = "plddt",
return_representations: bool = False,
) -> List[Tuple[str, model.RunModel, haiku.Params]]:
"""We use only two actual models and swap the parameters to avoid recompiling.
Note that models 1 and 2 have a different number of parameters compared to models 3, 4 and 5,
so we load model 1 and model 3.
"""
if return_representations:
# this forces the AlphaFold to always return representations
AlphaFold.__call__ = partialmethod(
AlphaFold.__call__, return_representations=True
)
AlphaFoldMultimer.__call__ = partialmethod(
AlphaFoldMultimer.__call__, return_representations=True
)
if not model_order:
model_order = [3, 4, 5, 1, 2]
# Use only two model and later swap params to avoid recompiling
    model_runner_and_params: List[Tuple[str, model.RunModel, haiku.Params]] = []
if recompile_all_models:
for n, model_number in enumerate(model_order):
if n == num_models:
break
model_name = f"model_{model_number}"
params = data.get_model_haiku_params(
model_name=model_name + model_suffix, data_dir=str(data_dir)
)
model_config = config.model_config(model_name + model_suffix)
model_config.model.stop_at_score = float(stop_at_score)
model_config.model.stop_at_score_ranker = rank_by
if model_suffix == "_ptm":
model_config.data.eval.num_ensemble = 1
model_config.data.common.num_recycle = num_recycle
model_config.model.num_recycle = num_recycle
elif model_suffix.startswith("_multimer"):
model_config.model.num_recycle = num_recycle
model_config.model.num_ensemble_eval = 1
model_runner_and_params.append(
(model_name, model.RunModel(model_config, params), params)
)
else:
models_need_compilation = [1, 3] if use_templates else [3]
model_build_order = [3, 4, 5, 1, 2]
        model_runner_and_params_build_order: List[
            Tuple[str, model.RunModel, haiku.Params]
        ] = []
model_runner = None
for model_number in model_build_order:
if model_number in models_need_compilation:
model_config = config.model_config(
"model_" + str(model_number) + model_suffix
)
model_config.model.stop_at_score = float(stop_at_score)
model_config.model.stop_at_score_ranker = rank_by
if model_suffix == "_ptm":
model_config.data.eval.num_ensemble = 1
model_config.data.common.num_recycle = num_recycle
model_config.model.num_recycle = num_recycle
elif model_suffix.startswith("_multimer"):
model_config.model.num_ensemble_eval = 1
model_config.model.num_recycle = num_recycle
model_runner = model.RunModel(
model_config,
data.get_model_haiku_params(
model_name="model_" + str(model_number) + model_suffix,
data_dir=str(data_dir),
),
)
model_name = f"model_{model_number}"
params = data.get_model_haiku_params(
model_name=model_name + model_suffix, data_dir=str(data_dir)
)
# keep only parameters of compiled model
params_subset = {}
for k in model_runner.params.keys():
params_subset[k] = params[k]
model_runner_and_params_build_order.append(
(model_name, model_runner, params_subset)
)
# reorder model
for n, model_number in enumerate(model_order):
if n == num_models:
break
model_name = f"model_{model_number}"
for m in model_runner_and_params_build_order:
if model_name == m[0]:
model_runner_and_params.append(m)
break
return model_runner_and_params
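# usage sketch (editor's addition): load three monomer (ptm) models, assuming
# the AlphaFold weights were already downloaded under a hypothetical "params"
# directory; not executed on import.
def _example_load_models_and_params():
    runners = load_models_and_params(
        num_models=3,
        use_templates=False,
        num_recycle=3,
        model_suffix="_ptm",
        data_dir=Path("params"),
    )
    for name, runner, params in runners:
        print(name, type(runner).__name__)
    return runners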
| ColabFold-main | colabfold/alphafold/models.py |
ColabFold-main | colabfold/alphafold/__init__.py |
|
"""
colabfold_search produces two a3m files with null-separated MSAs in them.
We merge the two searches and then split them into one a3m file per MSA.
"""
import logging
from argparse import ArgumentParser
from pathlib import Path
from subprocess import check_call
from tqdm import tqdm
logger = logging.getLogger(__name__)
def merge_msa(mmseqs: str = "mmseqs", cwd: Path = Path("..")):
check_call(
[
mmseqs,
"mergedbs",
"bfd.mgnify30.metaeuk30.smag30.a3m",
"merged.a3m",
"uniref.a3m",
"bfd.mgnify30.metaeuk30.smag30.a3m",
],
cwd=cwd,
)
return Path(cwd).joinpath("merged.a3m")
def split_msa(merged_msa: Path, output_folder: Path):
for msa in tqdm(merged_msa.read_text().split("\0")):
if not msa.strip():
continue
filename = msa.split("\n", 1)[0][1:].split(" ")[0] + ".a3m"
output_folder.joinpath(filename).write_text(msa)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
parser = ArgumentParser(
description="Take an a3m database from the colabdb search and turn it into a folder of a3m files"
)
parser.add_argument(
"search_folder",
help="The search folder in which you ran colabfold_search, "
"which should contain uniref.a3m and bfd.mgnify30.metaeuk30.smag30.a3m",
)
parser.add_argument("output_folder", help="Will contain all the a3m files")
parser.add_argument("--mmseqs", help="Path to the mmseqs2 binary", default="mmseqs")
args = parser.parse_args()
output_folder = Path(args.output_folder)
output_folder.mkdir(exist_ok=True)
logger.info("Merging MSAs")
merged_msa = merge_msa(args.mmseqs, Path(args.search_folder))
logger.info("Splitting MSAs")
split_msa(merged_msa, output_folder)
logger.info("Done")
if __name__ == "__main__":
main()
| ColabFold-main | colabfold/mmseqs/merge_and_split_msas.py |
ColabFold-main | colabfold/mmseqs/__init__.py |
|
"""
colabfold_search produces an a3m file (final.a3m) with null-separated MSAs in it.
We split it into one a3m file per MSA.
"""
import logging
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
logger = logging.getLogger(__name__)
def split_msa(merged_msa: Path, output_folder: Path):
for msa in tqdm(merged_msa.read_text().split("\0")):
if not msa.strip():
continue
filename = msa.split("\n", 1)[0][1:].split(" ")[0].replace("/", "_") + ".a3m"
output_folder.joinpath(filename).write_text(msa)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
parser = ArgumentParser(
description="Take an a3m database from the colabdb search and turn it into a folder of a3m files"
)
parser.add_argument(
"search_folder",
help="The search folder in which you ran colabfold_search with the final.a3m",
)
parser.add_argument("output_folder", help="Will contain all the a3m files")
parser.add_argument("--mmseqs", help="Path to the mmseqs2 binary", default="mmseqs")
args = parser.parse_args()
output_folder = Path(args.output_folder)
output_folder.mkdir(exist_ok=True)
logger.info("Splitting MSAs")
split_msa(Path(args.search_folder).joinpath("final.a3m"), output_folder)
logger.info("Done")
if __name__ == "__main__":
main()
| ColabFold-main | colabfold/mmseqs/split_msas.py |
"""
Functionality for running MMseqs2 locally. Takes in a FASTA file and outputs final.a3m.
Note: currently needs MMseqs2 compiled from source.
"""
import logging
import math
import shutil
import subprocess
from argparse import ArgumentParser
from pathlib import Path
from typing import List, Union
from colabfold.batch import get_queries, msa_to_str
logger = logging.getLogger(__name__)
def run_mmseqs(mmseqs: Path, params: List[Union[str, Path]]):
params_log = " ".join(str(i) for i in params)
logger.info(f"Running {mmseqs} {params_log}")
subprocess.check_call([mmseqs] + params)
def mmseqs_search_monomer(
dbbase: Path,
base: Path,
uniref_db: Path = Path("uniref30_2103_db"),
template_db: Path = Path(""), # Unused by default
metagenomic_db: Path = Path("colabfold_envdb_202108_db"),
mmseqs: Path = Path("mmseqs"),
use_env: bool = True,
use_templates: bool = False,
filter: bool = True,
expand_eval: float = math.inf,
align_eval: int = 10,
diff: int = 3000,
qsc: float = -20.0,
max_accept: int = 1000000,
s: float = 8,
db_load_mode: int = 2,
threads: int = 64,
):
"""Run mmseqs with a local colabfold database set
db1: uniprot db (UniRef30)
db2: Template (unused by default)
db3: metagenomic db (colabfold_envdb_202108 or bfd_mgy_colabfold, the former is preferred)
"""
if filter:
# 0.1 was not used in benchmarks due to POSIX shell bug in line above
# EXPAND_EVAL=0.1
align_eval = 10
qsc = 0.8
max_accept = 100000
used_dbs = [uniref_db]
if use_templates:
used_dbs.append(template_db)
if use_env:
used_dbs.append(metagenomic_db)
for db in used_dbs:
if not dbbase.joinpath(f"{db}.dbtype").is_file():
raise FileNotFoundError(f"Database {db} does not exist")
if (
not dbbase.joinpath(f"{db}.idx").is_file()
and not dbbase.joinpath(f"{db}.idx.index").is_file()
):
logger.info("Search does not use index")
db_load_mode = 0
dbSuffix1 = "_seq"
dbSuffix2 = "_aln"
else:
dbSuffix1 = ".idx"
dbSuffix2 = ".idx"
# fmt: off
# @formatter:off
search_param = ["--num-iterations", "3", "--db-load-mode", str(db_load_mode), "-a", "-s", str(s), "-e", "0.1", "--max-seqs", "10000",]
filter_param = ["--filter-msa", str(filter), "--filter-min-enable", "1000", "--diff", str(diff), "--qid", "0.0,0.2,0.4,0.6,0.8,1.0", "--qsc", "0", "--max-seq-id", "0.95",]
expand_param = ["--expansion-mode", "0", "-e", str(expand_eval), "--expand-filter-clusters", str(filter), "--max-seq-id", "0.95",]
run_mmseqs(mmseqs, ["search", base.joinpath("qdb"), dbbase.joinpath(uniref_db), base.joinpath("res"), base.joinpath("tmp"), "--threads", str(threads)] + search_param)
run_mmseqs(mmseqs, ["expandaln", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res"), dbbase.joinpath(f"{uniref_db}{dbSuffix2}"), base.joinpath("res_exp"), "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + expand_param)
run_mmseqs(mmseqs, ["mvdb", base.joinpath("tmp/latest/profile_1"), base.joinpath("prof_res")])
run_mmseqs(mmseqs, ["lndb", base.joinpath("qdb_h"), base.joinpath("prof_res_h")])
run_mmseqs(mmseqs, ["align", base.joinpath("prof_res"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"), base.joinpath("res_exp"), base.joinpath("res_exp_realign"), "--db-load-mode", str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads", str(threads), "--alt-ali", "10", "-a"])
run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res_exp_realign"), base.joinpath("res_exp_realign_filter"), "--db-load-mode",
str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0", "--threads",
str(threads), "--max-seq-id", "1.0", "--filter-min-enable", "100"])
run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res_exp_realign_filter"), base.joinpath("uniref.a3m"), "--msa-format-mode",
"6", "--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param)
subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign")])
subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp")])
subprocess.run([mmseqs] + ["rmdb", base.joinpath("res")])
subprocess.run([mmseqs] + ["rmdb", base.joinpath("res_exp_realign_filter")])
if use_templates:
run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(template_db), base.joinpath("res_pdb"), base.joinpath("tmp"), "--db-load-mode", str(db_load_mode), "--threads", str(threads), "-s", "7.5", "-a", "-e", "0.1"])
run_mmseqs(mmseqs, ["convertalis", base.joinpath("prof_res"), dbbase.joinpath(f"{template_db}{dbSuffix1}"), base.joinpath("res_pdb"), base.joinpath(f"{template_db}.m8"), "--format-output", "query,target,fident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,cigar", "--db-load-mode", str(db_load_mode), "--threads", str(threads)])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_pdb")])
if use_env:
run_mmseqs(mmseqs, ["search", base.joinpath("prof_res"), dbbase.joinpath(metagenomic_db), base.joinpath("res_env"), base.joinpath("tmp"), "--threads", str(threads)] + search_param)
run_mmseqs(mmseqs, ["expandaln", base.joinpath("prof_res"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"), base.joinpath("res_env"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix2}"), base.joinpath("res_env_exp"), "-e", str(expand_eval), "--expansion-mode", "0", "--db-load-mode", str(db_load_mode), "--threads", str(threads)])
run_mmseqs(mmseqs,
["align", base.joinpath("tmp/latest/profile_1"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"),
base.joinpath("res_env_exp"), base.joinpath("res_env_exp_realign"), "--db-load-mode",
str(db_load_mode), "-e", str(align_eval), "--max-accept", str(max_accept), "--threads",
str(threads), "--alt-ali", "10", "-a"])
run_mmseqs(mmseqs, ["filterresult", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"),
base.joinpath("res_env_exp_realign"), base.joinpath("res_env_exp_realign_filter"),
"--db-load-mode", str(db_load_mode), "--qid", "0", "--qsc", str(qsc), "--diff", "0",
"--max-seq-id", "1.0", "--threads", str(threads), "--filter-min-enable", "100"])
run_mmseqs(mmseqs, ["result2msa", base.joinpath("qdb"), dbbase.joinpath(f"{metagenomic_db}{dbSuffix1}"),
base.joinpath("res_env_exp_realign_filter"),
base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m"), "--msa-format-mode", "6",
"--db-load-mode", str(db_load_mode), "--threads", str(threads)] + filter_param)
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign_filter")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp_realign")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env_exp")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_env")])
if use_env:
run_mmseqs(mmseqs, ["mergedbs", base.joinpath("qdb"), base.joinpath("final.a3m"), base.joinpath("uniref.a3m"), base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("bfd.mgnify30.metaeuk30.smag30.a3m")])
else:
run_mmseqs(mmseqs, ["mvdb", base.joinpath("uniref.a3m"), base.joinpath("final.a3m")])
run_mmseqs(mmseqs, ["unpackdb", base.joinpath("final.a3m"), base.joinpath("."), "--unpack-name-mode", "0", "--unpack-suffix", ".a3m"])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("final.a3m")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("uniref.a3m")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")])
# @formatter:on
# fmt: on
for file in base.glob("prof_res*"):
file.unlink()
shutil.rmtree(base.joinpath("tmp"))
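# Illustrative sketch only (not from the original repository): a direct call into
# mmseqs_search_monomer with the default database names used above. The folder
# paths are hypothetical; `base` must already contain a query DB named "qdb"
# (as created by `mmseqs createdb`, see main() below) and `dbbase` must hold the
# databases prepared by setup_databases.sh.
def _example_search_monomer():
    mmseqs_search_monomer(
        dbbase=Path("/path/to/databases"),   # hypothetical
        base=Path("/path/to/results"),       # hypothetical
        uniref_db=Path("uniref30_2103_db"),
        metagenomic_db=Path("colabfold_envdb_202108_db"),
        use_env=True,
        use_templates=False,
        threads=8,
    )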
def mmseqs_search_pair(
dbbase: Path,
base: Path,
uniref_db: Path = Path("uniref30_2103_db"),
mmseqs: Path = Path("mmseqs"),
s: float = 8,
threads: int = 64,
db_load_mode: int = 2,
):
if not dbbase.joinpath(f"{uniref_db}.dbtype").is_file():
raise FileNotFoundError(f"Database {uniref_db} does not exist")
if (
not dbbase.joinpath(f"{uniref_db}.idx").is_file()
and not dbbase.joinpath(f"{uniref_db}.idx.index").is_file()
):
logger.info("Search does not use index")
db_load_mode = 0
dbSuffix1 = "_seq"
dbSuffix2 = "_aln"
else:
dbSuffix1 = ".idx"
dbSuffix2 = ".idx"
search_param = [
"--num-iterations",
"3",
"--db-load-mode",
str(db_load_mode),
"-a",
"-s",
str(s),
"-e",
"0.1",
"--max-seqs",
"10000",
]
expand_param = [
"--expansion-mode",
"0",
"-e",
"inf",
"--expand-filter-clusters",
"0",
"--max-seq-id",
"0.95",
]
run_mmseqs(
mmseqs,
[
"search",
base.joinpath("qdb"),
dbbase.joinpath(uniref_db),
base.joinpath("res"),
base.joinpath("tmp"),
"--threads",
str(threads),
]
+ search_param,
)
run_mmseqs(
mmseqs,
[
"expandaln",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res"),
dbbase.joinpath(f"{uniref_db}{dbSuffix2}"),
base.joinpath("res_exp"),
"--db-load-mode",
str(db_load_mode),
"--threads",
str(threads),
]
+ expand_param,
)
run_mmseqs(
mmseqs,
[
"align",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res_exp"),
base.joinpath("res_exp_realign"),
"--db-load-mode",
str(db_load_mode),
"-e",
"0.001",
"--max-accept",
"1000000",
"--threads",
str(threads),
"-c",
"0.5",
"--cov-mode",
"1",
],
)
run_mmseqs(
mmseqs,
[
"pairaln",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}"),
base.joinpath("res_exp_realign"),
base.joinpath("res_exp_realign_pair"),
"--db-load-mode",
str(db_load_mode),
"--threads",
str(threads),
],
)
run_mmseqs(
mmseqs,
[
"align",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res_exp_realign_pair"),
base.joinpath("res_exp_realign_pair_bt"),
"--db-load-mode",
str(db_load_mode),
"-e",
"inf",
"--threads",
str(threads),
],
)
run_mmseqs(
mmseqs,
[
"pairaln",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}"),
base.joinpath("res_exp_realign_pair_bt"),
base.joinpath("res_final"),
"--db-load-mode",
str(db_load_mode),
"--threads",
str(threads),
],
)
run_mmseqs(
mmseqs,
[
"result2msa",
base.joinpath("qdb"),
dbbase.joinpath(f"{uniref_db}{dbSuffix1}"),
base.joinpath("res_final"),
base.joinpath("pair.a3m"),
"--db-load-mode",
str(db_load_mode),
"--msa-format-mode",
"5",
"--threads",
str(threads),
],
)
run_mmseqs(
mmseqs,
[
"unpackdb",
base.joinpath("pair.a3m"),
base.joinpath("."),
"--unpack-name-mode",
"0",
"--unpack-suffix",
".paired.a3m",
],
)
run_mmseqs(mmseqs, ["rmdb", base.joinpath("qdb")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("qdb_h")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_exp_realign_pair_bt")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("res_final")])
run_mmseqs(mmseqs, ["rmdb", base.joinpath("pair.a3m")])
shutil.rmtree(base.joinpath("tmp"))
def main():
parser = ArgumentParser()
parser.add_argument(
"query",
type=Path,
help="fasta files with the queries. Doesn't support complexes yet",
)
parser.add_argument(
"dbbase",
type=Path,
help="The path to the database and indices you downloaded and created with setup_databases.sh",
)
parser.add_argument(
"base", type=Path, help="Directory for the results (and intermediate files)"
)
parser.add_argument(
"-s",
type=int,
default=8,
help="mmseqs sensitivity. Lowering this will result in a much faster search but possibly sparser msas",
)
# dbs are uniref, templates and environmental
# We normally don't use templates
parser.add_argument(
"--db1", type=Path, default=Path("uniref30_2103_db"), help="UniRef database"
)
parser.add_argument("--db2", type=Path, default=Path(""), help="Templates database")
parser.add_argument(
"--db3",
type=Path,
default=Path("colabfold_envdb_202108_db"),
help="Environmental database",
)
# poor man's boolean arguments
parser.add_argument("--use-env", type=int, default=1, choices=[0, 1])
parser.add_argument("--use-templates", type=int, default=0, choices=[0, 1])
parser.add_argument("--filter", type=int, default=1, choices=[0, 1])
parser.add_argument(
"--mmseqs",
type=Path,
default=Path("mmseqs"),
help="Location of the mmseqs binary",
)
parser.add_argument("--expand-eval", type=float, default=math.inf)
parser.add_argument("--align-eval", type=int, default=10)
parser.add_argument("--diff", type=int, default=3000)
parser.add_argument("--qsc", type=float, default=-20.0)
parser.add_argument("--max-accept", type=int, default=1000000)
parser.add_argument("--db-load-mode", type=int, default=2)
parser.add_argument("--threads", type=int, default=64)
args = parser.parse_args()
queries, is_complex = get_queries(args.query, None)
queries_unique = []
for job_number, (raw_jobname, query_sequences, a3m_lines) in enumerate(queries):
# remove duplicates before searching
query_sequences = (
[query_sequences] if isinstance(query_sequences, str) else query_sequences
)
query_seqs_unique = []
for x in query_sequences:
if x not in query_seqs_unique:
query_seqs_unique.append(x)
query_seqs_cardinality = [0] * len(query_seqs_unique)
for seq in query_sequences:
seq_idx = query_seqs_unique.index(seq)
query_seqs_cardinality[seq_idx] += 1
queries_unique.append([raw_jobname, query_seqs_unique, query_seqs_cardinality])
args.base.mkdir(exist_ok=True, parents=True)
query_file = args.base.joinpath("query.fas")
with query_file.open("w") as f:
for job_number, (
raw_jobname,
query_sequences,
query_seqs_cardinality,
) in enumerate(queries_unique):
for seq in query_sequences:
f.write(f">{raw_jobname}\n{seq}\n")
run_mmseqs(
args.mmseqs,
["createdb", query_file, args.base.joinpath("qdb"), "--shuffle", "0"],
)
with args.base.joinpath("qdb.lookup").open("w") as f:
id = 0
file_number = 0
for job_number, (
raw_jobname,
query_sequences,
query_seqs_cardinality,
) in enumerate(queries_unique):
for seq in query_sequences:
f.write(f"{id}\t{raw_jobname}\t{file_number}\n")
id += 1
file_number += 1
mmseqs_search_monomer(
mmseqs=args.mmseqs,
dbbase=args.dbbase,
base=args.base,
uniref_db=args.db1,
template_db=args.db2,
metagenomic_db=args.db3,
use_env=args.use_env,
use_templates=args.use_templates,
filter=args.filter,
expand_eval=args.expand_eval,
align_eval=args.align_eval,
diff=args.diff,
qsc=args.qsc,
max_accept=args.max_accept,
s=args.s,
db_load_mode=args.db_load_mode,
threads=args.threads,
)
    if is_complex:
mmseqs_search_pair(
mmseqs=args.mmseqs,
dbbase=args.dbbase,
base=args.base,
uniref_db=args.db1,
s=args.s,
db_load_mode=args.db_load_mode,
threads=args.threads,
)
id = 0
for job_number, (
raw_jobname,
query_sequences,
query_seqs_cardinality,
) in enumerate(queries_unique):
unpaired_msa = []
paired_msa = None
if len(query_seqs_cardinality) > 1:
paired_msa = []
for seq in query_sequences:
with args.base.joinpath(f"{id}.a3m").open("r") as f:
unpaired_msa.append(f.read())
args.base.joinpath(f"{id}.a3m").unlink()
if len(query_seqs_cardinality) > 1:
with args.base.joinpath(f"{id}.paired.a3m").open("r") as f:
paired_msa.append(f.read())
args.base.joinpath(f"{id}.paired.a3m").unlink()
id += 1
msa = msa_to_str(
unpaired_msa, paired_msa, query_sequences, query_seqs_cardinality
)
args.base.joinpath(f"{job_number}.a3m").write_text(msa)
query_file.unlink()
run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb")])
run_mmseqs(args.mmseqs, ["rmdb", args.base.joinpath("qdb_h")])
if __name__ == "__main__":
main()
| ColabFold-main | colabfold/mmseqs/search.py |
from setuptools import setup, find_packages
setup(
name = 'mixture-of-attention',
packages = find_packages(exclude=[]),
version = '0.0.24',
license='MIT',
description = 'Mixture of Attention',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/mixture-of-attention',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'mixture-of-experts',
'routed attention'
],
install_requires=[
'colt5-attention>=0.10.14',
'einops>=0.6.1',
'local-attention>=1.8.6',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| mixture-of-attention-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from mixture_of_attention.transformer import Transformer
from mixture_of_attention.autoregressive_wrapper import AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate transformer
model = Transformer(
num_tokens = 256,
dim = 512,
depth = 8,
num_experts = 2,
seq_len = SEQ_LEN,
local_attn_window_size = 64,
num_routed_queries = 32,
num_routed_key_values = 64,
cosine_sim_routing = True,
use_triton = True
)
model = AutoregressiveWrapper(model).cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f"training loss: {loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str, "\n")
| mixture-of-attention-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, -torch.finfo(logits.dtype).max)
probs.scatter_(1, ind, val)
return probs
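# Illustrative sketch only (not part of the original wrapper): top_k keeps the
# k = int((1 - thres) * vocab_size) largest logits and pushes everything else to
# the most negative representable value, so a later softmax effectively ignores
# them. The tensor values below are made up.
def _example_top_k():
    logits = torch.tensor([[5.0, 4.0, 3.0, 2.0, 1.0, 0.0, -1.0, -2.0, -3.0, -4.0]])
    filtered = top_k(logits, thres = 0.5)  # keeps k = int(0.5 * 10) = 5 entries
    # the five largest logits survive; the rest get ~zero probability after softmax
    return filtered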
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
pad_value = 0
):
super().__init__()
self.seq_len = net.seq_len
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(
self,
prompt,
seq_len,
temperature=1.0,
filter_thres=0.9,
**kwargs
):
b, t, device = *prompt.shape, prompt.device
out = prompt
for _ in range(seq_len):
logits = self.net(out[:, -self.seq_len:], **kwargs)[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim = -1)
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x, labels = x[:, :-1], x[:, 1:]
logits = self.net(x, **kwargs)
        logits = rearrange(logits, "b n c -> b c n")
return F.cross_entropy(logits, labels)
| mixture-of-attention-main | mixture_of_attention/autoregressive_wrapper.py |
from mixture_of_attention.mixture_of_attention import (
MixtureOfAttention,
MixtureOfAutoregressiveAttention,
Attention
)
| mixture-of-attention-main | mixture_of_attention/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
            causal_mask = self.get_mask(n, k.shape[-2], device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
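# Illustrative sketch only (not part of the original file): a minimal call into
# Attend using the non-flash code path. Shapes are (batch, heads, seq, dim_head)
# and the optional mask is a boolean key-padding mask of shape (batch, seq); all
# of the values below are made up.
def _example_attend():
    attend = Attend(dropout = 0., causal = True, flash = False)
    q = torch.randn(1, 8, 16, 64)
    k = torch.randn(1, 8, 16, 64)
    v = torch.randn(1, 8, 16, 64)
    mask = torch.ones(1, 16, dtype = torch.bool)
    return attend(q, k, v, mask = mask)  # -> (1, 8, 16, 64)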
| mixture-of-attention-main | mixture_of_attention/attend.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from mixture_of_attention.mixture_of_attention import MixtureOfAutoregressiveAttention
from mixture_of_attention.rotary_emb import RotaryEmbedding
# helper functions
def exists(val):
return val is not None
# classes
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
normed = F.normalize(x, dim = -1)
return normed * self.scale * self.gamma
def FeedForward(dim, mult = 4):
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
# main class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
seq_len,
local_attn_window_size,
num_routed_queries,
num_routed_key_values,
num_experts,
cosine_sim_routing = True,
routed_window_size = None,
dim_head = 64,
heads = 8,
ff_mult = 4,
use_triton = True,
routed_rotary_emb = True
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(seq_len, dim)
self.seq_len = seq_len
self.rotary_emb = RotaryEmbedding(dim_head) if routed_rotary_emb else None
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
MixtureOfAutoregressiveAttention(
dim = dim,
local_attn_window_size = local_attn_window_size,
routed_window_size = routed_window_size,
num_routed_queries = num_routed_queries,
num_routed_key_values = num_routed_key_values,
cosine_sim_routing = cosine_sim_routing,
num_experts = num_experts,
dim_head = dim_head,
heads = heads,
use_triton = use_triton
),
FeedForward(dim = dim, mult = ff_mult)
]))
self.to_logits = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, num_tokens)
)
@property
def device(self):
return next(self.parameters()).device
def forward(self, x):
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(x.shape[-2], device = self.device))
rotary_emb = None
if exists(self.rotary_emb):
rotary_emb = self.rotary_emb(x.shape[1])
for attn, ff in self.layers:
x = attn(x, rotary_emb = rotary_emb) + x
x = ff(x) + x
return self.to_logits(x)
| mixture-of-attention-main | mixture_of_attention/transformer.py |
import math
import torch
import torch.nn.functional as F
from torch import Tensor, nn, einsum
from typing import Tuple, Optional
from einops import rearrange, repeat, reduce, pack, unpack
from mixture_of_attention.attend import Attend
from mixture_of_attention.rotary_emb import apply_rotary_pos_emb
from local_attention import LocalMHA
from colt5_attention import CoordinateDescentRouter
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_to_multiple(tensor, multiple, dim = -1, value = 0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor, seq_len
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
padded_tensor = F.pad(tensor, (*pad_offset, 0, remainder), value = value)
return padded_tensor, seq_len
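# Illustrative sketch only (not part of the original module): pad_to_multiple pads
# the chosen dimension up to the next multiple and returns the original length so
# the caller can slice the padding off again. The shapes below are made up.
def _example_pad_to_multiple():
    x = torch.randn(2, 10, 64)                          # (batch, seq, dim)
    padded, orig_len = pad_to_multiple(x, 4, dim = -2)  # seq 10 -> 12, orig_len = 10
    return padded[..., :orig_len, :]                    # undo the padding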
# normalization
class RMSNorm(nn.Module):
def __init__(self, dim, groups = 1):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(groups, dim, 1))
def forward(self, x):
normed = F.normalize(x, dim = -2)
return normed * self.scale * self.gamma
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
dim_context = None,
heads = 8,
causal = False,
groups = 1, # defines number of experts
dropout = 0.,
flash = False,
prenorm = False
):
super().__init__()
self.heads = heads
self.groups = groups
dim_inner = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = RMSNorm(dim, groups = groups) if prenorm else nn.Identity()
self.context_norm = RMSNorm(dim_context, groups = groups) if prenorm else nn.Identity()
self.attend = Attend(
dropout = dropout,
causal = causal,
flash = flash
)
# null key / value, to protect against a row that is all masked out
self.null_kv = nn.Parameter(torch.randn(2, groups, heads, 1, dim_head))
# taking advantage of convolutional groups to process experts in parallel
self.to_q = nn.Conv1d(dim * groups, dim_inner * groups, 1, bias = False, groups = groups)
self.to_kv = nn.Conv1d(dim_context * groups, dim_inner * 2 * groups, 1, bias = False, groups = groups)
self.to_out = nn.Conv1d(dim_inner * groups, dim * groups, 1, bias = False, groups = groups)
def forward(
self,
x,
context = None,
mask = None,
queries_scale = None,
keys_scale = None,
values_scale = None,
output_scale = None,
rotary_emb: Optional[Tuple[Tensor, Tensor]] = None
):
"""
einops
b - batch
g - groups
n - sequence
d - feature dimension
"""
b, g, h = x.shape[0], self.groups, self.heads
one_expert = x.ndim == 3
if one_expert:
assert g == 1
x = rearrange(x, 'b n d -> b 1 n d')
assert x.ndim == 4
assert x.shape[1] == g
# fold the groups into the feature dimension to be processed in one go by grouped convolutions
x = rearrange(x, 'b g n d -> b g d n')
# handle context for cross attention
if exists(context):
context_one_expert = context.ndim == 3
if context_one_expert:
assert g == 1
context = rearrange(context, 'b n d -> b 1 n d')
assert context.ndim == 4
assert context.shape[1] == g
context = rearrange(context, 'b g n d -> b g d n')
context = default(context, x)
# take care of mask
if exists(mask):
if mask.ndim == 2:
mask = repeat(mask, 'b n -> (b g) n', g = g)
elif mask.ndim == 3:
mask = rearrange(mask, 'b g n -> (b g) n')
mask = F.pad(mask, (1, 0), value = True)
# prenorm if applicable
x = self.norm(x)
context = self.context_norm(context)
# fold groups into dimension for grouped conv
x, context = map(lambda t: rearrange(t, 'b g d n -> b (g d) n'), (x, context))
# queries, keys, values
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = 1))
# split out heads and merge groups into batches
q, k, v = map(lambda t: rearrange(t, 'b (g h d) n -> b g h n d', h = h, g = g), (q, k, v))
# rotary embedding
if exists(rotary_emb):
q_rotary_emb, k_rotary_emb = rotary_emb
if q_rotary_emb.ndim > 2:
q_rotary_emb = rearrange(q_rotary_emb, 'b g n d -> b g 1 n d')
if k_rotary_emb.ndim > 2:
k_rotary_emb = rearrange(k_rotary_emb, 'b g n d -> b g 1 n d')
q = apply_rotary_pos_emb(q_rotary_emb, q)
k = apply_rotary_pos_emb(k_rotary_emb, k)
# give gradients to routed keys / values via normalized scores from the router, if passed in
if exists(queries_scale):
q = q * queries_scale
if exists(keys_scale):
k = k * keys_scale
if exists(values_scale):
v = v * values_scale
# merge group into batch
q, k, v = map(lambda t: rearrange(t, 'b g ... -> (b g) ...'), (q, k, v))
# concat null key / values, to protect against a row having all masked out elements and save a lot of headache
nk, nv = map(lambda t: repeat(t, 'g h 1 d -> (b g) h 1 d', b = b), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# attention
out = self.attend(q, k, v, mask = mask)
# combine heads out
out = rearrange(out, '(b g) h n d -> b (g h d) n', g = g)
out = self.to_out(out)
out = rearrange(out, 'b (g d) n -> b g n d', g = g)
if one_expert:
out = rearrange(out, 'b 1 n d -> b n d')
if exists(output_scale):
out = out * output_scale
return out
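# Illustrative sketch only (not from the original file): self attention with two
# expert groups processed in parallel through the grouped convolutions above.
# Input shape is (batch, groups, seq, dim); all sizes below are made up.
def _example_grouped_attention():
    attn = Attention(dim = 64, dim_head = 32, heads = 4, groups = 2, flash = False)
    x = torch.randn(1, 2, 16, 64)
    return attn(x)  # -> (1, 2, 16, 64)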
# mixture of attention
class MixtureOfAttention(nn.Module):
def __init__(
self,
dim,
*,
num_routed_queries,
num_routed_key_values,
dim_context = None,
local_attn = False,
local_attn_window_size = None,
num_experts = 2,
dim_head = 64,
heads = 8,
dropout = 0.,
use_triton = True,
flash_attn = True,
prenorm = True,
average_routed = False,
**kwargs
):
super().__init__()
dim_context = default(dim_context, dim)
self.num_routed_queries = num_routed_queries
self.num_routed_key_values = num_routed_key_values
self.null_routed_token = nn.Parameter(torch.randn(1, 1, dim)) if not local_attn else None
self.average_routed = average_routed
self.local_attn = None
if local_attn:
assert exists(local_attn_window_size)
self.local_attn = LocalMHA(
dim = dim,
dim_head = dim_head,
heads = heads,
prenorm = prenorm,
window_size = local_attn_window_size
)
self.query_router = CoordinateDescentRouter(
dim,
num_routing_tokens = num_experts,
use_triton = use_triton,
**kwargs
)
self.key_value_router = CoordinateDescentRouter(
dim_context,
num_routing_tokens = num_experts,
use_triton = use_triton,
**kwargs
)
self.attn = Attention(
dim = dim,
dim_context = dim_context,
dim_head = dim_head,
heads = heads,
groups = num_experts,
dropout = dropout,
flash = flash_attn,
prenorm = prenorm
)
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
num_routed_queries = None,
num_routed_key_values = None,
rotary_emb = None
):
num_routed_queries = default(num_routed_queries, self.num_routed_queries)
num_routed_key_values = default(num_routed_key_values, self.num_routed_key_values)
is_cross_attn = exists(context)
assert not (exists(self.local_attn) and is_cross_attn), 'cannot do cross attention with local attention (only for self attention)'
if not is_cross_attn:
# self attention if context and context mask not passed in
context = x
context_mask = mask
query_indices, query_scores, queries, query_mask = self.query_router(x, mask = mask, num_tokens = num_routed_queries, keep_one_route_dim = True)
query_scores = rearrange(query_scores, 'b g n -> b g n 1')
kv_indices, key_value_scores, key_values, key_value_mask = self.key_value_router(context, mask = context_mask, num_tokens = num_routed_key_values, keep_one_route_dim = True)
key_value_scores = rearrange(key_value_scores, 'b g n -> b g 1 n 1')
# rotary embeddings
if exists(rotary_emb):
assert not is_cross_attn, 'rotary embedding should not be used for cross attending'
q_rotary_emb = rotary_emb[query_indices] if exists(query_indices) else rotary_emb
k_rotary_emb = rotary_emb[kv_indices] if exists(kv_indices) else rotary_emb
rotary_emb = (q_rotary_emb, k_rotary_emb)
# attend
attn_out = self.attn(
queries,
rotary_emb = rotary_emb,
context = key_values,
mask = key_value_mask,
values_scale = key_value_scores,
output_scale = query_scores
)
local_out = None
if exists(self.local_attn):
local_out = self.local_attn(x, mask = mask)
need_route_queries = exists(query_indices)
if not need_route_queries:
out = attn_out
if exists(local_out):
local_out = rearrange(local_out, 'b n d -> b 1 n d')
out = torch.cat((local_out, out), dim = 1)
            out = reduce(out, 'b e n d -> b n d', 'mean')
if exists(mask):
out = out.masked_fill(~mask[..., None], 0.)
return out
out = torch.zeros_like(x)
counts = torch.zeros(x.shape[:-1], device = x.device)
query_indices = rearrange(query_indices, 'b g n -> b (g n)')
attn_out = rearrange(attn_out, 'b g n d -> b (g n) d')
expanded_query_indices = repeat(query_indices, 'b n -> b n d', d = x.shape[-1])
attn_out_summed = out.scatter_add(1, expanded_query_indices, attn_out)
ones = torch.ones(attn_out.shape[:-1], device = self.device)
if exists(query_mask):
ones = ones * rearrange(query_mask, 'b g n -> b (g n)')
counts = counts.scatter_add(1, query_indices, ones)
counts = rearrange(counts, '... -> ... 1')
has_unrouted = not exists(local_out)
if not has_unrouted:
counts = counts + 1
attn_out_summed = attn_out_summed + local_out
else:
not_routed_mask = counts == 0
attn_out_summed = attn_out_summed.masked_fill(not_routed_mask, 0.)
out = attn_out_summed
# average if needed
if self.average_routed:
out = out / counts.clamp(min = 1e-5)
# for the positions that were not routed, use a learned routing token instead of just 0s
if has_unrouted:
out = torch.where(
not_routed_mask,
self.null_routed_token,
out,
)
if exists(mask):
out = out.masked_fill(~mask[..., None], 0.)
return out
# mixture of autoregressive attention
class MixtureOfAutoregressiveAttention(nn.Module):
def __init__(
self,
dim,
*,
num_routed_queries,
num_routed_key_values,
local_attn_window_size,
routed_window_size = None,
num_experts = 2,
dim_head = 64,
heads = 8,
dropout = 0.,
use_triton = False,
flash_attn = True,
prenorm = True,
average_routed = False,
**kwargs
):
super().__init__()
self.num_routed_queries = num_routed_queries
self.num_routed_key_values = num_routed_key_values
self.num_experts = num_experts
self.null_tokens = nn.Parameter(torch.randn(num_experts, dim))
routed_window_size = default(routed_window_size, local_attn_window_size)
self.routed_window_size = routed_window_size
self.average_routed = average_routed
self.local_attn = LocalMHA(
dim = dim,
dim_head = dim_head,
heads = heads,
prenorm = prenorm,
causal = True,
window_size = local_attn_window_size
)
self.query_router = CoordinateDescentRouter(
dim,
num_routing_tokens = num_experts,
use_triton = use_triton,
**kwargs
)
self.key_value_router = CoordinateDescentRouter(
dim,
num_routing_tokens = num_experts,
use_triton = use_triton,
**kwargs
)
self.attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
groups = num_experts,
dropout = dropout,
flash = flash_attn,
prenorm = prenorm
)
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
x,
rotary_emb = None,
num_routed_queries = None,
num_routed_key_values = None
):
b = x.shape[0]
w = self.routed_window_size
num_windows = math.ceil(x.shape[-2] / w) - 1
# calculate local attention first
local_out = self.local_attn(x)
        # early return the local attention results if the sequence spans at most one routed window (nothing earlier to route to)
if num_windows == 0:
return local_out
# pad sequence to multiple of routing window size
mask = torch.ones(x.shape[:-1], device = self.device, dtype = torch.bool)
x, seq_len = pad_to_multiple(x, w, dim = -2)
mask, _ = pad_to_multiple(mask, w, dim = -1, value = False)
context = x[..., :-w, :]
context = repeat(context, 'b n d -> (b nw) n d', nw = num_windows)
context_mask = torch.ones((num_windows, num_windows), device = self.device, dtype = torch.bool).tril()
context_mask = repeat(context_mask, 'n1 n2 -> (b n1) (n2 w)', b = b, w = w)
# fold queries and mask into windows
x = rearrange(x, 'b (n w) d -> b n w d', w = w)
mask = rearrange(mask, 'b (n w) -> b n w', w = w)
# omit the first window of queries, as they have nothing to attend to
x = rearrange(x[:, 1:, ...], 'b n w d -> (b n) w d')
mask = rearrange(mask[:, 1:, ...], 'b n w -> (b n) w')
# get number of queries and key values to route
num_routed_queries = default(num_routed_queries, self.num_routed_queries)
num_routed_key_values = default(num_routed_key_values, self.num_routed_key_values)
# coordinate descent routing
query_indices, query_scores, queries, query_mask = self.query_router(x, mask = mask, num_tokens = num_routed_queries, keep_one_route_dim = True)
query_scores = rearrange(query_scores, 'b g n -> b g n 1')
kv_indices, key_value_scores, key_values, key_value_mask = self.key_value_router(context, mask = context_mask, num_tokens = num_routed_key_values, keep_one_route_dim = True)
key_value_scores = rearrange(key_value_scores, 'b g n -> b g 1 n 1')
# rotary embeddings
if exists(rotary_emb):
rotary_emb, _ = pad_to_multiple(rotary_emb, w, dim = -2)
windowed_rotary_emb = rearrange(rotary_emb, '(n w) d -> n w d', w = w)
windowed_rotary_emb = windowed_rotary_emb[1:]
windowed_rotary_emb = repeat(windowed_rotary_emb, 'n w d -> (b n) g w d', b = b, g = query_scores.shape[1])
if exists(query_indices):
rotary_query_indices = repeat(query_indices, '... -> ... d', d = windowed_rotary_emb.shape[-1])
q_rotary_emb = windowed_rotary_emb.gather(2, rotary_query_indices)
else:
q_rotary_emb = windowed_rotary_emb
k_rotary_emb = rotary_emb[kv_indices] if exists(kv_indices) else rotary_emb[:context.shape[-2]]
rotary_emb = (q_rotary_emb, k_rotary_emb)
# attend
attn_out = self.attn(
queries,
rotary_emb = rotary_emb,
context = key_values,
mask = key_value_mask,
values_scale = key_value_scores,
output_scale = query_scores
)
need_route_queries = exists(query_indices)
if not need_route_queries:
out = F.pad(attn_out, (0, 0, w, 0), value = 0.)
out = out[:, :, :seq_len]
if exists(local_out):
local_out = rearrange(local_out, 'b n d -> b 1 n d')
out = torch.cat((local_out, out), dim = 1)
out = reduce(out, 'b e n d -> b n d', 'mean' if self.average_routed else 'sum')
return out
out = torch.zeros((x.shape[0], self.num_experts, *x.shape[1:]), device = x.device, dtype = x.dtype)
counts = torch.zeros((x.shape[0], self.num_experts, x.shape[-2]), device = x.device)
ones = torch.ones(attn_out.shape[:-1], device = self.device)
if exists(query_mask):
ones = ones * query_mask
counts = counts.scatter_add(2, query_indices, ones)
expanded_query_indices = repeat(query_indices, 'b g n -> b g n d', d = x.shape[-1])
attn_out_summed = out.scatter_add(2, expanded_query_indices, attn_out)
# for the positions that were not routed, fill with each individual expert null tokens
fill_null_token = counts == 0 & ~rearrange(mask, 'b n -> b 1 n')
attn_out_summed = torch.where(
rearrange(fill_null_token, '... -> ... 1'),
rearrange(self.null_tokens, 'g d -> 1 g 1 d'),
attn_out_summed
)
# un-window the attention output as well as the routed counts (denominator)
attn_out_summed = rearrange(attn_out_summed, '(b n) g w d -> b g (n w) d', b = b)
attn_out_summed = F.pad(attn_out_summed, (0, 0, w, 0), value = 0.)
attn_out_summed = attn_out_summed[..., :seq_len, :]
# sum local attended tokens with routed tokens
attn_out_summed = reduce(attn_out_summed, 'b g n d -> b n d', 'sum')
attn_out_summed = attn_out_summed + local_out
# in experiments, seems to perform better without averaging
if not self.average_routed:
return attn_out_summed
# average tokens
return attn_out_summed / (self.num_experts + 1)
| mixture-of-attention-main | mixture_of_attention/mixture_of_attention.py |
import os
import re
import sys
from setuptools import setup, find_packages
install_requires = ['torch>=1.1.0']
PY36 = (3, 6, 0)
if sys.version_info < PY36:
raise RuntimeError('torch-optimizer requires Python 3.6.0+')
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
def read_version():
regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
init_py = os.path.join(
os.path.dirname(__file__), 'torch_optimizer', '__init__.py'
)
with open(init_py) as f:
for line in f:
match = regexp.match(line)
if match is not None:
return match.group(1)
else:
raise RuntimeError(
'Cannot find version in torch_optimizer/__init__.py'
)
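# Note (added for illustration, not part of the original setup script): the regular
# expression above matches a module-level assignment such as
#   __version__ = '0.0.1a11'
# and read_version() returns the quoted string ('0.0.1a11' in this hypothetical case).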
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
]
keywords = [
'torch-optimizer',
'pytorch',
'accsgd',
'adamod',
'diffgrad',
'lamb',
'radam',
'sgdw',
'yogi',
]
setup(
name='torch-optimizer',
version=read_version(),
description=('pytorch-optimizer'),
long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
long_description_content_type='text/x-rst',
classifiers=classifiers,
platforms=['POSIX'],
author='Nikolay Novik',
author_email='[email protected]',
url='https://github.com/jettify/pytorch-optimizer',
download_url='https://pypi.org/project/torch-optimizer/',
license='Apache 2',
packages=find_packages(),
install_requires=install_requires,
keywords=keywords,
zip_safe=True,
include_package_data=True,
)
| pytorch-optimizer-master | setup.py |
import functools
from copy import deepcopy
import torch
import torch_optimizer as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau, StepLR
from tests.utils import assert_dict_equal
def _build_params_dict(weight, bias, **kwargs):
return [{'params': [weight]}, dict(params=[bias], **kwargs)]
def _build_params_dict_single(weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
def make_test_params(optimizer_class):
cases = [
(lambda weight, bias: optimizer_class([weight, bias], lr=1e-3),),
(
lambda weight, bias: optimizer_class(
_build_params_dict(weight, bias, lr=1e-2), lr=1e-3
),
),
(
lambda weight, bias: optimizer_class(
_build_params_dict_single(weight, bias, lr=1e-2), lr=1e-3
),
),
(
lambda weight, bias: optimizer_class(
_build_params_dict_single(weight, bias, lr=1e-2)
),
),
(
lambda weight, bias: optimizer_class([weight, bias], lr=1e-3),
[lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
),
(
lambda weight, bias: optimizer_class([weight, bias], lr=1e-3),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
),
(
lambda weight, bias: optimizer_class([weight, bias], lr=1e-3),
[
lambda opt: StepLR(opt, gamma=0.99, step_size=10),
lambda opt: ExponentialLR(opt, gamma=0.99),
lambda opt: ReduceLROnPlateau(opt),
],
),
]
ids = [f'{optimizer_class.__name__}_{i}' for i in range(len(cases))]
return cases, ids
def build_lookahead(*a, **kw):
base = optim.Yogi(*a, **kw)
return optim.Lookahead(base)
optimizers = [
optim.AccSGD,
optim.AdaBound,
optim.AdaMod,
optim.DiffGrad,
optim.Lamb,
optim.NovoGrad,
optim.RAdam,
optim.SGDW,
optim.Yogi,
build_lookahead,
]
def pytest_generate_tests(metafunc):
if 'optimizer_constructor' in metafunc.fixturenames:
cases = []
ids = []
for o in optimizers:
c, i = make_test_params(o)
cases = cases + c
ids = ids + i
metafunc.parametrize('optimizer_constructor', cases, ids=ids)
class TestOptim:
def _test_basic_cases_template(
self, weight, bias, input, constructor, scheduler_constructors
):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
optimizer = constructor(weight, bias)
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if (
y.is_cuda
and bias.is_cuda
and y.get_device() != bias.get_device()
):
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
optimizer.step(fn)
for _i in range(200):
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
val_loss = fn()
scheduler.step(val_loss)
else:
scheduler.step()
assert fn().item() < initial_value
def _test_state_dict(self, weight, bias, input, constructor):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input_cuda if weight.is_cuda else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
weight_c = Variable(weight.data.clone(), requires_grad=True)
bias_c = Variable(bias.data.clone(), requires_grad=True)
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
precision = 0.0001
# Run both optimizations in parallel
for _i in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
assert torch.allclose(weight, weight_c, atol=precision)
assert torch.allclose(bias, bias_c, atol=precision)
# Make sure state dict wasn't modified
assert assert_dict_equal(state_dict, state_dict_c)
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
input_cuda = Variable(input.data.float().cuda())
weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True)
bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(
fn_base, optimizer_cuda, weight_cuda, bias_cuda
)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
assert assert_dict_equal(state_dict, state_dict_c)
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
assert weight == weight_cuda
assert bias == bias_cuda
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return set(k for k in obj.__dict__ if not k.startswith('_'))
assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer))
def _test_basic_cases(
self,
constructor,
scheduler_constructors=None,
ignore_multidevice=False,
):
if scheduler_constructors is None:
scheduler_constructors = []
self._test_state_dict(
torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor
)
self._test_basic_cases_template(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor,
scheduler_constructors,
)
# non-contiguous parameters
self._test_basic_cases_template(
torch.randn(10, 5, 2)[..., 0],
torch.randn(10, 2)[..., 0],
torch.randn(5),
constructor,
scheduler_constructors,
)
# CUDA
if not torch.cuda.is_available():
return
self._test_basic_cases_template(
torch.randn(10, 5).cuda(),
torch.randn(10).cuda(),
torch.randn(5).cuda(),
constructor,
scheduler_constructors,
)
# Multi-GPU
if not torch.cuda.device_count() > 1 or ignore_multidevice:
return
self._test_basic_cases_template(
torch.randn(10, 5).cuda(0),
torch.randn(10).cuda(1),
torch.randn(5).cuda(0),
constructor,
scheduler_constructors,
)
def test_optimizer(self, optimizer_constructor):
self._test_basic_cases(*optimizer_constructor)
| pytorch-optimizer-master | tests/test_optimizer.py |
import torch
import pytest
import torch_optimizer as optim
def rosenbrock(tensor):
x, y = tensor
return (1 - x) ** 2 + 1 * (y - x ** 2) ** 2
def quadratic(tensor):
x, y = tensor
a = 1.0
b = 1.0
return (x ** 2) / a + (y ** 2) / b
def beale(tensor):
x, y = tensor
f = (
(1.5 - x + x * y) ** 2
+ (2.25 - x + x * y ** 2) ** 2
+ (2.625 - x + x * y ** 3) ** 2
)
return f
cases = [
(rosenbrock, (1.5, 1.5), (1, 1)),
(quadratic, (1.5, 1.5), (0, 0)),
(beale, (1.5, 1.5), (3, 0.5)),
]
def ids(v):
n = f'{v[0].__name__} {v[1:]}'
return n
def build_lookahead(*a, **kw):
base = optim.Yogi(*a, **kw)
return optim.Lookahead(base)
optimizers = [
(
optim.NovoGrad,
{'lr': 2.9, 'betas': (0.9, 0.999), 'grad_averaging': True},
900,
),
(optim.RAdam, {'lr': 0.01, 'betas': (0.9, 0.95), 'eps': 1e-3}, 800),
(optim.SGDW, {'lr': 0.001, 'momentum': 0.99}, 9000),
(optim.DiffGrad, {'lr': 0.5}, 500),
(optim.AdaMod, {'lr': 1.0}, 800),
(optim.AdaBound, {'lr': 1.0}, 800),
(optim.Yogi, {'lr': 1.0}, 500),
(optim.AccSGD, {'lr': 0.015}, 800),
(build_lookahead, {'lr': 1.0}, 500),
]
@pytest.mark.parametrize('case', cases, ids=ids)
@pytest.mark.parametrize('optimizer_config', optimizers, ids=ids)
def test_benchmark_function(case, optimizer_config):
func, initial_state, min_loc = case
optimizer_class, config, iterations = optimizer_config
x = torch.Tensor(initial_state).requires_grad_(True)
x_min = torch.Tensor(min_loc)
optimizer = optimizer_class([x], **config)
for _ in range(iterations):
optimizer.zero_grad()
f = func(x)
f.backward(retain_graph=True)
optimizer.step()
assert torch.allclose(x, x_min, atol=0.001)
name = optimizer.__class__.__name__
assert name in optimizer.__repr__()
| pytorch-optimizer-master | tests/test_basic.py |
import torch
def assert_dict_equal(a, b, precision=0.000001):
if isinstance(a, dict) and isinstance(b, dict):
assert set(a.keys()) == set(b.keys())
for k in a.keys():
assert_dict_equal(a[k], b[k], precision)
elif isinstance(a, list) and isinstance(b, list):
assert len(a) == len(b)
for v1, v2 in zip(a, b):
assert_dict_equal(v1, v2, precision)
elif isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor):
assert torch.allclose(a, b, atol=precision)
    else:
        assert a == b
| pytorch-optimizer-master | tests/conftest.py |
import torch
import pytest
import torch_optimizer as optim
def assert_sparse_not_supported(optimizer_class, err_msg=None):
param = torch.randn(1, 1).to_sparse().requires_grad_(True)
grad = torch.randn(1, 1).to_sparse()
param.grad = grad
optimizer = optimizer_class([param])
optimizer.zero_grad()
with pytest.raises(RuntimeError) as ctx:
optimizer.step()
msg = err_msg or 'does not support sparse gradients'
assert msg in str(ctx.value)
optimizers = [
optim.AdaBound,
optim.AdaMod,
optim.DiffGrad,
optim.Lamb,
optim.NovoGrad,
optim.RAdam,
optim.SGDW,
optim.Yogi,
]
@pytest.mark.parametrize('optimizer_class', optimizers)
def test_sparse_not_supported(optimizer_class):
assert_sparse_not_supported(optimizer_class)
@pytest.mark.parametrize('optimizer_class', optimizers)
def test_learning_rate(optimizer_class):
lr = -0.01
with pytest.raises(ValueError) as ctx:
optimizer_class(None, lr=-0.01)
msg = f'Invalid learning rate: {lr}'
assert msg in str(ctx.value)
eps_optimizers = [
optim.AdaBound,
optim.AdaMod,
optim.DiffGrad,
optim.Lamb,
optim.NovoGrad,
optim.RAdam,
# optim.SGDW,
optim.Yogi,
]
@pytest.mark.parametrize('optimizer_class', eps_optimizers)
def test_eps_validation(optimizer_class):
eps = -0.1
with pytest.raises(ValueError) as ctx:
optimizer_class(None, lr=0.1, eps=eps)
msg = f'Invalid epsilon value: {eps}'
assert msg in str(ctx.value)
weight_decay_optimizers = [
optim.AccSGD,
optim.AdaBound,
optim.AdaMod,
optim.DiffGrad,
optim.Lamb,
optim.RAdam,
optim.SGDW,
optim.Yogi,
]
@pytest.mark.parametrize('optimizer_class', optimizers)
def test_weight_decay_validation(optimizer_class):
weight_decay = -0.1
with pytest.raises(ValueError) as ctx:
optimizer_class(None, lr=0.1, weight_decay=weight_decay)
msg = f'Invalid weight_decay value: {weight_decay}'
assert msg in str(ctx.value)
betas_optimizers = [
optim.AdaBound,
optim.AdaMod,
optim.DiffGrad,
optim.Lamb,
optim.NovoGrad,
optim.RAdam,
optim.Yogi,
]
@pytest.mark.parametrize('optimizer_class', eps_optimizers)
def test_betas_validation(optimizer_class):
betas = (-1, 0.999)
with pytest.raises(ValueError) as ctx:
optimizer_class(None, lr=0.1, betas=(-1, 0.999))
msg = f'Invalid beta parameter at index 0: {betas[0]}'
assert msg in str(ctx.value)
betas = (0.9, -0.999)
with pytest.raises(ValueError) as ctx:
optimizer_class(None, lr=0.1, betas=betas)
msg = f'Invalid beta parameter at index 1: {betas[1]}'
assert msg in str(ctx.value)
| pytorch-optimizer-master | tests/test_param_validation.py |
import numpy as np
import pytest
import torch
import torch_optimizer as optim
from torch import nn
def make_dataset(seed=42):
rng = np.random.RandomState(seed)
N = 100
D = 2
X = rng.randn(N, D) * 2
# center the first N/2 points at (-2,-2)
mid = N // 2
X[: mid, :] = X[: mid, :] - 2 * np.ones((mid, D))
# center the last N/2 points at (2, 2)
X[mid:, :] = X[mid:, :] + 2 * np.ones((mid, D))
# labels: first N/2 are 0, last N/2 are 1
Y = np.array([0] * mid + [1] * mid).reshape(100, 1)
x = torch.Tensor(X)
y = torch.Tensor(Y)
return x, y
class LogisticRegression(nn.Module):
def __init__(self):
super(LogisticRegression, self).__init__()
self.linear1 = nn.Linear(2, 4)
self.linear2 = nn.Linear(4, 1)
def forward(self, x):
output = torch.relu(self.linear1(x))
output = self.linear2(output)
y_pred = torch.sigmoid(output)
return y_pred
def ids(v):
return f'{v[0].__name__} {v[1:]}'
def build_lookahead(*a, **kw):
base = optim.Yogi(*a, **kw)
return optim.Lookahead(base)
optimizers = [
(optim.NovoGrad, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.Lamb, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.SGDW, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.DiffGrad, {'lr': 0.5, 'weight_decay': 1e-3}, 200),
(optim.AdaMod, {'lr': 2.0, 'weight_decay': 1e-3}, 200),
(optim.AdaBound, {'lr': 1.1, 'weight_decay': 1e-3}, 200),
(optim.Yogi, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.RAdam, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.AccSGD, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(build_lookahead, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
]
@pytest.mark.parametrize('optimizer_config', optimizers, ids=ids)
def test_basic_nn_model(optimizer_config):
x_data, y_data = make_dataset()
model = LogisticRegression()
loss_fn = nn.BCELoss()
optimizer_class, config, iterations = optimizer_config
optimizer = optimizer_class(model.parameters(), **config)
init_loss = None
for _ in range(iterations):
y_pred = model(x_data)
loss = loss_fn(y_pred, y_data)
if init_loss is None:
init_loss = loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
assert init_loss.item() > 2.0 * loss.item()
| pytorch-optimizer-master | tests/test_optimizer_with_nn.py |
import torch
def assert_dict_equal(a, b, precision=0.000001):
if isinstance(a, dict) and isinstance(b, dict):
assert set(a.keys()) == set(b.keys())
for k in a.keys():
assert_dict_equal(a[k], b[k], precision)
elif isinstance(a, list) and isinstance(b, list):
assert len(a) == len(b)
for v1, v2 in zip(a, b):
assert_dict_equal(v1, v2, precision)
elif isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor):
assert torch.allclose(a, b, atol=precision)
else:
assert a == b
return True
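# Illustrative sketch only (not part of the original test helpers): nested dicts,
# lists and tensors are compared element-wise within the given precision. The
# values below are made up.
def _example_assert_dict_equal():
    a = {'step': 1, 'exp_avg': [torch.tensor([1.0, 2.0])]}
    b = {'step': 1, 'exp_avg': [torch.tensor([1.0, 2.0 + 1e-7])]}
    assert_dict_equal(a, b)  # passes: the tensors differ by less than the precision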
| pytorch-optimizer-master | tests/utils.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pytorch-optimizer'
copyright = '2020, Nikolai Novik'
author = 'Nikolai Novik'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Configuration for intersphinx: refer to the Python standard library and PyTorch
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
desc = 'collection of optimizers for PyTorch'
html_theme_options = {
'description': desc,
'github_user': 'jettify',
'github_repo': 'pytorch-optimizer',
'github_button': True,
'github_type': 'star',
'github_banner': True,
}
| pytorch-optimizer-master | docs/conf.py |
import math
import numpy as np
import torch_optimizer as optim
import torch
from hyperopt import fmin, tpe, hp
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
def rosenbrock(tensor):
# https://en.wikipedia.org/wiki/Test_functions_for_optimization
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
def rastrigin(tensor, lib=torch):
# https://en.wikipedia.org/wiki/Test_functions_for_optimization
x, y = tensor
A = 10
f = (
A * 2
+ (x ** 2 - A * lib.cos(x * math.pi * 2))
+ (y ** 2 - A * lib.cos(y * math.pi * 2))
)
return f
def execute_steps(
func, initial_state, optimizer_class, optimizer_config, num_iter=500
):
x = torch.Tensor(initial_state).requires_grad_(True)
optimizer = optimizer_class([x], **optimizer_config)
steps = np.zeros((2, num_iter + 1))
steps[:, 0] = np.array(initial_state)
for i in range(1, num_iter + 1):
optimizer.zero_grad()
f = func(x)
f.backward(retain_graph=True)
optimizer.step()
steps[:, i] = x.detach().numpy()
return steps
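# Illustrative sketch (an assumption, not part of the original script): tracing a
# single optimizer on the Rosenbrock function with execute_steps. The optimizer and
# learning rate below are arbitrary choices for demonstration only.
def _example_trace():
    steps = execute_steps(
        rosenbrock, (-2.0, 2.0), optim.DiffGrad, {'lr': 0.05}, num_iter=100
    )
    return steps[:, -1]  # final (x, y) after 100 iterations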
def objective_rastrigin(params):
lr = params['lr']
optimizer_class = params['optimizer_class']
initial_state = (-2.0, 3.5)
minimum = (0, 0)
optimizer_config = dict(lr=lr)
num_iter = 100
steps = execute_steps(
rastrigin, initial_state, optimizer_class, optimizer_config, num_iter
)
return (steps[0][-1] - minimum[0]) ** 2 + (steps[1][-1] - minimum[1]) ** 2
def objective_rosenbrok(params):
lr = params['lr']
optimizer_class = params['optimizer_class']
minimum = (1.0, 1.0)
initial_state = (-2.0, 2.0)
optimizer_config = dict(lr=lr)
num_iter = 100
steps = execute_steps(
rosenbrock, initial_state, optimizer_class, optimizer_config, num_iter
)
return (steps[0][-1] - minimum[0]) ** 2 + (steps[1][-1] - minimum[1]) ** 2
def plot_rastrigin(grad_iter, optimizer_name, lr):
x = np.linspace(-4.5, 4.5, 250)
y = np.linspace(-4.5, 4.5, 250)
minimum = (0, 0)
X, Y = np.meshgrid(x, y)
Z = rastrigin([X, Y], lib=np)
iter_x, iter_y = grad_iter[0, :], grad_iter[1, :]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.contour(X, Y, Z, 20, cmap='jet')
ax.plot(iter_x, iter_y, color='r', marker='x')
ax.set_title(
f'Rastrigin func: {optimizer_name} with '
f'{len(iter_x)} iterations, lr={lr:.6}'
)
plt.plot(*minimum, 'gD')
plt.plot(iter_x[-1], iter_y[-1], 'rD')
plt.savefig(f'rastrigin_{optimizer_name}.png')
def plot_rosenbrok(grad_iter, optimizer_name, lr):
x = np.linspace(-2, 2, 250)
y = np.linspace(-1, 3, 250)
minimum = (1.0, 1.0)
X, Y = np.meshgrid(x, y)
Z = rosenbrock([X, Y])
iter_x, iter_y = grad_iter[0, :], grad_iter[1, :]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.contour(X, Y, Z, 90, cmap='jet')
ax.plot(iter_x, iter_y, color='r', marker='x')
ax.set_title(
f'Rosenbrock func: {optimizer_name} with {len(iter_x)} '
f'iterations, lr={lr:.6}'
)
plt.plot(*minimum, 'gD')
plt.plot(iter_x[-1], iter_y[-1], 'rD')
plt.savefig(f'rosenbrock_{optimizer_name}.png')
def execute_experiments(
optimizers, objective, func, plot_func, initial_state, seed=1
):
for item in optimizers:
optimizer_class, lr_low, lr_hi = item
space = {
'optimizer_class': hp.choice('optimizer_class', [optimizer_class]),
'lr': hp.loguniform('lr', lr_low, lr_hi),
}
best = fmin(
fn=objective,
space=space,
algo=tpe.suggest,
max_evals=200,
rstate=np.random.RandomState(seed),
)
print(best['lr'], optimizer_class)
steps = execute_steps(
func,
initial_state,
optimizer_class,
{'lr': best['lr']},
num_iter=500,
)
plot_func(steps, optimizer_class.__name__, best['lr'])
if __name__ == '__main__':
# python examples/viz_optimizers.py
# Each optimizer has tweaked search space to produce better plots and
# help to converge on better lr faster.
optimizers = [
(optim.AccSGD, -8, -0.1),
(optim.AdaBound, -8, 0.7),
(optim.AdaMod, -8, 1.2),
(optim.DiffGrad, -8, 0.7),
(optim.Lamb, -8, 0.7),
(optim.NovoGrad, -6, -2.0),
(optim.RAdam, -8, 0.7),
(optim.SGDW, -8, -0.9),
(optim.Yogi, -8, 0.1),
]
execute_experiments(
optimizers, objective_rastrigin, rastrigin, plot_rastrigin, (-2.0, 3.5)
)
optimizers = [
(optim.AccSGD, -8, -0.1),
(optim.AdaBound, -8, 0.7),
(optim.AdaMod, -4, 1.0),
(optim.DiffGrad, -8, 0.2),
(optim.Lamb, -8, -0.5),
(optim.NovoGrad, -8, -1.0),
(optim.RAdam, -8, 0.7),
(optim.SGDW, -8, 0.7),
(optim.Yogi, -8, 0.1),
]
execute_experiments(
optimizers,
objective_rosenbrok,
rosenbrock,
plot_rosenbrok,
(-2.0, 2.0),
)
| pytorch-optimizer-master | examples/viz_optimizers.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_optimizer as optim
from torchvision import datasets, transforms, utils
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from dataclasses import dataclass
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(conf, model, device, train_loader, optimizer, epoch, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % conf.log_interval == 0:
loss = loss.item()
idx = batch_idx + epoch * (len(train_loader))
writer.add_scalar('Loss/train', loss, idx)
print(
'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss,
)
)
def test(conf, model, device, test_loader, epoch, writer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target, reduction='sum').item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
fmt = '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
print(
fmt.format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
writer.add_scalar('Accuracy', correct, epoch)
writer.add_scalar('Loss/test', test_loss, epoch)
def prepare_loaders(conf, use_cuda=False):
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
'../data',
train=True,
download=True,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
),
),
batch_size=conf.batch_size,
shuffle=True,
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
'../data',
train=False,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
),
),
batch_size=conf.test_batch_size,
shuffle=True,
**kwargs,
)
return train_loader, test_loader
@dataclass
class Config:
batch_size: int = 64
test_batch_size: int = 1000
epochs: int = 15
lr: float = 0.01
gamma: float = 0.7
no_cuda: bool = True
seed: int = 42
log_interval: int = 10
def main():
conf = Config()
log_dir = 'runs/mnist_custom_optim'
print('Tensorboard: tensorboard --logdir={}'.format(log_dir))
with SummaryWriter(log_dir) as writer:
use_cuda = not conf.no_cuda and torch.cuda.is_available()
torch.manual_seed(conf.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
train_loader, test_loader = prepare_loaders(conf, use_cuda)
model = Net().to(device)
# create grid of images and write to tensorboard
images, labels = next(iter(train_loader))
img_grid = utils.make_grid(images)
writer.add_image('mnist_images', img_grid)
# visualize NN computation graph
writer.add_graph(model, images)
# custom optimizer from torch_optimizer package
# main_optimizer = optim.DiffGrad(model.parameters(), lr=conf.lr)
# optimizer = optim.lookahead.Lookahead(main_optimizer)
        optimizer = optim.NovoGrad(model.parameters(), lr=conf.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=conf.gamma)
for epoch in range(1, conf.epochs + 1):
train(conf, model, device, train_loader, optimizer, epoch, writer)
test(conf, model, device, test_loader, epoch, writer)
scheduler.step()
for name, param in model.named_parameters():
writer.add_histogram(name, param, epoch)
writer.add_histogram(f'{name}.grad', param.grad, epoch)
if __name__ == '__main__':
main()
| pytorch-optimizer-master | examples/mnist.py |
import torch
from torch.optim.optimizer import Optimizer
from .types import OptFloat, OptLossClosure, Params, State
__all__ = ('SGDW',)
class SGDW(Optimizer):
r"""Implements SGDW algorithm.
It has been proposed in `Decoupled Weight Decay Regularization`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
momentum: momentum factor (default: 0)
weight_decay: weight decay (L2 penalty) (default: 0)
dampening: dampening for momentum (default: 0)
nesterov: enables Nesterov momentum (default: False)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.SGDW(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1711.05101
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
momentum: float = 0.0,
dampening: float = 0.0,
weight_decay: float = 0.0,
nesterov: bool = False,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if momentum < 0.0:
raise ValueError(f'Invalid momentum value: {momentum}')
if weight_decay < 0.0:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
'Nesterov momentum requires a momentum and zero dampening'
)
super(SGDW, self).__init__(params, defaults)
def __setstate__(self, state: State) -> None:
super(SGDW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure: OptLossClosure = None) -> OptFloat:
"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if p.grad.is_sparse:
msg = (
'SGDW does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(
d_p
).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
                # Apply the (possibly momentum-adjusted) gradient step
                p.data.add_(-group['lr'], d_p)
                # Apply decoupled weight decay directly to the weights
                if weight_decay != 0:
                    p.data.add_(-group['lr'] * weight_decay, p.data)
return loss
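# Illustrative sketch (not part of the original module): a tiny quadratic showing the
# decoupled update p <- p - lr * d_p - lr * weight_decay * p. The parameter values
# below are arbitrary assumptions chosen only to demonstrate the API.
def _example_sgdw_usage():
    w = torch.nn.Parameter(torch.ones(3))
    optimizer = SGDW([w], lr=0.1, momentum=0.9, weight_decay=1e-2)
    loss = (w ** 2).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return w.detach()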
| pytorch-optimizer-master | torch_optimizer/sgdw.py |
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('Lamb',)
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning:
Training BERT in 76 minutes`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
adam: always use trust ratio = 1, which turns this
into Adam. Useful for comparison purposes.
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.Lamb(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0,
adam: bool = False,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = (
'Lamb does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group[
'lr'
] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
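# Illustrative note (an assumption, not part of the original module): the layer-wise
# trust ratio above is ||w||_2 / ||adam_update||_2 (with the weight norm clipped via
# clamp(0, 10)), so layers whose weights are large relative to their update take
# bigger steps. The tensors below are arbitrary and only demonstrate the arithmetic.
def _example_trust_ratio():
    weight = torch.full((4,), 2.0)
    adam_step = torch.full((4,), 0.5)
    weight_norm = weight.pow(2).sum().sqrt().clamp(0, 10)
    adam_norm = adam_step.pow(2).sum().sqrt()
    return (weight_norm / adam_norm).item()  # 4.0 for this toy example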
| pytorch-optimizer-master | torch_optimizer/lamb.py |
from .accsgd import AccSGD
from .adabound import AdaBound
from .adamod import AdaMod
from .diffgrad import DiffGrad
from .lamb import Lamb
from .lookahead import Lookahead
from .novograd import NovoGrad
from .radam import RAdam
from .sgdw import SGDW
from .yogi import Yogi
__all__ = (
'AccSGD',
'AdaBound',
'AdaMod',
'DiffGrad',
'Lamb',
'Lookahead',
'NovoGrad',
'RAdam',
'SGDW',
'Yogi',
)
__version__ = '0.0.1a7'
| pytorch-optimizer-master | torch_optimizer/__init__.py |
from typing import Iterable, Union, Callable, Dict, Optional, Tuple, Any
from torch import Tensor
Params = Union[Iterable[Tensor], Iterable[dict]]
LossClosure = Callable[[], float]
OptLossClosure = Optional[LossClosure]
Betas2 = Tuple[float, float]
State = Dict[str, Any]
OptFloat = Optional[float]
| pytorch-optimizer-master | torch_optimizer/types.py |
import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('AdaMod',)
class AdaMod(Optimizer):
r"""Implements AccSGD algorithm.
It has been proposed in `Adaptive and Momental Bounds for Adaptive
Learning Rate Methods`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing running averages of gradient
and its square (default: (0.9, 0.999))
beta3: smoothing coefficient for adaptive learning rates
            (default: 0.999)
eps: term added to the denominator to improve numerical stability
(default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.AdaMod(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1910.12249
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
beta3: float = 0.999,
eps: float = 1e-8,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= beta3 < 1.0:
raise ValueError(f'Invalid beta3 parameter: {beta3}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr, betas=betas, beta3=beta3, eps=eps, weight_decay=weight_decay
)
super(AdaMod, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = 'AdaMod does not support sparse gradients'
raise RuntimeError(msg)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
# Exponential moving average of actual learning rates
state['exp_avg_lr'] = torch.zeros_like(p)
exp_avg, exp_avg_sq, exp_avg_lr = (
state['exp_avg'],
state['exp_avg_sq'],
state['exp_avg_lr'],
)
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = (
group['lr']
* math.sqrt(bias_correction2)
/ bias_correction1
)
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * group['lr'], p.data)
# Applies momental bounds on actual learning rates
step_size = torch.full_like(denom, step_size)
step_size.div_(denom)
exp_avg_lr.mul_(group['beta3']).add_(
1 - group['beta3'], step_size
)
step_size = torch.min(step_size, exp_avg_lr)
step_size.mul_(exp_avg)
p.data.add_(-step_size)
return loss
| pytorch-optimizer-master | torch_optimizer/adamod.py |
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('NovoGrad',)
class NovoGrad(Optimizer):
r"""Implements Novograd optimization algorithm.
It has been proposed in `Stochastic Gradient Methods with Layer-wise
Adaptive Moments for Training of Deep Networks`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.95, 0))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging (default: False)
amsgrad: whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
Example:
>>> import torch_optimizer as optim
        >>> optimizer = optim.NovoGrad(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
>>> optimizer.step()
>>> scheduler.step()
__ https://arxiv.org/abs/1905.11286
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.95, 0),
eps: float = 1e-8,
weight_decay: float = 0,
grad_averaging: bool = False,
amsgrad: bool = False,
):
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad,
)
super(NovoGrad, self).__init__(params, defaults)
def __setstate__(self, state: dict) -> None:
super(NovoGrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = (
'NovoGrad does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(
state['exp_avg'].device
)
if amsgrad:
# Maintains max of all exp. moving avg. of sq.
# grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(
state['exp_avg'].device
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
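                # NovoGrad keeps a *scalar* second moment per parameter tensor (per
                # layer): a running average of the squared gradient norm, rather than
                # a per-element estimate as in Adam.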
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg.
# till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.data.add_(-group['lr'], exp_avg)
return loss
| pytorch-optimizer-master | torch_optimizer/novograd.py |
import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('DiffGrad',)
class DiffGrad(Optimizer):
r"""Implements DiffGrad algorithm.
It has been proposed in `DiffGrad: An Optimization Method for
Convolutional Neural Networks`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.DiffGrad(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1909.11015
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(DiffGrad, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = (
'DiffGrad does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
# Previous gradient
state['previous_grad'] = torch.zeros_like(p)
exp_avg, exp_avg_sq, previous_grad = (
state['exp_avg'],
state['exp_avg_sq'],
state['previous_grad'],
)
state['step'] += 1
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# compute diffgrad coefficient (dfc)
diff = torch.abs(previous_grad - grad)
dfc = torch.div(1.0, (1.0 + torch.exp(-diff)))
state['previous_grad'] = grad.clone()
# update momentum with dfc
exp_avg1 = exp_avg * dfc
step_size = (
group['lr']
* math.sqrt(bias_correction2)
/ bias_correction1
)
p.data.addcdiv_(-step_size, exp_avg1, denom)
return loss
| pytorch-optimizer-master | torch_optimizer/diffgrad.py |
import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('Yogi',)
class Yogi(Optimizer):
r"""Implements Yogi optimization algorithm.
It has been proposed in `Adaptive Methods for Nonconvex Optimization`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.Yogi(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization # noqa
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
eps: float = 1e-3,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(Yogi, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Yogi does not support sparse gradients, '
'please consider SparseAdam instead'
)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
grad_squared = grad.mul(grad)
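                # Yogi's additive second-moment rule:
                # v_t = v_{t-1} - (1 - beta2) * sign(v_{t-1} - g_t^2) * g_t^2,
                # so the second moment changes by at most (1 - beta2) * g_t^2 per
                # step, controlling how fast the effective learning rate can change.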
exp_avg_sq.addcmul_(
-(1 - beta2),
torch.sign(exp_avg_sq - grad_squared),
grad_squared,
)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(
group['eps']
)
step_size = group['lr'] / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
| pytorch-optimizer-master | torch_optimizer/yogi.py |
import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('RAdam',)
class RAdam(Optimizer):
r"""Implements RAdam optimization algorithm.
It has been proposed in `On the Variance of the Adaptive Learning
Rate and Beyond`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.RAdam(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1908.03265
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self._buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
lr = group['lr']
weight_decay = group['weight_decay']
beta1, beta2 = group['betas']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
msg = (
'RAdam does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
p_data_fp32
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
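                # Rectification terms from the RAdam paper:
                # rho_inf = 2 / (1 - beta2) - 1 (N_sma_max below) and
                # rho_t = rho_inf - 2 * t * beta2^t / (1 - beta2^t) (N_sma below);
                # the rectified adaptive step is only used once rho_t >= 5, otherwise
                # the update falls back to a plain momentum step further down.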
buffered = self._buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (
1 - beta2_t
)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = (
lr
* math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
)
/ (1 - beta1 ** state['step'])
)
else:
step_size = lr / (1 - beta1 ** state['step'])
buffered[2] = step_size
if weight_decay != 0:
p_data_fp32.add_(-weight_decay * lr, p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(eps)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
| pytorch-optimizer-master | torch_optimizer/radam.py |
import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptLossClosure, Params, State, OptFloat
__all__ = ('AdaBound',)
class AdaBound(Optimizer):
r"""Implements AdaBound algorithm.
It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of
Learning Rate`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing running averages of gradient
and its square (default: (0.9, 0.999))
final_lr: final (SGD) learning rate (default: 0.1)
gamma: convergence speed of the bound functions
(default: 1e-3)
eps: term added to the denominator to improve numerical stability
(default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
amsbound: whether to use the AMSBound variant of this algorithm
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.AdaBound(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1902.09843
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
final_lr: float = 0.1,
gamma: float = 1e-3,
eps: float = 1e-8,
weight_decay: float = 0,
amsbound: bool = False,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= final_lr:
raise ValueError(f'Invalid final learning rate: {final_lr}')
if not 0.0 <= gamma < 1.0:
raise ValueError(f'Invalid gamma parameter: {gamma}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr,
betas=betas,
final_lr=final_lr,
gamma=gamma,
eps=eps,
weight_decay=weight_decay,
amsbound=amsbound,
)
super(AdaBound, self).__init__(params, defaults)
self.base_lrs = [group['lr'] for group in self.param_groups]
def __setstate__(self, state: State) -> None:
super(AdaBound, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsbound', False)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group, base_lr in zip(self.param_groups, self.base_lrs):
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = (
'AdaBound does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
amsbound = group['amsbound']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsbound:
# Maintains max of all exp. moving avg. of
# sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsbound:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsbound:
# Maintains the maximum of all 2nd moment running
# avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = (
group['lr']
* math.sqrt(bias_correction2)
/ bias_correction1
)
# Applies bounds on actual learning rate
# lr_scheduler cannot affect final_lr, this is a workaround
# to apply lr decay
final_lr = group['final_lr'] * group['lr'] / base_lr
lower_bound = final_lr * (
1 - 1 / (group['gamma'] * state['step'] + 1)
)
upper_bound = final_lr * (
1 + 1 / (group['gamma'] * state['step'])
)
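                # As step -> inf both bounds approach final_lr, so the per-element
                # step size smoothly transitions from Adam-like behaviour to SGD
                # with learning rate final_lr.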
step_size = torch.full_like(denom, step_size)
step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(
exp_avg
)
p.data.add_(-step_size)
return loss
| pytorch-optimizer-master | torch_optimizer/adabound.py |
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
from .types import OptLossClosure, OptFloat, State
__all__ = ('Lookahead',)
class Lookahead(Optimizer):
r"""Implements Lookahead optimization algorithm.
It has been proposed in `Lookahead Optimizer: k steps forward, 1
step back`__
Arguments:
        optimizer: base inner optimizer to wrap
k: number of lookahead steps (default: 5)
alpha: linear interpolation factor. 1.0 recovers the inner optimizer.
            (default: 0.5)
Example:
>>> import torch_optimizer as optim
>>> yogi = optim.Yogi(model.parameters(), lr=0.1)
>>> optimizer = optim.Lookahead(yogi, k=5, alpha=0.5)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1907.08610
"""
def __init__(
self, optimizer: Optimizer, k: int = 5, alpha: float = 0.5
) -> None:
if not 0.0 <= k:
raise ValueError(f'Invalid number of lookahead steps: {k}')
if not 0.0 <= alpha:
raise ValueError(f'Invalid linear interpolation factor: {alpha}')
self.optimizer = optimizer
self.k = k
self.alpha = alpha
self.param_groups = self.optimizer.param_groups
self.state = defaultdict(dict)
self.fast_state = self.optimizer.state
for group in self.param_groups:
group['counter'] = 0
def _update(self, group) -> None:
for fast in group['params']:
param_state = self.state[fast]
if 'slow_param' not in param_state:
param_state['slow_param'] = torch.clone(fast.data).detach()
slow = param_state['slow_param']
fast.data.mul_(self.alpha).add_(1.0 - self.alpha, slow)
slow.data.copy_(fast)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = self.optimizer.step(closure=closure)
for group in self.param_groups:
if group['counter'] == 0:
self._update(group)
group['counter'] = (group['counter'] + 1) % self.k
return loss
def state_dict(self) -> State:
r"""Returns the state of the optimizer as a :class:`dict`.
It contains two entries:
* state - a dict holding current optimization state. Its content
differs between optimizer classes.
* param_groups - a dict containing all parameter groups
"""
fast_state_dict = self.optimizer.state_dict()
slow_state = {
(id(k) if isinstance(k, torch.Tensor) else k): v
for k, v in self.state.items()
}
fast_state = fast_state_dict['state']
param_groups = fast_state_dict['param_groups']
return {
'fast_state': fast_state,
'slow_state': slow_state,
'param_groups': param_groups,
}
def load_state_dict(self, state_dict: State) -> None:
r"""Loads the optimizer state.
Arguments:
state_dict: optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
slow_state_dict = {
'state': state_dict['slow_state'],
'param_groups': state_dict['param_groups'],
}
fast_state_dict = {
'state': state_dict['fast_state'],
'param_groups': state_dict['param_groups'],
}
super(Lookahead, self).load_state_dict(slow_state_dict)
self.optimizer.load_state_dict(fast_state_dict)
self.fast_state = self.optimizer.state
def zero_grad(self) -> None:
r"""Clears the gradients of all optimized :class:`torch.Tensor` s."""
self.optimizer.zero_grad()
def __repr__(self) -> str:
base_str = self.optimizer.__repr__()
format_string = self.__class__.__name__ + ' ('
format_string += '\n'
format_string += f'k: {self.k}\n'
format_string += f'alpha: {self.alpha}\n'
format_string += base_str
format_string += '\n'
format_string += ')'
return format_string
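# Illustrative sketch (not part of the original module): every k inner steps the slow
# weights move toward the fast weights, slow <- slow + alpha * (fast - slow), and the
# fast weights are reset to the new slow weights. The base optimizer and
# hyperparameters below are arbitrary assumptions for demonstration.
def _example_lookahead_usage():
    w = torch.nn.Parameter(torch.ones(2))
    base = torch.optim.SGD([w], lr=0.1)
    optimizer = Lookahead(base, k=5, alpha=0.5)
    for _ in range(5):
        optimizer.zero_grad()
        loss = (w ** 2).sum()
        loss.backward()
        optimizer.step()
    return w.detach()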
| pytorch-optimizer-master | torch_optimizer/lookahead.py |
import copy
from torch.optim.optimizer import Optimizer
from .types import OptLossClosure, Params, OptFloat
__all__ = ('AccSGD',)
class AccSGD(Optimizer):
r"""Implements AccSGD algorithm.
It has been proposed in `On the insufficiency of existing momentum
schemes for Stochastic Optimization`__ and `Accelerating Stochastic
Gradient Descent For Least Squares Regression`__
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
kappa: ratio of long to short step (default: 1000)
xi: statistical advantage parameter (default: 10)
small_const: any value <=1 (default: 0.7)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.AccSGD(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1704.08227
__ https://arxiv.org/abs/1803.05591
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
kappa: float = 1000.0,
xi: float = 10.0,
small_const: float = 0.7,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr,
kappa=kappa,
xi=xi,
small_const=small_const,
weight_decay=weight_decay,
)
super(AccSGD, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
large_lr = (group['lr'] * group['kappa']) / (group['small_const'])
alpha = 1.0 - (
(group['small_const'] * group['small_const'] * group['xi'])
/ group['kappa']
)
beta = 1.0 - alpha
zeta = group['small_const'] / (group['small_const'] + beta)
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
param_state['momentum_buffer'] = copy.deepcopy(p.data)
buf = param_state['momentum_buffer']
buf.mul_((1.0 / beta) - 1.0)
buf.add_(-large_lr, d_p)
buf.add_(p.data)
buf.mul_(beta)
p.data.add_(-group['lr'], d_p)
p.data.mul_(zeta)
p.data.add_(1.0 - zeta, buf)
return loss
| pytorch-optimizer-master | torch_optimizer/accsgd.py |
from pathlib import Path
from typing import List
from datetime import timedelta
from dalle2_pytorch.trainer import DecoderTrainer
from dalle2_pytorch.dataloaders import create_image_embedding_dataloader
from dalle2_pytorch.trackers import Tracker
from dalle2_pytorch.train_configs import DecoderConfig, TrainDecoderConfig
from dalle2_pytorch.utils import Timer, print_ribbon
from dalle2_pytorch.dalle2_pytorch import Decoder, resize_image_to
from clip import tokenize
import torchvision
import torch
from torch import nn
from torchmetrics.image.fid import FrechetInceptionDistance
from torchmetrics.image.inception import InceptionScore
from torchmetrics.image.kid import KernelInceptionDistance
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs
from accelerate.utils import dataclasses as accelerate_dataclasses
import webdataset as wds
import click
# constants
TRAIN_CALC_LOSS_EVERY_ITERS = 10
VALID_CALC_LOSS_EVERY_ITERS = 10
# helpers functions
def exists(val):
return val is not None
# main functions
def create_dataloaders(
available_shards,
webdataset_base_url,
img_embeddings_url=None,
text_embeddings_url=None,
shard_width=6,
num_workers=4,
batch_size=32,
n_sample_images=6,
shuffle_train=True,
resample_train=False,
img_preproc = None,
index_width=4,
train_prop = 0.75,
val_prop = 0.15,
test_prop = 0.10,
seed = 0,
**kwargs
):
"""
Randomly splits the available shards into train, val, and test sets and returns a dataloader for each
"""
assert train_prop + test_prop + val_prop == 1
num_train = round(train_prop*len(available_shards))
num_test = round(test_prop*len(available_shards))
num_val = len(available_shards) - num_train - num_test
assert num_train + num_test + num_val == len(available_shards), f"{num_train} + {num_test} + {num_val} = {num_train + num_test + num_val} != {len(available_shards)}"
train_split, test_split, val_split = torch.utils.data.random_split(available_shards, [num_train, num_test, num_val], generator=torch.Generator().manual_seed(seed))
# The shard number in the webdataset file names has a fixed width. We zero pad the shard numbers so they correspond to a filename.
train_urls = [webdataset_base_url.format(str(shard).zfill(shard_width)) for shard in train_split]
test_urls = [webdataset_base_url.format(str(shard).zfill(shard_width)) for shard in test_split]
val_urls = [webdataset_base_url.format(str(shard).zfill(shard_width)) for shard in val_split]
create_dataloader = lambda tar_urls, shuffle=False, resample=False, for_sampling=False: create_image_embedding_dataloader(
tar_url=tar_urls,
num_workers=num_workers,
batch_size=batch_size if not for_sampling else n_sample_images,
img_embeddings_url=img_embeddings_url,
text_embeddings_url=text_embeddings_url,
index_width=index_width,
shuffle_num = None,
extra_keys= ["txt"],
shuffle_shards = shuffle,
resample_shards = resample,
img_preproc=img_preproc,
handler=wds.handlers.warn_and_continue
)
train_dataloader = create_dataloader(train_urls, shuffle=shuffle_train, resample=resample_train)
train_sampling_dataloader = create_dataloader(train_urls, shuffle=False, for_sampling=True)
val_dataloader = create_dataloader(val_urls, shuffle=False)
test_dataloader = create_dataloader(test_urls, shuffle=False)
test_sampling_dataloader = create_dataloader(test_urls, shuffle=False, for_sampling=True)
return {
"train": train_dataloader,
"train_sampling": train_sampling_dataloader,
"val": val_dataloader,
"test": test_dataloader,
"test_sampling": test_sampling_dataloader
}
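# Illustrative sketch (an assumption, not part of the original script): a typical call
# with a webdataset shard URL template. The URL template, shard count and embedding
# locations below are placeholders, not real paths.
def _example_create_dataloaders():
    return create_dataloaders(
        available_shards=list(range(100)),
        webdataset_base_url='pipe:cat /data/shards/{}.tar',  # placeholder template
        img_embeddings_url=None,  # the decoder will embed images with CLIP instead
        shard_width=6,
        batch_size=32,
        train_prop=0.75, val_prop=0.15, test_prop=0.10,
    )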
def get_dataset_keys(dataloader):
"""
    It is sometimes necessary to get the keys the dataloader is returning. Since the dataset is buried in the dataloader, we need to unwrap it to recover them.
"""
# If the dataloader is actually a WebLoader, we need to extract the real dataloader
if isinstance(dataloader, wds.WebLoader):
dataloader = dataloader.pipeline[0]
return dataloader.dataset.key_map
def get_example_data(dataloader, device, n=5):
"""
Samples the dataloader and returns a zipped list of examples
"""
images = []
img_embeddings = []
text_embeddings = []
captions = []
for img, emb, txt in dataloader:
img_emb, text_emb = emb.get('img'), emb.get('text')
if img_emb is not None:
img_emb = img_emb.to(device=device, dtype=torch.float)
img_embeddings.extend(list(img_emb))
else:
# Then we add None img.shape[0] times
img_embeddings.extend([None]*img.shape[0])
if text_emb is not None:
text_emb = text_emb.to(device=device, dtype=torch.float)
text_embeddings.extend(list(text_emb))
else:
# Then we add None img.shape[0] times
text_embeddings.extend([None]*img.shape[0])
img = img.to(device=device, dtype=torch.float)
images.extend(list(img))
captions.extend(list(txt))
if len(images) >= n:
break
return list(zip(images[:n], img_embeddings[:n], text_embeddings[:n], captions[:n]))
def generate_samples(trainer, example_data, clip=None, start_unet=1, end_unet=None, condition_on_text_encodings=False, cond_scale=1.0, device=None, text_prepend="", match_image_size=True):
"""
Takes example data and generates images from the embeddings
Returns three lists: real images, generated images, and captions
"""
real_images, img_embeddings, text_embeddings, txts = zip(*example_data)
sample_params = {}
if img_embeddings[0] is None:
# Generate image embeddings from clip
imgs_tensor = torch.stack(real_images)
assert clip is not None, "clip is None, but img_embeddings is None"
        imgs_tensor = imgs_tensor.to(device=device)
img_embeddings, img_encoding = clip.embed_image(imgs_tensor)
sample_params["image_embed"] = img_embeddings
else:
# Then we are using precomputed image embeddings
img_embeddings = torch.stack(img_embeddings)
sample_params["image_embed"] = img_embeddings
if condition_on_text_encodings:
if text_embeddings[0] is None:
# Generate text embeddings from text
assert clip is not None, "clip is None, but text_embeddings is None"
tokenized_texts = tokenize(txts, truncate=True).to(device=device)
text_embed, text_encodings = clip.embed_text(tokenized_texts)
sample_params["text_encodings"] = text_encodings
else:
# Then we are using precomputed text embeddings
text_embeddings = torch.stack(text_embeddings)
sample_params["text_encodings"] = text_embeddings
sample_params["start_at_unet_number"] = start_unet
sample_params["stop_at_unet_number"] = end_unet
if start_unet > 1:
# If we are only training upsamplers
sample_params["image"] = torch.stack(real_images)
if device is not None:
sample_params["_device"] = device
samples = trainer.sample(**sample_params, _cast_deepspeed_precision=False) # At sampling time we don't want to cast to FP16
generated_images = list(samples)
captions = [text_prepend + txt for txt in txts]
if match_image_size:
generated_image_size = generated_images[0].shape[-1]
real_images = [resize_image_to(image, generated_image_size, clamp_range=(0, 1)) for image in real_images]
return real_images, generated_images, captions
def generate_grid_samples(trainer, examples, clip=None, start_unet=1, end_unet=None, condition_on_text_encodings=False, cond_scale=1.0, device=None, text_prepend=""):
"""
Generates samples and uses torchvision to put them in a side by side grid for easy viewing
"""
real_images, generated_images, captions = generate_samples(trainer, examples, clip, start_unet, end_unet, condition_on_text_encodings, cond_scale, device, text_prepend)
grid_images = [torchvision.utils.make_grid([original_image, generated_image]) for original_image, generated_image in zip(real_images, generated_images)]
return grid_images, captions
def evaluate_trainer(trainer, dataloader, device, start_unet, end_unet, clip=None, condition_on_text_encodings=False, cond_scale=1.0, inference_device=None, n_evaluation_samples=1000, FID=None, IS=None, KID=None, LPIPS=None):
"""
Computes evaluation metrics for the decoder
"""
metrics = {}
# Prepare the data
examples = get_example_data(dataloader, device, n_evaluation_samples)
if len(examples) == 0:
print("No data to evaluate. Check that your dataloader has shards.")
return metrics
real_images, generated_images, captions = generate_samples(trainer, examples, clip, start_unet, end_unet, condition_on_text_encodings, cond_scale, inference_device)
real_images = torch.stack(real_images).to(device=device, dtype=torch.float)
generated_images = torch.stack(generated_images).to(device=device, dtype=torch.float)
# Convert from [0, 1] to [0, 255] and from torch.float to torch.uint8
int_real_images = real_images.mul(255).add(0.5).clamp(0, 255).type(torch.uint8)
int_generated_images = generated_images.mul(255).add(0.5).clamp(0, 255).type(torch.uint8)
def null_sync(t, *args, **kwargs):
return [t]
if exists(FID):
fid = FrechetInceptionDistance(**FID, dist_sync_fn=null_sync)
fid.to(device=device)
fid.update(int_real_images, real=True)
fid.update(int_generated_images, real=False)
metrics["FID"] = fid.compute().item()
if exists(IS):
inception = InceptionScore(**IS, dist_sync_fn=null_sync)
inception.to(device=device)
inception.update(int_real_images)
is_mean, is_std = inception.compute()
metrics["IS_mean"] = is_mean.item()
metrics["IS_std"] = is_std.item()
if exists(KID):
kernel_inception = KernelInceptionDistance(**KID, dist_sync_fn=null_sync)
kernel_inception.to(device=device)
kernel_inception.update(int_real_images, real=True)
kernel_inception.update(int_generated_images, real=False)
kid_mean, kid_std = kernel_inception.compute()
metrics["KID_mean"] = kid_mean.item()
metrics["KID_std"] = kid_std.item()
if exists(LPIPS):
# Convert from [0, 1] to [-1, 1]
renorm_real_images = real_images.mul(2).sub(1).clamp(-1,1)
renorm_generated_images = generated_images.mul(2).sub(1).clamp(-1,1)
lpips = LearnedPerceptualImagePatchSimilarity(**LPIPS, dist_sync_fn=null_sync)
lpips.to(device=device)
lpips.update(renorm_real_images, renorm_generated_images)
metrics["LPIPS"] = lpips.compute().item()
if trainer.accelerator.num_processes > 1:
# Then we should sync the metrics
metrics_order = sorted(metrics.keys())
metrics_tensor = torch.zeros(1, len(metrics), device=device, dtype=torch.float)
for i, metric_name in enumerate(metrics_order):
metrics_tensor[0, i] = metrics[metric_name]
metrics_tensor = trainer.accelerator.gather(metrics_tensor)
metrics_tensor = metrics_tensor.mean(dim=0)
for i, metric_name in enumerate(metrics_order):
metrics[metric_name] = metrics_tensor[i].item()
return metrics
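# Illustrative sketch (an assumption, not part of the original script): the FID/IS/KID/
# LPIPS arguments are kwarg dicts forwarded to the corresponding torchmetrics classes,
# so evaluating FID and LPIPS only might look like the stub below. The trainer,
# dataloader, device and kwargs here are placeholders.
def _example_evaluate_call(trainer, dataloader, device):
    return evaluate_trainer(
        trainer, dataloader, device,
        start_unet=1, end_unet=None,
        FID={'feature': 64},        # forwarded to FrechetInceptionDistance
        LPIPS={'net_type': 'vgg'},  # forwarded to LearnedPerceptualImagePatchSimilarity
    )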
def save_trainer(tracker: Tracker, trainer: DecoderTrainer, epoch: int, sample: int, next_task: str, validation_losses: List[float], samples_seen: int, is_latest=True, is_best=False):
"""
Logs the model with an appropriate method depending on the tracker
"""
tracker.save(trainer, is_best=is_best, is_latest=is_latest, epoch=epoch, sample=sample, next_task=next_task, validation_losses=validation_losses, samples_seen=samples_seen)
def recall_trainer(tracker: Tracker, trainer: DecoderTrainer):
"""
Loads the model with an appropriate method depending on the tracker
"""
trainer.accelerator.print(print_ribbon(f"Loading model from {type(tracker.loader).__name__}"))
state_dict = tracker.recall()
trainer.load_state_dict(state_dict, only_model=False, strict=True)
return state_dict.get("epoch", 0), state_dict.get("validation_losses", []), state_dict.get("next_task", "train"), state_dict.get("sample", 0), state_dict.get("samples_seen", 0)
def train(
dataloaders,
decoder: Decoder,
accelerator: Accelerator,
tracker: Tracker,
inference_device,
clip=None,
evaluate_config=None,
epoch_samples = None, # If the training dataset is resampling, we have to manually stop an epoch
validation_samples = None,
save_immediately=False,
epochs = 20,
n_sample_images = 5,
save_every_n_samples = 100000,
unet_training_mask=None,
condition_on_text_encodings=False,
cond_scale=1.0,
**kwargs
):
"""
Trains a decoder on a dataset.
"""
is_master = accelerator.process_index == 0
if not exists(unet_training_mask):
# Then the unet mask should be true for all unets in the decoder
unet_training_mask = [True] * len(decoder.unets)
    assert len(unet_training_mask) == len(decoder.unets), f"The unet training mask should be the same length as the number of unets in the decoder. Got {len(unet_training_mask)} and {len(decoder.unets)}"
trainable_unet_numbers = [i+1 for i, trainable in enumerate(unet_training_mask) if trainable]
first_trainable_unet = trainable_unet_numbers[0]
last_trainable_unet = trainable_unet_numbers[-1]
def move_unets(unet_training_mask):
for i in range(len(decoder.unets)):
if not unet_training_mask[i]:
# Replace the unet from the module list with a nn.Identity(). This training script never uses unets that aren't being trained so this is fine.
decoder.unets[i] = nn.Identity().to(inference_device)
# Remove non-trainable unets
move_unets(unet_training_mask)
trainer = DecoderTrainer(
decoder=decoder,
accelerator=accelerator,
dataloaders=dataloaders,
**kwargs
)
# Set up starting model and parameters based on a recalled state dict
start_epoch = 0
validation_losses = []
next_task = 'train'
sample = 0
samples_seen = 0
val_sample = 0
step = lambda: int(trainer.num_steps_taken(unet_number=first_trainable_unet))
if tracker.can_recall:
start_epoch, validation_losses, next_task, recalled_sample, samples_seen = recall_trainer(tracker, trainer)
if next_task == 'train':
sample = recalled_sample
if next_task == 'val':
val_sample = recalled_sample
accelerator.print(f"Loaded model from {type(tracker.loader).__name__} on epoch {start_epoch} having seen {samples_seen} samples with minimum validation loss {min(validation_losses) if len(validation_losses) > 0 else 'N/A'}")
accelerator.print(f"Starting training from task {next_task} at sample {sample} and validation sample {val_sample}")
trainer.to(device=inference_device)
accelerator.print(print_ribbon("Generating Example Data", repeat=40))
accelerator.print("This can take a while to load the shard lists...")
if is_master:
train_example_data = get_example_data(dataloaders["train_sampling"], inference_device, n_sample_images)
accelerator.print("Generated training examples")
test_example_data = get_example_data(dataloaders["test_sampling"], inference_device, n_sample_images)
accelerator.print("Generated testing examples")
send_to_device = lambda arr: [x.to(device=inference_device, dtype=torch.float) for x in arr]
sample_length_tensor = torch.zeros(1, dtype=torch.int, device=inference_device)
unet_losses_tensor = torch.zeros(TRAIN_CALC_LOSS_EVERY_ITERS, trainer.num_unets, dtype=torch.float, device=inference_device)
for epoch in range(start_epoch, epochs):
accelerator.print(print_ribbon(f"Starting epoch {epoch}", repeat=40))
timer = Timer()
last_sample = sample
last_snapshot = sample
if next_task == 'train':
for i, (img, emb, txt) in enumerate(dataloaders["train"]):
# We want to count the total number of samples across all processes
sample_length_tensor[0] = len(img)
all_samples = accelerator.gather(sample_length_tensor) # TODO: accelerator.reduce is broken when this was written. If it is fixed replace this.
total_samples = all_samples.sum().item()
sample += total_samples
samples_seen += total_samples
img_emb = emb.get('img')
has_img_embedding = img_emb is not None
if has_img_embedding:
img_emb, = send_to_device((img_emb,))
text_emb = emb.get('text')
has_text_embedding = text_emb is not None
if has_text_embedding:
text_emb, = send_to_device((text_emb,))
img, = send_to_device((img,))
trainer.train()
for unet in range(1, trainer.num_unets+1):
# Check if this is a unet we are training
if not unet_training_mask[unet-1]: # Unet index is the unet number - 1
continue
forward_params = {}
if has_img_embedding:
forward_params['image_embed'] = img_emb
else:
# Forward pass automatically generates embedding
assert clip is not None
img_embed, img_encoding = clip.embed_image(img)
forward_params['image_embed'] = img_embed
if condition_on_text_encodings:
if has_text_embedding:
forward_params['text_encodings'] = text_emb
else:
# Then we need to pass the text instead
assert clip is not None
tokenized_texts = tokenize(txt, truncate=True).to(inference_device)
assert tokenized_texts.shape[0] == len(img), f"The number of texts ({tokenized_texts.shape[0]}) should be the same as the number of images ({len(img)})"
text_embed, text_encodings = clip.embed_text(tokenized_texts)
forward_params['text_encodings'] = text_encodings
loss = trainer.forward(img, **forward_params, unet_number=unet, _device=inference_device)
trainer.update(unet_number=unet)
unet_losses_tensor[i % TRAIN_CALC_LOSS_EVERY_ITERS, unet-1] = loss
samples_per_sec = (sample - last_sample) / timer.elapsed()
timer.reset()
last_sample = sample
if i % TRAIN_CALC_LOSS_EVERY_ITERS == 0:
# We want to average losses across all processes
unet_all_losses = accelerator.gather(unet_losses_tensor)
mask = unet_all_losses != 0
unet_average_loss = (unet_all_losses * mask).sum(dim=0) / mask.sum(dim=0)
loss_map = { f"Unet {index} Training Loss": loss.item() for index, loss in enumerate(unet_average_loss) if unet_training_mask[index] }
# gather decay rate on each UNet
ema_decay_list = {f"Unet {index} EMA Decay": ema_unet.get_current_decay() for index, ema_unet in enumerate(trainer.ema_unets) if unet_training_mask[index]}
log_data = {
"Epoch": epoch,
"Sample": sample,
"Step": i,
"Samples per second": samples_per_sec,
"Samples Seen": samples_seen,
**ema_decay_list,
**loss_map
}
if is_master:
tracker.log(log_data, step=step())
if is_master and (last_snapshot + save_every_n_samples < sample or (save_immediately and i == 0)): # This will miss by some amount every time, but it's not a big deal... I hope
# It is difficult to gather this kind of info on the accelerator, so we have to do it on the master
print("Saving snapshot")
last_snapshot = sample
# We need to know where the model should be saved
save_trainer(tracker, trainer, epoch, sample, next_task, validation_losses, samples_seen)
if exists(n_sample_images) and n_sample_images > 0:
trainer.eval()
train_images, train_captions = generate_grid_samples(trainer, train_example_data, clip, first_trainable_unet, last_trainable_unet, condition_on_text_encodings, cond_scale, inference_device, "Train: ")
tracker.log_images(train_images, captions=train_captions, image_section="Train Samples", step=step())
if epoch_samples is not None and sample >= epoch_samples:
break
next_task = 'val'
sample = 0
all_average_val_losses = None
if next_task == 'val':
trainer.eval()
accelerator.print(print_ribbon(f"Starting Validation {epoch}", repeat=40))
last_val_sample = val_sample
val_sample_length_tensor = torch.zeros(1, dtype=torch.int, device=inference_device)
average_val_loss_tensor = torch.zeros(1, trainer.num_unets, dtype=torch.float, device=inference_device)
timer = Timer()
accelerator.wait_for_everyone()
i = 0
for i, (img, emb, txt) in enumerate(dataloaders['val']): # Use the accelerate prepared loader
val_sample_length_tensor[0] = len(img)
all_samples = accelerator.gather(val_sample_length_tensor)
total_samples = all_samples.sum().item()
val_sample += total_samples
img_emb = emb.get('img')
has_img_embedding = img_emb is not None
if has_img_embedding:
img_emb, = send_to_device((img_emb,))
text_emb = emb.get('text')
has_text_embedding = text_emb is not None
if has_text_embedding:
text_emb, = send_to_device((text_emb,))
img, = send_to_device((img,))
for unet in range(1, len(decoder.unets)+1):
if not unet_training_mask[unet-1]: # Unet index is the unet number - 1
# No need to evaluate an unchanging unet
continue
forward_params = {}
if has_img_embedding:
forward_params['image_embed'] = img_emb.float()
else:
# Forward pass automatically generates embedding
assert clip is not None
img_embed, img_encoding = clip.embed_image(img)
forward_params['image_embed'] = img_embed
if condition_on_text_encodings:
if has_text_embedding:
forward_params['text_encodings'] = text_emb.float()
else:
# Then we need to pass the text instead
assert clip is not None
tokenized_texts = tokenize(txt, truncate=True).to(device=inference_device)
assert tokenized_texts.shape[0] == len(img), f"The number of texts ({tokenized_texts.shape[0]}) should be the same as the number of images ({len(img)})"
text_embed, text_encodings = clip.embed_text(tokenized_texts)
forward_params['text_encodings'] = text_encodings
loss = trainer.forward(img.float(), **forward_params, unet_number=unet, _device=inference_device)
average_val_loss_tensor[0, unet-1] += loss
if i % VALID_CALC_LOSS_EVERY_ITERS == 0:
samples_per_sec = (val_sample - last_val_sample) / timer.elapsed()
timer.reset()
last_val_sample = val_sample
accelerator.print(f"Epoch {epoch}/{epochs} Val Step {i} - Sample {val_sample} - {samples_per_sec:.2f} samples/sec")
accelerator.print(f"Loss: {(average_val_loss_tensor / (i+1))}")
accelerator.print("")
if validation_samples is not None and val_sample >= validation_samples:
break
print(f"Rank {accelerator.state.process_index} finished validation after {i} steps")
accelerator.wait_for_everyone()
average_val_loss_tensor /= i+1
# Gather all the average loss tensors
all_average_val_losses = accelerator.gather(average_val_loss_tensor)
if is_master:
unet_average_val_loss = all_average_val_losses.mean(dim=0)
val_loss_map = { f"Unet {index} Validation Loss": loss.item() for index, loss in enumerate(unet_average_val_loss) if loss != 0 }
tracker.log(val_loss_map, step=step())
next_task = 'eval'
if next_task == 'eval':
if exists(evaluate_config):
accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
if is_master:
tracker.log(evaluation, step=step())
next_task = 'sample'
val_sample = 0
if next_task == 'sample':
if is_master:
# Generate examples and save the model if we are the master
# Generate sample images
print(print_ribbon(f"Sampling Set {epoch}", repeat=40))
test_images, test_captions = generate_grid_samples(trainer, test_example_data, clip, first_trainable_unet, last_trainable_unet, condition_on_text_encodings, cond_scale, inference_device, "Test: ")
train_images, train_captions = generate_grid_samples(trainer, train_example_data, clip, first_trainable_unet, last_trainable_unet, condition_on_text_encodings, cond_scale, inference_device, "Train: ")
tracker.log_images(test_images, captions=test_captions, image_section="Test Samples", step=step())
tracker.log_images(train_images, captions=train_captions, image_section="Train Samples", step=step())
print(print_ribbon(f"Starting Saving {epoch}", repeat=40))
is_best = False
if all_average_val_losses is not None:
average_loss = all_average_val_losses.mean(dim=0).sum() / sum(unet_training_mask)
if len(validation_losses) == 0 or average_loss < min(validation_losses):
is_best = True
validation_losses.append(average_loss)
save_trainer(tracker, trainer, epoch, sample, next_task, validation_losses, samples_seen, is_best=is_best)
next_task = 'train'
def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_path: str, dummy: bool = False) -> Tracker:
tracker_config = config.tracker
accelerator_config = {
"Distributed": accelerator.distributed_type != accelerate_dataclasses.DistributedType.NO,
"DistributedType": accelerator.distributed_type,
"NumProcesses": accelerator.num_processes,
"MixedPrecision": accelerator.mixed_precision
}
accelerator.wait_for_everyone() # If nodes arrive at this point at different times, they might try to autoresume the current run, which makes no sense and will cause errors
tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
tracker.save_config(config_path, config_name='decoder_config.json')
tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
return tracker
def initialize_training(config: TrainDecoderConfig, config_path):
# Make sure if we are not loading, distributed models are initialized to the same values
torch.manual_seed(config.seed)
# Set up accelerator for configurable distributed training
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=config.train.find_unused_parameters, static_graph=config.train.static_graph)
init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=60*60))
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs, init_kwargs])
if accelerator.num_processes > 1:
# We are using distributed training and want to immediately ensure all can connect
accelerator.print("Waiting for all processes to connect...")
accelerator.wait_for_everyone()
accelerator.print("All processes online and connected")
# If we are in deepspeed fp16 mode, we must ensure learned variance is off
if accelerator.mixed_precision == "fp16" and accelerator.distributed_type == accelerate_dataclasses.DistributedType.DEEPSPEED and config.decoder.learned_variance:
raise ValueError("DeepSpeed fp16 mode does not support learned variance")
# Set up data
all_shards = list(range(config.data.start_shard, config.data.end_shard + 1))
world_size = accelerator.num_processes
rank = accelerator.process_index
shards_per_process = len(all_shards) // world_size
assert shards_per_process > 0, "Not enough shards to split evenly"
my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]
dataloaders = create_dataloaders(
available_shards=my_shards,
img_preproc = config.data.img_preproc,
train_prop = config.data.splits.train,
val_prop = config.data.splits.val,
test_prop = config.data.splits.test,
n_sample_images=config.train.n_sample_images,
**config.data.dict(),
rank = rank,
seed = config.seed,
)
# If clip is in the model, we need to remove it for compatibility with deepspeed
clip = None
if config.decoder.clip is not None:
clip = config.decoder.clip.create() # Of course we keep it to use it during training, just not in the decoder as that causes issues
config.decoder.clip = None
# Create the decoder model and print basic info
decoder = config.decoder.create()
get_num_parameters = lambda model, only_training=False: sum(p.numel() for p in model.parameters() if (p.requires_grad or not only_training))
# Create and initialize the tracker if we are the master
tracker = create_tracker(accelerator, config, config_path, dummy = rank!=0)
has_img_embeddings = config.data.img_embeddings_url is not None
has_text_embeddings = config.data.text_embeddings_url is not None
conditioning_on_text = any([unet.cond_on_text_encodings for unet in config.decoder.unets])
has_clip_model = clip is not None
data_source_string = ""
if has_img_embeddings:
data_source_string += "precomputed image embeddings"
elif has_clip_model:
data_source_string += "clip image embeddings generation"
else:
raise ValueError("No image embeddings source specified")
if conditioning_on_text:
if has_text_embeddings:
data_source_string += " and precomputed text embeddings"
elif has_clip_model:
data_source_string += " and clip text encoding generation"
else:
raise ValueError("No text embeddings source specified")
accelerator.print(print_ribbon("Loaded Config", repeat=40))
accelerator.print(f"Running training with {accelerator.num_processes} processes and {accelerator.distributed_type} distributed training")
accelerator.print(f"Training using {data_source_string}. {'conditioned on text' if conditioning_on_text else 'not conditioned on text'}")
accelerator.print(f"Number of parameters: {get_num_parameters(decoder)} total; {get_num_parameters(decoder, only_training=True)} training")
for i, unet in enumerate(decoder.unets):
accelerator.print(f"Unet {i} has {get_num_parameters(unet)} total; {get_num_parameters(unet, only_training=True)} training")
train(dataloaders, decoder, accelerator,
clip=clip,
tracker=tracker,
inference_device=accelerator.device,
evaluate_config=config.evaluate,
condition_on_text_encodings=conditioning_on_text,
**config.train.dict(),
)
# Create a simple click command line interface to load the config and start the training
@click.command()
@click.option("--config_file", default="./train_decoder_config.json", help="Path to config file")
def main(config_file):
config_file_path = Path(config_file)
config = TrainDecoderConfig.from_json_path(str(config_file_path))
initialize_training(config, config_path=config_file_path)
if __name__ == "__main__":
main()
| DALLE2-pytorch-main | train_decoder.py |
import click
import torch
from torch import nn
from typing import List
from accelerate import Accelerator
from accelerate.utils import set_seed
from torch.utils.data import DataLoader
from embedding_reader import EmbeddingReader
from accelerate.utils import dataclasses as accelerate_dataclasses
from dalle2_pytorch.utils import Timer
from dalle2_pytorch.trackers import Tracker
from dalle2_pytorch import DiffusionPriorTrainer
from dalle2_pytorch.dataloaders import get_reader, make_splits
from dalle2_pytorch.train_configs import (
DiffusionPriorConfig,
DiffusionPriorTrainConfig,
TrainDiffusionPriorConfig,
)
# helpers
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
def exists(val):
return val is not None
def all_between(values: list, lower_bound, upper_bound):
for value in values:
if value < lower_bound or value > upper_bound:
return False
return True
def make_model(
prior_config: DiffusionPriorConfig,
train_config: DiffusionPriorTrainConfig,
device: str = None,
accelerator: Accelerator = None,
):
# create model from config
diffusion_prior = prior_config.create()
# instantiate the trainer
trainer = DiffusionPriorTrainer(
diffusion_prior=diffusion_prior,
lr=train_config.lr,
wd=train_config.wd,
max_grad_norm=train_config.max_grad_norm,
amp=train_config.amp,
use_ema=train_config.use_ema,
device=device,
accelerator=accelerator,
warmup_steps=train_config.warmup_steps,
)
return trainer
def create_tracker(
accelerator: Accelerator,
config: TrainDiffusionPriorConfig,
config_path: str,
dummy: bool = False,
) -> Tracker:
tracker_config = config.tracker
accelerator_config = {
"Distributed": accelerator.distributed_type
!= accelerate_dataclasses.DistributedType.NO,
"DistributedType": accelerator.distributed_type,
"NumProcesses": accelerator.num_processes,
"MixedPrecision": accelerator.mixed_precision,
}
tracker: Tracker = tracker_config.create(
config, accelerator_config, dummy_mode=dummy
)
tracker.save_config(config_path, config_name="prior_config.json")
return tracker
def pad_gather_reduce(trainer: DiffusionPriorTrainer, x, method="mean"):
"""
pad a value or tensor across all processes and gather
params:
- trainer: a trainer that carries an accelerator object
- x: a number or torch tensor to reduce
- method: "mean", "sum", "max", "min"
return:
- the reduced tensor (according to `method`) after masking out 0's
- None if the gather resulted in an empty tensor
"""
assert method in [
"mean",
"sum",
"max",
"min",
], "This function has limited capabilities [sum, mean, max, min]"
assert x is not None, "Cannot reduce a None type object"
# wait for everyone to arrive here before gathering
if type(x) is not torch.Tensor:
x = torch.tensor([x])
# verify that the tensor is on the proper device
x = x.to(trainer.device)
# pad across processes
padded_x = trainer.accelerator.pad_across_processes(x, dim=0)
# gather across all processes
gathered_x = trainer.accelerator.gather(padded_x)
# mask out zeros
masked_x = gathered_x[gathered_x != 0]
# if the tensor is empty, warn and return None
if len(masked_x) == 0:
click.secho(
f"The call to this method resulted in an empty tensor after masking out zeros. The gathered tensor was this: {gathered_x} and the original value passed was: {x}.",
fg="red",
)
return None
if method == "mean":
return torch.mean(masked_x)
elif method == "sum":
return torch.sum(masked_x)
elif method == "max":
return torch.max(masked_x)
elif method == "min":
return torch.min(masked_x)
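# Illustrative sketch (not part of the original script): pad_gather_reduce is typically
# used to turn a per-rank scalar, such as a loss or a sample count, into a single value
# reduced across all processes. `trainer` is assumed to be an already constructed
# DiffusionPriorTrainer; this helper is never called by the training loop itself.
def _example_pad_gather_reduce(trainer: DiffusionPriorTrainer, local_loss: torch.Tensor):
    # every rank passes its own loss; the result is the mean over all non-zero entries
    mean_loss = pad_gather_reduce(trainer, local_loss, method = "mean")
    # counts are summed instead of averaged
    total_samples = pad_gather_reduce(trainer, 16, method = "sum")
    return mean_loss, total_samples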
def save_trainer(
tracker: Tracker,
trainer: DiffusionPriorTrainer,
is_latest: bool,
is_best: bool,
epoch: int,
samples_seen: int,
best_validation_loss: float,
):
"""
Logs the model with an appropriate method depending on the tracker
"""
trainer.accelerator.wait_for_everyone()
if trainer.accelerator.is_main_process:
click.secho(
f"RANK:{trainer.accelerator.process_index} | Saving Model | Best={is_best} | Latest={is_latest}",
fg="magenta",
)
tracker.save(
trainer=trainer,
is_best=is_best,
is_latest=is_latest,
epoch=int(epoch),
samples_seen=int(samples_seen),
best_validation_loss=best_validation_loss,
)
def recall_trainer(tracker: Tracker, trainer: DiffusionPriorTrainer):
"""
Loads the model with an appropriate method depending on the tracker
"""
if trainer.accelerator.is_main_process:
click.secho(f"Loading model from {type(tracker.loader).__name__}", fg="yellow")
state_dict = tracker.recall()
trainer.load(state_dict, strict=True)
return (
int(state_dict.get("epoch", 0)),
state_dict.get("best_validation_loss", 0),
int(state_dict.get("samples_seen", 0)),
)
# eval functions
def report_validation_loss(
trainer: DiffusionPriorTrainer,
dataloader: DataLoader,
text_conditioned: bool,
use_ema: bool,
tracker: Tracker,
split: str,
tracker_folder: str,
loss_type: str,
):
"""
Compute the validation loss on a given subset of data.
"""
if trainer.accelerator.is_main_process:
click.secho(
f"Measuring performance on {use_ema}-{split} split",
fg="green",
blink=True,
)
total_loss = torch.zeros(1, dtype=torch.float, device=trainer.device)
for image_embeddings, text_data in dataloader:
image_embeddings = image_embeddings.to(trainer.device)
text_data = text_data.to(trainer.device)
input_args = dict(image_embed=image_embeddings)
if text_conditioned:
input_args = dict(**input_args, text=text_data)
else:
input_args = dict(**input_args, text_embed=text_data)
if use_ema:
loss = trainer.ema_diffusion_prior(**input_args)
else:
loss = trainer(**input_args)
total_loss += loss
# compute the average loss across all processes
avg_loss = pad_gather_reduce(trainer, total_loss, method="mean")
stats = {f"{tracker_folder}/{loss_type}-loss": avg_loss}
# print and log results on main process
tracker.log(stats, step=trainer.step.item() + 1)
return avg_loss
def report_cosine_sims(
trainer: DiffusionPriorTrainer,
dataloader: DataLoader,
text_conditioned: bool,
tracker: Tracker,
split: str,
timesteps: int,
tracker_folder: str,
):
trainer.eval()
if trainer.accelerator.is_main_process:
click.secho(
f"Measuring Cosine-Similarity on {split} split with {timesteps} timesteps",
fg="green",
blink=True,
)
for test_image_embeddings, text_data in dataloader:
test_image_embeddings = test_image_embeddings.to(trainer.device)
text_data = text_data.to(trainer.device)
# we are text conditioned, we produce an embedding from the tokenized text
if text_conditioned:
text_embedding, text_encodings = trainer.embed_text(text_data)
text_cond = dict(text_embed=text_embedding, text_encodings=text_encodings)
else:
text_embedding = text_data
text_cond = dict(text_embed=text_embedding)
# make a copy of the text embeddings for shuffling
text_embed_shuffled = text_embedding.clone()
# roll the text to simulate "unrelated" captions
rolled_idx = torch.roll(torch.arange(text_embedding.shape[0]), 1)
text_embed_shuffled = text_embed_shuffled[rolled_idx]
text_embed_shuffled = text_embed_shuffled / text_embed_shuffled.norm(
dim=1, keepdim=True
)
if text_conditioned:
text_encodings_shuffled = text_encodings[rolled_idx]
else:
text_encodings_shuffled = None
text_cond_shuffled = dict(
text_embed=text_embed_shuffled, text_encodings=text_encodings_shuffled
)
# prepare the text embedding
text_embed = text_embedding / text_embedding.norm(dim=1, keepdim=True)
# prepare image embeddings
test_image_embeddings = test_image_embeddings / test_image_embeddings.norm(
dim=1, keepdim=True
)
# predict on the unshuffled text embeddings
predicted_image_embeddings = trainer.p_sample_loop(
test_image_embeddings.shape,
text_cond,
timesteps=timesteps,
)
predicted_image_embeddings = (
predicted_image_embeddings
/ predicted_image_embeddings.norm(dim=1, keepdim=True)
)
# predict on the shuffled embeddings
predicted_unrelated_embeddings = trainer.p_sample_loop(
test_image_embeddings.shape,
text_cond_shuffled,
timesteps=timesteps,
)
predicted_unrelated_embeddings = (
predicted_unrelated_embeddings
/ predicted_unrelated_embeddings.norm(dim=1, keepdim=True)
)
# calculate similarities
orig_sim = pad_gather_reduce(
trainer, cos(text_embed, test_image_embeddings), method="mean"
)
pred_sim = pad_gather_reduce(
trainer, cos(text_embed, predicted_image_embeddings), method="mean"
)
unrel_sim = pad_gather_reduce(
trainer, cos(text_embed, predicted_unrelated_embeddings), method="mean"
)
pred_img_sim = pad_gather_reduce(
trainer,
cos(test_image_embeddings, predicted_image_embeddings),
method="mean",
)
stats = {
f"{tracker_folder}/baseline similarity [steps={timesteps}]": orig_sim,
f"{tracker_folder}/similarity with text [steps={timesteps}]": pred_sim,
f"{tracker_folder}/similarity with original image [steps={timesteps}]": pred_img_sim,
f"{tracker_folder}/similarity with unrelated caption [steps={timesteps}]": unrel_sim,
f"{tracker_folder}/difference from baseline similarity [steps={timesteps}]": pred_sim
- orig_sim,
}
tracker.log(stats, step=trainer.step.item() + 1)
def eval_model(
trainer: DiffusionPriorTrainer,
dataloader: DataLoader,
text_conditioned: bool,
split: str,
tracker: Tracker,
use_ema: bool,
report_cosine: bool,
report_loss: bool,
timesteps: List[int],
loss_type: str = None,
):
"""
Run evaluation on a model and track metrics
returns: loss if requested
"""
trainer.eval()
use_ema = "ema" if use_ema else "online"
tracker_folder = f"metrics/{use_ema}-{split}"
# determine if valid timesteps are passed
min_timesteps = trainer.accelerator.unwrap_model(
trainer.diffusion_prior
).sample_timesteps
max_timesteps = trainer.accelerator.unwrap_model(
trainer.diffusion_prior
).noise_scheduler.num_timesteps
assert all_between(
timesteps, lower_bound=min_timesteps, upper_bound=max_timesteps
), f"all timesteps values must be between {min_timesteps} and {max_timesteps}: got {timesteps}"
# measure cosine metrics across various eta and timesteps
if report_cosine:
for timestep in timesteps:
report_cosine_sims(
trainer,
dataloader=dataloader,
text_conditioned=text_conditioned,
tracker=tracker,
split=split,
timesteps=timestep,
tracker_folder=tracker_folder,
)
# measure loss on a separate split of data
if report_loss:
loss = report_validation_loss(
trainer=trainer,
dataloader=dataloader,
text_conditioned=text_conditioned,
use_ema=use_ema,
tracker=tracker,
split=split,
tracker_folder=tracker_folder,
loss_type=loss_type,
)
return loss
# training script
def train(
trainer: DiffusionPriorTrainer,
tracker: Tracker,
train_loader: DataLoader,
eval_loader: DataLoader,
test_loader: DataLoader,
config: DiffusionPriorTrainConfig,
):
# init timers
save_timer = Timer() # when to save
samples_timer = Timer() # samples/sec
validation_profiler = Timer() # how long is validation taking
validation_countdown = Timer() # when to perform evaluation
# keep track of best validation loss
best_validation_loss = config.train.best_validation_loss
samples_seen = config.train.num_samples_seen
# do training
start_epoch = config.train.current_epoch
for epoch in range(start_epoch, config.train.epochs):
# if we finished out an old epoch, reset the distribution to be a full epoch
tracker.log({"tracking/epoch": epoch}, step=trainer.step.item())
if train_loader.dataset.get_start() > 0 and epoch == start_epoch+1:
if trainer.accelerator.is_main_process:
click.secho(f"Finished resumed epoch...resetting dataloader.")
train_loader.dataset.set_start(0)
for img, txt in train_loader:
# setup things every step
trainer.train()
current_step = trainer.step.item()
samples_timer.reset()
# place data on device
img = img.to(trainer.device)
txt = txt.to(trainer.device)
# pass to model
loss = trainer(text=txt, image_embed=img)
# perform backprop & apply EMA updates
trainer.update()
# gather info about training step
all_loss = pad_gather_reduce(trainer, loss, method="mean")
num_samples = pad_gather_reduce(trainer, len(txt), method="sum")
samples_per_sec = num_samples / samples_timer.elapsed()
samples_seen += num_samples
ema_decay = trainer.ema_diffusion_prior.get_current_decay()
# log
tracker.log(
{
"tracking/samples-sec": samples_per_sec,
"tracking/samples-seen": samples_seen,
"tracking/ema-decay": ema_decay,
f"tracking/training-{config.prior.loss_type}": all_loss,
},
step=current_step,
)
# Metric Tracking @ Timed Intervals
eval_delta = pad_gather_reduce(
trainer, validation_countdown.elapsed(), method="min"
)
if eval_delta is not None and eval_delta > config.data.eval_every_seconds:
# begin timing how long this takes
validation_profiler.reset()
# package kwargs for evaluation
eval_kwargs = {
"trainer": trainer,
"tracker": tracker,
"text_conditioned": config.prior.condition_on_text_encodings,
"timesteps": config.train.eval_timesteps,
}
# ONLINE MODEL : COSINE : LOSS : VALIDATION SPLIT
eval_model(
dataloader=eval_loader,
loss_type=config.prior.loss_type,
split="validation",
use_ema=False,
report_cosine=False,
report_loss=True,
**eval_kwargs,
)
# EMA MODEL : COSINE : LOSS : VALIDATION DATA
ema_val_loss = eval_model(
dataloader=eval_loader,
loss_type=config.prior.loss_type,
split="validation",
use_ema=True,
report_cosine=True,
report_loss=True,
**eval_kwargs,
)
tracker.log(
{
"tracking/validation length (minutes)": validation_profiler.elapsed()
/ 60
}
)
# check if the ema validation is the lowest seen yet
if ema_val_loss < best_validation_loss:
best_validation_loss = ema_val_loss
# go save the model as best
save_trainer(
trainer=trainer,
tracker=tracker,
is_best=True,
is_latest=False,
samples_seen=samples_seen,
epoch=epoch,
best_validation_loss=best_validation_loss,
)
# reset timer for validation
validation_countdown.reset()
elif eval_delta is None:
click.secho(
f"Error occured reading the eval time on rank: {trainer.device}",
fg="yellow",
)
# save as latest model on schedule
save_delta = pad_gather_reduce(trainer, save_timer.elapsed(), method="min")
if save_delta is not None and save_delta >= config.train.save_every_seconds:
save_trainer(
trainer=trainer,
tracker=tracker,
is_best=False,
is_latest=True,
samples_seen=samples_seen,
epoch=epoch,
best_validation_loss=best_validation_loss,
)
save_timer.reset()
elif save_delta is None:
click.secho(
f"Error occured reading the save time on rank: {trainer.device}",
fg="yellow",
)
# evaluate on test data
if trainer.accelerator.is_main_process:
click.secho(f"Starting Test", fg="red")
# save one last time as latest before beginning validation
save_trainer(
tracker=tracker,
trainer=trainer,
is_best=False,
is_latest=True,
samples_seen=samples_seen,
epoch=epoch,
best_validation_loss=best_validation_loss,
)
test_loss = eval_model(
trainer=trainer,
dataloader=test_loader,
text_conditioned=config.prior.condition_on_text_encodings,
split="test",
tracker=tracker,
use_ema=True,
report_cosine=False,
report_loss=True,
timesteps=config.train.eval_timesteps,
loss_type=config.prior.loss_type,
)
if test_loss < best_validation_loss:
best_validation_loss = test_loss
# go save the model as best
save_trainer(
trainer=trainer,
tracker=tracker,
is_best=True,
is_latest=False,
samples_seen=samples_seen,
epoch=epoch,
best_validation_loss=test_loss,
)
def initialize_training(config_file, accelerator):
"""
Parse the configuration file, and prepare everything necessary for training
"""
# load the configuration file
if accelerator.is_main_process:
click.secho(f"Loading configuration from {config_file}", fg="green")
config = TrainDiffusionPriorConfig.from_json_path(config_file)
# seed
set_seed(config.train.random_seed)
# get a device
device = accelerator.device
# make the trainer (will automatically distribute if possible & configured)
trainer: DiffusionPriorTrainer = make_model(
config.prior, config.train, device, accelerator
).to(device)
# create a tracker
tracker = create_tracker(
accelerator, config, config_file, dummy=accelerator.process_index != 0
)
# reload from checkpoint
if tracker.can_recall:
current_epoch, best_validation_loss, samples_seen = recall_trainer(
tracker=tracker, trainer=trainer
)
# display best values
if trainer.accelerator.is_main_process:
click.secho(f"Current Epoch: {current_epoch} | Best Val Loss: {best_validation_loss} | Samples Seen: {samples_seen}", fg="yellow")
# update config to reflect recalled values
config.train.num_samples_seen = samples_seen
config.train.current_epoch = current_epoch
config.train.best_validation_loss = best_validation_loss
# fetch and prepare data
if trainer.accelerator.is_main_process:
click.secho("Grabbing data...", fg="blue", blink=True)
trainer.accelerator.wait_for_everyone()
img_reader = get_reader(
text_conditioned=trainer.text_conditioned,
img_url=config.data.image_url,
meta_url=config.data.meta_url,
)
# calculate start point within epoch
trainer.accelerator.wait_for_everyone()
train_loader, eval_loader, test_loader = make_splits(
text_conditioned=trainer.text_conditioned,
batch_size=config.data.batch_size,
num_data_points=config.data.num_data_points,
train_split=config.data.splits.train,
eval_split=config.data.splits.val,
image_reader=img_reader,
rank=accelerator.state.process_index,
world_size=accelerator.state.num_processes,
start=0,
)
# update the start point to finish out the epoch on a resumed run
if tracker.can_recall:
samples_seen = config.train.num_samples_seen
length = (
config.data.num_data_points
if samples_seen <= img_reader.count
else img_reader.count
)
scaled_samples = length * config.train.current_epoch
start_point = (
scaled_samples - samples_seen if scaled_samples > samples_seen else samples_seen
)
if trainer.accelerator.is_main_process:
click.secho(f"Resuming at sample: {start_point}", fg="yellow")
train_loader.dataset.set_start(start_point)
# start training
if trainer.accelerator.is_main_process:
click.secho(
f"Beginning Prior Training : Distributed={accelerator.state.distributed_type != accelerate_dataclasses.DistributedType.NO}",
fg="yellow",
)
train(
trainer=trainer,
tracker=tracker,
train_loader=train_loader,
eval_loader=eval_loader,
test_loader=test_loader,
config=config,
)
@click.command()
@click.option("--config_file", default="configs/train_prior_config.example.json")
def main(config_file):
# start HFA
accelerator = Accelerator()
# setup training
initialize_training(config_file, accelerator)
if __name__ == "__main__":
main()
| DALLE2-pytorch-main | train_diffusion_prior.py |
from setuptools import setup, find_packages
exec(open('dalle2_pytorch/version.py').read())
setup(
name = 'dalle2-pytorch',
packages = find_packages(exclude=[]),
include_package_data = True,
entry_points={
'console_scripts': [
'dalle2_pytorch = dalle2_pytorch.cli:main',
'dream = dalle2_pytorch.cli:dream'
],
},
version = __version__,
license='MIT',
description = 'DALL-E 2',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/dalle2-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'text to image'
],
install_requires=[
'accelerate',
'click',
'open-clip-torch>=2.0.0,<3.0.0',
'clip-anytorch>=2.5.2',
'coca-pytorch>=0.0.5',
'ema-pytorch>=0.0.7',
'einops>=0.6.1',
'embedding-reader',
'kornia>=0.5.4',
'numpy',
'packaging',
'pillow',
'pydantic>=2',
'pytorch-warmup',
'resize-right>=0.0.2',
'rotary-embedding-torch',
'torch>=1.10',
'torchvision',
'tqdm',
'vector-quantize-pytorch',
'x-clip>=0.4.4',
'webdataset>=0.2.5',
'fsspec>=2022.1.0',
'torchmetrics[image]>=0.8.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| DALLE2-pytorch-main | setup.py |
import math
import random
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from torch import nn, einsum
import torchvision.transforms as T
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange
from kornia.filters import gaussian_blur2d
import kornia.augmentation as K
from dalle2_pytorch.tokenizer import tokenizer
from dalle2_pytorch.vqgan_vae import NullVQGanVAE, VQGanVAE
from resize_right import resize
# rotary embeddings
from rotary_embedding_torch import RotaryEmbedding
# use x-clip
from x_clip import CLIP
from coca_pytorch import CoCa
# constants
NAT = 1. / math.log(2.)
UnetOutput = namedtuple('UnetOutput', ['pred', 'var_interp_frac_unnormalized'])
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def first(arr, d = None):
if len(arr) == 0:
return d
return arr[0]
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = None, validate = True):
if isinstance(val, list):
val = tuple(val)
out = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length) and validate:
assert len(out) == length
return out
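# Illustrative sketch (not part of the original code): cast_tuple normalizes scalar or
# list hyperparameters into fixed length tuples, e.g. one value per unet in the decoder.
def _example_cast_tuple_usage():
    assert cast_tuple(3, length = 4) == (3, 3, 3, 3)     # scalar is broadcast to the length
    assert cast_tuple([1, 2], length = 2) == (1, 2)      # lists are converted to tuples
    assert cast_tuple('same') == ('same',)               # no length -> single element tuple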
def module_device(module):
if isinstance(module, nn.Identity):
return 'cpu' # It doesn't matter
return next(module.parameters()).device
def zero_init_(m):
nn.init.zeros_(m.weight)
if exists(m.bias):
nn.init.zeros_(m.bias)
@contextmanager
def null_context(*args, **kwargs):
yield
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def is_float_dtype(dtype):
return any([dtype == float_dtype for float_dtype in (torch.float64, torch.float32, torch.float16, torch.bfloat16)])
def is_list_str(x):
if not isinstance(x, (list, tuple)):
return False
return all([type(el) == str for el in x])
def pad_tuple_to_length(t, length, fillvalue = None):
remain_length = length - len(t)
if remain_length <= 0:
return t
return (*t, *((fillvalue,) * remain_length))
# checkpointing helper function
def make_checkpointable(fn, **kwargs):
if isinstance(fn, nn.ModuleList):
return [maybe(make_checkpointable)(el, **kwargs) for el in fn]
condition = kwargs.pop('condition', None)
if exists(condition) and not condition(fn):
return fn
@wraps(fn)
def inner(*args):
input_needs_grad = any([isinstance(el, torch.Tensor) and el.requires_grad for el in args])
if not input_needs_grad:
return fn(*args)
return checkpoint(fn, *args)
return inner
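# Illustrative sketch (not part of the original code): wrapping a block with
# make_checkpointable trades compute for memory - activations are recomputed during the
# backward pass. Checkpointing only engages when some tensor argument requires grad.
def _example_make_checkpointable():
    block = nn.Linear(8, 8)
    checkpointed_block = make_checkpointable(block)
    x = torch.randn(2, 8, requires_grad = True)
    return checkpointed_block(x)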
# for controlling freezing of CLIP
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
def freeze_model_and_make_eval_(model):
model.eval()
freeze_all_layers_(model)
# tensor helpers
def log(t, eps = 1e-12):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, dim = -1)
def resize_image_to(
image,
target_image_size,
clamp_range = None,
nearest = False,
**kwargs
):
orig_image_size = image.shape[-1]
if orig_image_size == target_image_size:
return image
if not nearest:
scale_factors = target_image_size / orig_image_size
out = resize(image, scale_factors = scale_factors, **kwargs)
else:
out = F.interpolate(image, target_image_size, mode = 'nearest')
if exists(clamp_range):
out = out.clamp(*clamp_range)
return out
# image normalization functions
# ddpms expect images to be in the range of -1 to 1
# but CLIP may expect images in a different range (handled by its own preprocessing)
def normalize_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_zero_to_one(normed_img):
return (normed_img + 1) * 0.5
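# Illustrative sketch (not part of the original code): round tripping between the [0, 1]
# range used by image loaders and the [-1, 1] range the ddpm unets operate in.
def _example_image_normalization():
    img = torch.rand(1, 3, 64, 64)                    # images in [0, 1]
    ddpm_img = normalize_neg_one_to_one(img)          # -> [-1, 1] for the diffusion model
    recovered = unnormalize_zero_to_one(ddpm_img)     # back to [0, 1]
    assert torch.allclose(img, recovered, atol = 1e-6)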
# clip related adapters
EmbeddedText = namedtuple('EmbedTextReturn', ['text_embed', 'text_encodings'])
EmbeddedImage = namedtuple('EmbedImageReturn', ['image_embed', 'image_encodings'])
class BaseClipAdapter(nn.Module):
def __init__(self, clip, **kwargs):
super().__init__()
self.clip = clip
self.overrides = kwargs
def validate_and_resize_image(self, image):
image_size = image.shape[-1]
assert image_size >= self.image_size, f'you are passing in an image of size {image_size} but CLIP requires the image size to be at least {self.image_size}'
return resize_image_to(image, self.image_size)
@property
def dim_latent(self):
raise NotImplementedError
@property
def image_size(self):
raise NotImplementedError
@property
def image_channels(self):
raise NotImplementedError
@property
def max_text_len(self):
raise NotImplementedError
def embed_text(self, text):
raise NotImplementedError
def embed_image(self, image):
raise NotImplementedError
class XClipAdapter(BaseClipAdapter):
@property
def dim_latent(self):
return self.clip.dim_latent
@property
def image_size(self):
return self.clip.image_size
@property
def image_channels(self):
return self.clip.image_channels
@property
def max_text_len(self):
return self.clip.text_seq_len
@torch.no_grad()
def embed_text(self, text):
text = text[..., :self.max_text_len]
text_mask = text != 0
encoder_output = self.clip.text_transformer(text)
encoder_output_is_cls = encoder_output.ndim == 3
text_cls, text_encodings = (encoder_output[:, 0], encoder_output[:, 1:]) if encoder_output_is_cls else (encoder_output, None)
text_embed = self.clip.to_text_latent(text_cls)
if exists(text_encodings):
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
return EmbeddedText(l2norm(text_embed), text_encodings)
@torch.no_grad()
def embed_image(self, image):
image = self.validate_and_resize_image(image)
encoder_output = self.clip.visual_transformer(image)
image_cls, image_encodings = encoder_output[:, 0], encoder_output[:, 1:]
image_embed = self.clip.to_visual_latent(image_cls)
return EmbeddedImage(l2norm(image_embed), image_encodings)
class CoCaAdapter(BaseClipAdapter):
@property
def dim_latent(self):
return self.clip.dim
@property
def image_size(self):
assert 'image_size' in self.overrides
return self.overrides['image_size']
@property
def image_channels(self):
assert 'image_channels' in self.overrides
return self.overrides['image_channels']
@property
def max_text_len(self):
assert 'max_text_len' in self.overrides
return self.overrides['max_text_len']
@torch.no_grad()
def embed_text(self, text):
text = text[..., :self.max_text_len]
text_mask = text != 0
text_embed, text_encodings = self.clip.embed_text(text)
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
return EmbeddedText(text_embed, text_encodings)
@torch.no_grad()
def embed_image(self, image):
image = self.validate_and_resize_image(image)
image_embed, image_encodings = self.clip.embed_image(image)
return EmbeddedImage(image_embed, image_encodings)
class OpenAIClipAdapter(BaseClipAdapter):
def __init__(
self,
name = 'ViT-B/32'
):
import clip
openai_clip, preprocess = clip.load(name)
super().__init__(openai_clip)
self.eos_id = 49407 # for handling 0 being also '!'
text_attention_final = self.find_layer('ln_final')
self.dim_latent_ = text_attention_final.weight.shape[0]
self.handle = text_attention_final.register_forward_hook(self._hook)
self.clip_normalize = preprocess.transforms[-1]
self.cleared = False
def find_layer(self, layer):
modules = dict([*self.clip.named_modules()])
return modules.get(layer, None)
def clear(self):
if self.cleared:
return
self.handle()
def _hook(self, _, inputs, outputs):
self.text_encodings = outputs
@property
def dim_latent(self):
return self.dim_latent_
@property
def image_size(self):
return self.clip.visual.input_resolution
@property
def image_channels(self):
return 3
@property
def max_text_len(self):
return self.clip.context_length
@torch.no_grad()
def embed_text(self, text):
text = text[..., :self.max_text_len]
is_eos_id = (text == self.eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
text_mask = text_mask & (text != 0)
assert not self.cleared
text_embed = self.clip.encode_text(text)
text_encodings = self.text_encodings
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
del self.text_encodings
return EmbeddedText(l2norm(text_embed.float()), text_encodings.float())
@torch.no_grad()
def embed_image(self, image):
assert not self.cleared
image = self.validate_and_resize_image(image)
image = self.clip_normalize(image)
image_embed = self.clip.encode_image(image)
return EmbeddedImage(l2norm(image_embed.float()), None)
class OpenClipAdapter(BaseClipAdapter):
def __init__(
self,
name = 'ViT-B/32',
pretrained = 'laion400m_e32'
):
import open_clip
clip, _, preprocess = open_clip.create_model_and_transforms(name, pretrained = pretrained)
super().__init__(clip)
self.eos_id = 49407
text_attention_final = self.find_layer('ln_final')
self._dim_latent = text_attention_final.weight.shape[0]
self.handle = text_attention_final.register_forward_hook(self._hook)
self.clip_normalize = preprocess.transforms[-1]
self.cleared = False
def find_layer(self, layer):
modules = dict([*self.clip.named_modules()])
return modules.get(layer, None)
def clear(self):
if self.cleared:
return
self.handle()
def _hook(self, _, inputs, outputs):
self.text_encodings = outputs
@property
def dim_latent(self):
return self._dim_latent
@property
def image_size(self):
image_size = self.clip.visual.image_size
if isinstance(image_size, tuple):
return max(image_size)
return image_size
@property
def image_channels(self):
return 3
@property
def max_text_len(self):
return self.clip.context_length
@torch.no_grad()
def embed_text(self, text):
text = text[..., :self.max_text_len]
is_eos_id = (text == self.eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
text_mask = text_mask & (text != 0)
assert not self.cleared
text_embed = self.clip.encode_text(text)
text_encodings = self.text_encodings
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
del self.text_encodings
return EmbeddedText(l2norm(text_embed.float()), text_encodings.float())
@torch.no_grad()
def embed_image(self, image):
assert not self.cleared
image = self.validate_and_resize_image(image)
image = self.clip_normalize(image)
image_embed = self.clip.encode_image(image)
return EmbeddedImage(l2norm(image_embed.float()), None)
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
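# Illustrative sketch (not part of the original code): for classifier free guidance the
# conditioning is randomly kept with probability (1 - cond_drop_prob) during training,
# so the same network also learns the unconditional distribution.
def _example_prob_mask_like(batch = 4, cond_drop_prob = 0.2, device = 'cpu'):
    keep_mask = prob_mask_like((batch,), 1. - cond_drop_prob, device = device)
    # rows where keep_mask is False would have their text / image conditioning nulled out
    return keep_mask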
# gaussian diffusion helper functions
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
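# Illustrative sketch (not part of the original code): extract gathers the per-timestep
# coefficient for each element of the batch and reshapes it so it broadcasts against an
# image shaped tensor.
def _example_extract_usage():
    a = torch.linspace(0.1, 1.0, steps = 10)     # e.g. a coefficient per diffusion timestep
    t = torch.tensor([0, 9])                     # one timestep index per batch element
    coef = extract(a, t, (2, 3, 64, 64))         # -> shape (2, 1, 1, 1) with values 0.1, 1.0
    return coef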
def meanflat(x):
return x.mean(dim = tuple(range(1, len(x.shape))))
def normal_kl(mean1, logvar1, mean2, logvar2):
return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * torch.exp(-logvar2))
def approx_standard_normal_cdf(x):
return 0.5 * (1.0 + torch.tanh(((2.0 / math.pi) ** 0.5) * (x + 0.044715 * (x ** 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales, thres = 0.999):
assert x.shape == means.shape == log_scales.shape
# attempting to correct nan gradients when learned variance is turned on
# in the setting of deepspeed fp16
eps = 1e-12 if x.dtype == torch.float32 else 1e-3
centered_x = x - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = log(cdf_plus, eps = eps)
log_one_minus_cdf_min = log(1. - cdf_min, eps = eps)
cdf_delta = cdf_plus - cdf_min
log_probs = torch.where(x < -thres,
log_cdf_plus,
torch.where(x > thres,
log_one_minus_cdf_min,
log(cdf_delta, eps = eps)))
return log_probs
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / first(alphas_cumprod)
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
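# Illustrative sketch (not part of the original code): each beta schedule in this file
# returns a tensor of `timesteps` betas whose cumulative product of alphas decreases
# monotonically, i.e. noise only accumulates along the diffusion chain.
def _example_beta_schedule(timesteps = 1000):
    betas = cosine_beta_schedule(timesteps)
    alphas_cumprod = torch.cumprod(1. - betas, dim = 0)
    assert betas.shape == (timesteps,)
    assert (alphas_cumprod[1:] <= alphas_cumprod[:-1]).all()
    return betas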
def linear_beta_schedule(timesteps):
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
def quadratic_beta_schedule(timesteps):
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype = torch.float64) ** 2
def sigmoid_beta_schedule(timesteps):
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
betas = torch.linspace(-6, 6, timesteps, dtype = torch.float64)
return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
class NoiseScheduler(nn.Module):
def __init__(self, *, beta_schedule, timesteps, loss_type, p2_loss_weight_gamma = 0., p2_loss_weight_k = 1):
super().__init__()
if beta_schedule == "cosine":
betas = cosine_beta_schedule(timesteps)
elif beta_schedule == "linear":
betas = linear_beta_schedule(timesteps)
elif beta_schedule == "quadratic":
betas = quadratic_beta_schedule(timesteps)
elif beta_schedule == "jsd":
betas = 1.0 / torch.linspace(timesteps, 1, timesteps)
elif beta_schedule == "sigmoid":
betas = sigmoid_beta_schedule(timesteps)
else:
raise NotImplementedError()
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis = 0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
if loss_type == 'l1':
loss_fn = F.l1_loss
elif loss_type == 'l2':
loss_fn = F.mse_loss
elif loss_type == 'huber':
loss_fn = F.smooth_l1_loss
else:
raise NotImplementedError()
self.loss_type = loss_type
self.loss_fn = loss_fn
# register buffer helper function to cast double back to float
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# p2 loss reweighting
self.has_p2_loss_reweighting = p2_loss_weight_gamma > 0.
register_buffer('p2_loss_weight', (p2_loss_weight_k + alphas_cumprod / (1 - alphas_cumprod)) ** -p2_loss_weight_gamma)
def sample_random_times(self, batch):
return torch.randint(0, self.num_timesteps, (batch,), device = self.betas.device, dtype = torch.long)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def calculate_v(self, x_start, t, noise = None):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def q_sample_from_to(self, x_from, from_t, to_t, noise = None):
shape = x_from.shape
noise = default(noise, lambda: torch.randn_like(x_from))
alpha = extract(self.sqrt_alphas_cumprod, from_t, shape)
sigma = extract(self.sqrt_one_minus_alphas_cumprod, from_t, shape)
alpha_next = extract(self.sqrt_alphas_cumprod, to_t, shape)
sigma_next = extract(self.sqrt_one_minus_alphas_cumprod, to_t, shape)
return x_from * (alpha_next / alpha) + noise * (sigma_next * alpha - sigma * alpha_next) / alpha
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def p2_reweigh_loss(self, loss, times):
if not self.has_p2_loss_reweighting:
return loss
return loss * extract(self.p2_loss_weight, times, loss.shape)
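# Illustrative sketch (not part of the original code): using the scheduler to run the
# closed form forward process q(x_t | x_0), i.e. noising a clean image to a random
# timestep, as done inside the diffusion losses.
def _example_noise_scheduler_q_sample():
    scheduler = NoiseScheduler(beta_schedule = 'cosine', timesteps = 1000, loss_type = 'l2')
    x_start = torch.randn(2, 3, 64, 64)
    times = scheduler.sample_random_times(2)
    noise = torch.randn_like(x_start)
    x_noised = scheduler.q_sample(x_start, times, noise = noise)
    return x_noised, times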
# rearrange image to sequence
class RearrangeToSequence(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
x = rearrange(x, 'b c ... -> b ... c')
x, ps = pack([x], 'b * c')
x = self.fn(x)
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b ... c -> b c ...')
return x
# diffusion prior
class LayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5, fp16_eps = 1e-3, stable = False):
super().__init__()
self.eps = eps
self.fp16_eps = fp16_eps
self.stable = stable
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
eps = self.eps if x.dtype == torch.float32 else self.fp16_eps
if self.stable:
x = x / x.amax(dim = -1, keepdim = True).detach()
var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class ChanLayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5, fp16_eps = 1e-3, stable = False):
super().__init__()
self.eps = eps
self.fp16_eps = fp16_eps
self.stable = stable
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
eps = self.eps if x.dtype == torch.float32 else self.fp16_eps
if self.stable:
x = x / x.amax(dim = 1, keepdim = True).detach()
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
# mlp
class MLP(nn.Module):
def __init__(
self,
dim_in,
dim_out,
*,
expansion_factor = 2.,
depth = 2,
norm = False,
):
super().__init__()
hidden_dim = int(expansion_factor * dim_out)
norm_fn = lambda: nn.LayerNorm(hidden_dim) if norm else nn.Identity()
layers = [nn.Sequential(
nn.Linear(dim_in, hidden_dim),
nn.SiLU(),
norm_fn()
)]
for _ in range(depth - 1):
layers.append(nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.SiLU(),
norm_fn()
))
layers.append(nn.Linear(hidden_dim, dim_out))
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x.float())
# relative positional bias for causal transformer
class RelPosBias(nn.Module):
def __init__(
self,
heads = 8,
num_buckets = 32,
max_distance = 128,
):
super().__init__()
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(
relative_position,
num_buckets = 32,
max_distance = 128
):
n = -relative_position
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
return torch.where(is_small, n, val_if_large)
def forward(self, i, j, *, device):
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
return rearrange(values, 'i j h -> h i j')
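# Illustrative sketch (not part of the original code): the relative position bias produces
# a (heads, i, j) tensor that is added to the attention logits of the causal transformer.
def _example_rel_pos_bias(heads = 8, seq_len = 16):
    rel_pos_bias = RelPosBias(heads = heads)
    bias = rel_pos_bias(seq_len, seq_len, device = torch.device('cpu'))
    assert bias.shape == (heads, seq_len, seq_len)
    return bias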
# feedforward
class SwiGLU(nn.Module):
""" used successfully in https://arxiv.org/abs/2204.0231 """
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.silu(gate)
def FeedForward(
dim,
mult = 4,
dropout = 0.,
post_activation_norm = False
):
""" post-activation norm https://arxiv.org/abs/2110.09456 """
inner_dim = int(mult * dim)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
SwiGLU(),
LayerNorm(inner_dim) if post_activation_norm else nn.Identity(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
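# Shape sketch for the feedforward above (hypothetical dims, kept as comments): the first linear
# projects to inner_dim * 2 so SwiGLU can split the features into a value half and a gate half.
#   ff = FeedForward(dim = 512, mult = 4)   # inner_dim = 2048, so the first projection is 512 -> 4096
#   ff(torch.randn(2, 10, 512)).shape       # -> torch.Size([2, 10, 512])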
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
dropout = 0.,
causal = False,
rotary_emb = None,
cosine_sim = True,
cosine_sim_scale = 16
):
super().__init__()
self.scale = cosine_sim_scale if cosine_sim else (dim_head ** -0.5)
self.cosine_sim = cosine_sim
self.heads = heads
inner_dim = dim_head * heads
self.causal = causal
self.norm = LayerNorm(dim)
self.dropout = nn.Dropout(dropout)
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.rotary_emb = rotary_emb
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, mask = None, attn_bias = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
# rotary embeddings
if exists(self.rotary_emb):
q, k = map(self.rotary_emb.rotate_queries_or_keys, (q, k))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# whether to use cosine sim
if self.cosine_sim:
q, k = map(l2norm, (q, k))
q, k = map(lambda t: t * math.sqrt(self.scale), (q, k))
# calculate query / key similarities
sim = einsum('b h i d, b j d -> b h i j', q, k)
# relative positional encoding (T5 style)
if exists(attn_bias):
sim = sim + attn_bias
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.type(sim.dtype)
attn = self.dropout(attn)
# aggregate values
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
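# Note on the attention above: keys / values are projected once (to dim_head * 2, with no head
# dimension) and broadcast across the query heads inside the einsum, and a learned null key / value
# is always prepended so queries have something to attend to when conditioning is dropped for
# classifier free guidance. Shape sketch (hypothetical sizes, kept as comments):
#   attn = Attention(dim = 512, dim_head = 64, heads = 8, causal = True)
#   attn(torch.randn(2, 10, 512)).shape     # -> torch.Size([2, 10, 512])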
class CausalTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
norm_in = False,
norm_out = True,
attn_dropout = 0.,
ff_dropout = 0.,
final_proj = True,
normformer = False,
rotary_emb = True
):
super().__init__()
self.init_norm = LayerNorm(dim) if norm_in else nn.Identity() # from latest BLOOM model and Yandex's YaLM
self.rel_pos_bias = RelPosBias(heads = heads)
rotary_emb = RotaryEmbedding(dim = min(32, dim_head)) if rotary_emb else None
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, causal = True, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_emb = rotary_emb),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, post_activation_norm = normformer)
]))
self.norm = LayerNorm(dim, stable = True) if norm_out else nn.Identity() # unclear in paper whether they projected after the classic layer norm for the final denoised image embedding, or just had the transformer output it directly: plan on offering both options
self.project_out = nn.Linear(dim, dim, bias = False) if final_proj else nn.Identity()
def forward(self, x):
n, device = x.shape[1], x.device
x = self.init_norm(x)
attn_bias = self.rel_pos_bias(n, n + 1, device = device)
for attn, ff in self.layers:
x = attn(x, attn_bias = attn_bias) + x
x = ff(x) + x
out = self.norm(x)
return self.project_out(out)
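# In the transformer above, the relative position bias is built for (n, n + 1) because each
# attention layer prepends one null key / value, making the key length one longer than the query
# length. Usage sketch (hypothetical sizes, kept as comments):
#   transformer = CausalTransformer(dim = 512, depth = 6)
#   transformer(torch.randn(2, 260, 512)).shape   # -> torch.Size([2, 260, 512])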
class DiffusionPriorNetwork(nn.Module):
def __init__(
self,
dim,
num_timesteps = None,
num_time_embeds = 1,
num_image_embeds = 1,
num_text_embeds = 1,
max_text_len = 256,
self_cond = False,
**kwargs
):
super().__init__()
self.dim = dim
self.num_time_embeds = num_time_embeds
self.num_image_embeds = num_image_embeds
self.num_text_embeds = num_text_embeds
self.to_text_embeds = nn.Sequential(
nn.Linear(dim, dim * num_text_embeds) if num_text_embeds > 1 else nn.Identity(),
Rearrange('b (n d) -> b n d', n = num_text_embeds)
)
self.continuous_embedded_time = not exists(num_timesteps)
self.to_time_embeds = nn.Sequential(
nn.Embedding(num_timesteps, dim * num_time_embeds) if exists(num_timesteps) else nn.Sequential(SinusoidalPosEmb(dim), MLP(dim, dim * num_time_embeds)), # also offer a continuous version of timestep embeddings, with a 2 layer MLP
Rearrange('b (n d) -> b n d', n = num_time_embeds)
)
self.to_image_embeds = nn.Sequential(
nn.Linear(dim, dim * num_image_embeds) if num_image_embeds > 1 else nn.Identity(),
Rearrange('b (n d) -> b n d', n = num_image_embeds)
)
self.learned_query = nn.Parameter(torch.randn(dim))
self.causal_transformer = CausalTransformer(dim = dim, **kwargs)
# dalle1 learned padding strategy
self.max_text_len = max_text_len
self.null_text_encodings = nn.Parameter(torch.randn(1, max_text_len, dim))
self.null_text_embeds = nn.Parameter(torch.randn(1, num_text_embeds, dim))
self.null_image_embed = nn.Parameter(torch.randn(1, dim))
# whether to use self conditioning, Hinton's group's new ddpm technique
self.self_cond = self_cond
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, text_cond_drop_prob = 1., image_cond_drop_prob = 1, **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
image_embed,
diffusion_timesteps,
*,
text_embed,
text_encodings = None,
self_cond = None,
text_cond_drop_prob = 0.,
image_cond_drop_prob = 0.
):
batch, dim, device, dtype = *image_embed.shape, image_embed.device, image_embed.dtype
num_time_embeds, num_image_embeds, num_text_embeds = self.num_time_embeds, self.num_image_embeds, self.num_text_embeds
# setup self conditioning
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros(batch, self.dim, device = device, dtype = dtype))
self_cond = rearrange(self_cond, 'b d -> b 1 d')
# in section 2.2, last paragraph
# "... consisting of encoded text, CLIP text embedding, diffusion timestep embedding, noised CLIP image embedding, final embedding for prediction"
text_embed = self.to_text_embeds(text_embed)
image_embed = self.to_image_embeds(image_embed)
# classifier free guidance masks
text_keep_mask = prob_mask_like((batch,), 1 - text_cond_drop_prob, device = device)
text_keep_mask = rearrange(text_keep_mask, 'b -> b 1 1')
image_keep_mask = prob_mask_like((batch,), 1 - image_cond_drop_prob, device = device)
image_keep_mask = rearrange(image_keep_mask, 'b -> b 1 1')
# make text encodings optional
# although the paper seems to suggest it is present <--
if not exists(text_encodings):
text_encodings = torch.empty((batch, 0, dim), device = device, dtype = dtype)
mask = torch.any(text_encodings != 0., dim = -1)
# replace any padding in the text encodings with learned padding tokens unique across position
text_encodings = text_encodings[:, :self.max_text_len]
mask = mask[:, :self.max_text_len]
text_len = text_encodings.shape[-2]
remainder = self.max_text_len - text_len
if remainder > 0:
text_encodings = F.pad(text_encodings, (0, 0, 0, remainder), value = 0.)
mask = F.pad(mask, (0, remainder), value = False)
# mask out text encodings with null encodings
null_text_encodings = self.null_text_encodings.to(text_encodings.dtype)
text_encodings = torch.where(
rearrange(mask, 'b n -> b n 1').clone() & text_keep_mask,
text_encodings,
null_text_encodings
)
# mask out text embeddings with null text embeddings
null_text_embeds = self.null_text_embeds.to(text_embed.dtype)
text_embed = torch.where(
text_keep_mask,
text_embed,
null_text_embeds
)
# mask out image embeddings with null image embeddings
null_image_embed = self.null_image_embed.to(image_embed.dtype)
image_embed = torch.where(
image_keep_mask,
image_embed,
null_image_embed
)
# whether text embedding is used for conditioning depends on whether text encodings are available for attention (for classifier free guidance, even though it seems from the paper it was not used in the prior ddpm, as the objective is different)
# but let's just do it right
if self.continuous_embedded_time:
diffusion_timesteps = diffusion_timesteps.type(dtype)
time_embed = self.to_time_embeds(diffusion_timesteps)
learned_queries = repeat(self.learned_query, 'd -> b 1 d', b = batch)
if self.self_cond:
learned_queries = torch.cat((self_cond, learned_queries), dim = -2)
tokens = torch.cat((
text_encodings,
text_embed,
time_embed,
image_embed,
learned_queries
), dim = -2)
# attend
tokens = self.causal_transformer(tokens)
# get learned query, which should predict the image embedding (per DDPM timestep)
pred_image_embed = tokens[..., -1, :]
return pred_image_embed
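# Token layout fed to the causal transformer above, following section 2.2 of the paper:
#   [ text encodings | text embed | time embed | noised image embed | (optional self-cond) | learned query ]
# and the denoised image embedding prediction is read off the final (learned query) position.
# With the defaults (num_text_embeds = num_time_embeds = num_image_embeds = 1, max_text_len = 256)
# and text encodings supplied, that is 256 + 1 + 1 + 1 + 1 = 260 tokens (hypothetical illustration,
# kept as comments).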
class DiffusionPrior(nn.Module):
def __init__(
self,
net,
*,
clip = None,
image_embed_dim = None,
image_size = None,
image_channels = 3,
timesteps = 1000,
sample_timesteps = None,
cond_drop_prob = 0.,
text_cond_drop_prob = None,
image_cond_drop_prob = None,
loss_type = "l2",
predict_x_start = True,
predict_v = False,
beta_schedule = "cosine",
condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
sampling_clamp_l2norm = False, # whether to l2norm clamp the image embed at each denoising iteration (analogous to -1 to 1 clipping for usual DDPMs)
sampling_final_clamp_l2norm = False, # whether to l2norm the final image embedding output (this is also done for images in ddpm)
training_clamp_l2norm = False,
init_image_embed_l2norm = False,
image_embed_scale = None, # this is for scaling the l2-normed image embedding, so it is more suitable for gaussian diffusion, as outlined by Katherine (@crowsonkb) https://github.com/lucidrains/DALLE2-pytorch/issues/60#issue-1226116132
clip_adapter_overrides = dict()
):
super().__init__()
self.sample_timesteps = sample_timesteps
self.noise_scheduler = NoiseScheduler(
beta_schedule = beta_schedule,
timesteps = timesteps,
loss_type = loss_type
)
if exists(clip):
assert image_channels == clip.image_channels, f'channels of image ({image_channels}) should be equal to the channels that CLIP accepts ({clip.image_channels})'
if isinstance(clip, CLIP):
clip = XClipAdapter(clip, **clip_adapter_overrides)
elif isinstance(clip, CoCa):
clip = CoCaAdapter(clip, **clip_adapter_overrides)
assert isinstance(clip, BaseClipAdapter)
freeze_model_and_make_eval_(clip)
self.clip = clip
else:
assert exists(image_embed_dim), 'latent dimension must be given, if training prior network without CLIP given'
self.clip = None
self.net = net
self.image_embed_dim = default(image_embed_dim, lambda: clip.dim_latent)
assert net.dim == self.image_embed_dim, f'your diffusion prior network has a dimension of {net.dim}, but you set your image embedding dimension (keyword image_embed_dim) on DiffusionPrior to {self.image_embed_dim}'
assert not exists(clip) or clip.dim_latent == self.image_embed_dim, f'you passed in a CLIP to the diffusion prior with latent dimensions of {clip.dim_latent}, but your image embedding dimension (keyword image_embed_dim) for the DiffusionPrior was set to {self.image_embed_dim}'
self.channels = default(image_channels, lambda: clip.image_channels)
self.text_cond_drop_prob = default(text_cond_drop_prob, cond_drop_prob)
self.image_cond_drop_prob = default(image_cond_drop_prob, cond_drop_prob)
self.can_classifier_guidance = self.text_cond_drop_prob > 0. and self.image_cond_drop_prob > 0.
self.condition_on_text_encodings = condition_on_text_encodings
# in paper, they do not predict the noise, but predict x0 directly for image embedding, claiming empirically better results. I'll just offer both.
self.predict_x_start = predict_x_start
self.predict_v = predict_v # takes precedence over predict_x_start
# @crowsonkb 's suggestion - https://github.com/lucidrains/DALLE2-pytorch/issues/60#issue-1226116132
self.image_embed_scale = default(image_embed_scale, self.image_embed_dim ** 0.5)
# whether to force an l2norm, similar to clipping denoised, when sampling
self.sampling_clamp_l2norm = sampling_clamp_l2norm
self.sampling_final_clamp_l2norm = sampling_final_clamp_l2norm
self.training_clamp_l2norm = training_clamp_l2norm
self.init_image_embed_l2norm = init_image_embed_l2norm
# device tracker
self.register_buffer('_dummy', torch.tensor([True]), persistent = False)
@property
def device(self):
return self._dummy.device
def l2norm_clamp_embed(self, image_embed):
return l2norm(image_embed) * self.image_embed_scale
def p_mean_variance(self, x, t, text_cond, self_cond = None, clip_denoised = False, cond_scale = 1.):
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'the model was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
pred = self.net.forward_with_cond_scale(x, t, cond_scale = cond_scale, self_cond = self_cond, **text_cond)
if self.predict_v:
x_start = self.noise_scheduler.predict_start_from_v(x, t = t, v = pred)
elif self.predict_x_start:
x_start = pred
else:
x_start = self.noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
if clip_denoised and not self.predict_x_start:
x_start.clamp_(-1., 1.)
if self.predict_x_start and self.sampling_clamp_l2norm:
x_start = l2norm(x_start) * self.image_embed_scale
model_mean, posterior_variance, posterior_log_variance = self.noise_scheduler.q_posterior(x_start=x_start, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.no_grad()
def p_sample(self, x, t, text_cond = None, self_cond = None, clip_denoised = True, cond_scale = 1.):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = t, text_cond = text_cond, self_cond = self_cond, clip_denoised = clip_denoised, cond_scale = cond_scale)
noise = torch.randn_like(x)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
pred = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
return pred, x_start
@torch.no_grad()
def p_sample_loop_ddpm(self, shape, text_cond, cond_scale = 1.):
batch, device = shape[0], self.device
image_embed = torch.randn(shape, device = device)
x_start = None # for self-conditioning
if self.init_image_embed_l2norm:
image_embed = l2norm(image_embed) * self.image_embed_scale
for i in tqdm(reversed(range(0, self.noise_scheduler.num_timesteps)), desc='sampling loop time step', total=self.noise_scheduler.num_timesteps):
times = torch.full((batch,), i, device = device, dtype = torch.long)
self_cond = x_start if self.net.self_cond else None
image_embed, x_start = self.p_sample(image_embed, times, text_cond = text_cond, self_cond = self_cond, cond_scale = cond_scale)
if self.sampling_final_clamp_l2norm and self.predict_x_start:
image_embed = self.l2norm_clamp_embed(image_embed)
return image_embed
@torch.no_grad()
def p_sample_loop_ddim(self, shape, text_cond, *, timesteps, eta = 1., cond_scale = 1.):
batch, device, alphas, total_timesteps = shape[0], self.device, self.noise_scheduler.alphas_cumprod_prev, self.noise_scheduler.num_timesteps
times = torch.linspace(-1., total_timesteps, steps = timesteps + 1)[:-1]
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:]))
image_embed = torch.randn(shape, device = device)
x_start = None # for self-conditioning
if self.init_image_embed_l2norm:
image_embed = l2norm(image_embed) * self.image_embed_scale
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
alpha = alphas[time]
alpha_next = alphas[time_next]
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
self_cond = x_start if self.net.self_cond else None
pred = self.net.forward_with_cond_scale(image_embed, time_cond, self_cond = self_cond, cond_scale = cond_scale, **text_cond)
# derive x0
if self.predict_v:
x_start = self.noise_scheduler.predict_start_from_v(image_embed, t = time_cond, v = pred)
elif self.predict_x_start:
x_start = pred
else:
x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred)
# clip x0 before maybe predicting noise
if not self.predict_x_start:
x_start.clamp_(-1., 1.)
if self.predict_x_start and self.sampling_clamp_l2norm:
x_start = self.l2norm_clamp_embed(x_start)
# predict noise
pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = x_start)
if time_next < 0:
image_embed = x_start
continue
c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
noise = torch.randn_like(image_embed) if time_next > 0 else 0.
image_embed = x_start * alpha_next.sqrt() + \
c1 * noise + \
c2 * pred_noise
if self.predict_x_start and self.sampling_final_clamp_l2norm:
image_embed = self.l2norm_clamp_embed(image_embed)
return image_embed
@torch.no_grad()
def p_sample_loop(self, *args, timesteps = None, **kwargs):
timesteps = default(timesteps, self.noise_scheduler.num_timesteps)
assert timesteps <= self.noise_scheduler.num_timesteps
is_ddim = timesteps < self.noise_scheduler.num_timesteps
if not is_ddim:
normalized_image_embed = self.p_sample_loop_ddpm(*args, **kwargs)
else:
normalized_image_embed = self.p_sample_loop_ddim(*args, **kwargs, timesteps = timesteps)
image_embed = normalized_image_embed / self.image_embed_scale
return image_embed
def p_losses(self, image_embed, times, text_cond, noise = None):
noise = default(noise, lambda: torch.randn_like(image_embed))
image_embed_noisy = self.noise_scheduler.q_sample(x_start = image_embed, t = times, noise = noise)
self_cond = None
if self.net.self_cond and random.random() < 0.5:
with torch.no_grad():
self_cond = self.net(image_embed_noisy, times, **text_cond).detach()
pred = self.net(
image_embed_noisy,
times,
self_cond = self_cond,
text_cond_drop_prob = self.text_cond_drop_prob,
image_cond_drop_prob = self.image_cond_drop_prob,
**text_cond
)
if self.predict_x_start and self.training_clamp_l2norm:
pred = self.l2norm_clamp_embed(pred)
if self.predict_v:
target = self.noise_scheduler.calculate_v(image_embed, times, noise)
elif self.predict_x_start:
target = image_embed
else:
target = noise
loss = self.noise_scheduler.loss_fn(pred, target)
return loss
@torch.no_grad()
@eval_decorator
def sample_batch_size(self, batch_size, text_cond, cond_scale = 1.):
        device = self.device
shape = (batch_size, self.image_embed_dim)
img = torch.randn(shape, device = device)
for i in tqdm(reversed(range(0, self.noise_scheduler.num_timesteps)), desc = 'sampling loop time step', total = self.noise_scheduler.num_timesteps):
            img, _ = self.p_sample(img, torch.full((batch_size,), i, device = device, dtype = torch.long), text_cond = text_cond, cond_scale = cond_scale)
return img
@torch.no_grad()
@eval_decorator
def sample(
self,
text,
num_samples_per_batch = 2,
cond_scale = 1.,
timesteps = None
):
timesteps = default(timesteps, self.sample_timesteps)
# in the paper, what they did was
# sample 2 image embeddings, choose the top 1 similarity, as judged by CLIP
text = repeat(text, 'b ... -> (b r) ...', r = num_samples_per_batch)
batch_size = text.shape[0]
image_embed_dim = self.image_embed_dim
text_embed, text_encodings = self.clip.embed_text(text)
text_cond = dict(text_embed = text_embed)
if self.condition_on_text_encodings:
text_cond = {**text_cond, 'text_encodings': text_encodings}
image_embeds = self.p_sample_loop((batch_size, image_embed_dim), text_cond = text_cond, cond_scale = cond_scale, timesteps = timesteps)
# retrieve original unscaled image embed
text_embeds = text_cond['text_embed']
text_embeds = rearrange(text_embeds, '(b r) d -> b r d', r = num_samples_per_batch)
image_embeds = rearrange(image_embeds, '(b r) d -> b r d', r = num_samples_per_batch)
text_image_sims = einsum('b r d, b r d -> b r', l2norm(text_embeds), l2norm(image_embeds))
top_sim_indices = text_image_sims.topk(k = 1).indices
top_sim_indices = repeat(top_sim_indices, 'b 1 -> b 1 d', d = image_embed_dim)
top_image_embeds = image_embeds.gather(1, top_sim_indices)
return rearrange(top_image_embeds, 'b 1 d -> b d')
def forward(
self,
text = None,
image = None,
text_embed = None, # allow for training on preprocessed CLIP text and image embeddings
image_embed = None,
text_encodings = None, # as well as CLIP text encodings
*args,
**kwargs
):
assert exists(text) ^ exists(text_embed), 'either text or text embedding must be supplied'
assert exists(image) ^ exists(image_embed), 'either image or image embedding must be supplied'
        assert not (self.condition_on_text_encodings and (not exists(text_encodings) and not exists(text))), 'text encodings (or text) must be supplied if the prior was initialized to condition on text encodings'
if exists(image):
image_embed, _ = self.clip.embed_image(image)
# calculate text conditionings, based on what is passed in
if exists(text):
text_embed, text_encodings = self.clip.embed_text(text)
text_cond = dict(text_embed = text_embed)
if self.condition_on_text_encodings:
assert exists(text_encodings), 'text encodings must be present for diffusion prior if specified'
text_cond = {**text_cond, 'text_encodings': text_encodings}
# timestep conditioning from ddpm
batch, device = image_embed.shape[0], image_embed.device
times = self.noise_scheduler.sample_random_times(batch)
# scale image embed (Katherine)
image_embed *= self.image_embed_scale
# calculate forward loss
return self.p_losses(image_embed, times, text_cond = text_cond, *args, **kwargs)
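# Rough usage sketch for the prior above (hypothetical names and dims, kept as comments; `clip`,
# `text_tokens` and `images` are placeholders, not objects defined in this module):
#   prior_network = DiffusionPriorNetwork(dim = 512, depth = 6, dim_head = 64, heads = 8)
#   diffusion_prior = DiffusionPrior(net = prior_network, clip = clip, timesteps = 1000, cond_drop_prob = 0.2)
#   loss = diffusion_prior(text = text_tokens, image = images)   # training step, returns the diffusion loss
#   loss.backward()
#   image_embeds = diffusion_prior.sample(text_tokens)           # samples 2 candidates per text, keeps top-1 by CLIP similarity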
# decoder
def NearestUpsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, dim_out, 3, padding = 1)
)
class PixelShuffleUpsample(nn.Module):
"""
    code shared by @MalumaDev at DALLE2-pytorch for addressing checkerboard artifacts
https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
"""
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = nn.Conv2d(dim, dim_out * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(2)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
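# The init above copies a single kaiming-initialized kernel across the 4 pixel-shuffle sub-pixels
# (akin to ICNR initialization), so right after initialization the layer behaves like a nearest
# neighbor upsample, which is the checkerboard-artifact mitigation from the paper linked in the
# docstring.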
def Downsample(dim, dim_out = None):
# https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample
# named SP-conv in the paper, but basically a pixel unshuffle
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),
nn.Conv2d(dim * 4, dim_out, 1)
)
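# Shape sketch for the downsample above (hypothetical sizes, kept as comments):
#   (b, c, h, w) -> pixel unshuffle -> (b, c * 4, h / 2, w / 2) -> 1x1 conv -> (b, dim_out, h / 2, w / 2)
# note the conv is declared as nn.Conv2d(dim * 4, dim_out, 1), so `dim` is the incoming channel count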
class WeightStandardizedConv2d(nn.Conv2d):
"""
https://arxiv.org/abs/1903.10520
weight standardization purportedly works synergistically with group normalization
"""
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
weight = self.weight
flattened_weights = rearrange(weight, 'o ... -> o (...)')
mean = reduce(weight, 'o ... -> o 1 1 1', 'mean')
var = torch.var(flattened_weights, dim = -1, unbiased = False)
var = rearrange(var, 'o -> o 1 1 1')
weight = (weight - mean) * (var + eps).rsqrt()
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
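# Weight standardization above normalizes each output-channel kernel before every convolution:
#   w_hat = (w - mean(w)) / sqrt(var(w) + eps), with mean and var taken per output channel over (in, kh, kw)
# only the weights are standardized; the activations pass through an ordinary conv2d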
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
dtype, device = x.dtype, x.device
assert is_float_dtype(dtype), 'input to sinusoidal pos emb must be a float type'
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = device, dtype = dtype) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1).type(dtype)
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
weight_standardization = False
):
super().__init__()
conv_klass = nn.Conv2d if not weight_standardization else WeightStandardizedConv2d
self.project = conv_klass(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.project(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
cond_dim = None,
time_cond_dim = None,
groups = 8,
weight_standardization = False,
cosine_sim_cross_attn = False
):
super().__init__()
self.time_mlp = None
if exists(time_cond_dim):
self.time_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_cond_dim, dim_out * 2)
)
self.cross_attn = None
if exists(cond_dim):
self.cross_attn = CrossAttention(
dim = dim_out,
context_dim = cond_dim,
cosine_sim = cosine_sim_cross_attn
)
self.block1 = Block(dim, dim_out, groups = groups, weight_standardization = weight_standardization)
self.block2 = Block(dim_out, dim_out, groups = groups, weight_standardization = weight_standardization)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None, cond = None):
scale_shift = None
if exists(self.time_mlp) and exists(time_emb):
time_emb = self.time_mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
if exists(self.cross_attn):
assert exists(cond)
h = rearrange(h, 'b c ... -> b ... c')
h, ps = pack([h], 'b * c')
h = self.cross_attn(h, context = cond) + h
h, = unpack(h, ps, 'b * c')
h = rearrange(h, 'b ... c -> b c ...')
h = self.block2(h)
return h + self.res_conv(x)
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
dropout = 0.,
norm_context = False,
cosine_sim = False,
cosine_sim_scale = 16
):
super().__init__()
self.cosine_sim = cosine_sim
self.scale = cosine_sim_scale if cosine_sim else (dim_head ** -0.5)
self.heads = heads
inner_dim = dim_head * heads
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.norm_context = LayerNorm(context_dim) if norm_context else nn.Identity()
self.dropout = nn.Dropout(dropout)
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
if self.cosine_sim:
q, k = map(l2norm, (q, k))
q, k = map(lambda t: t * math.sqrt(self.scale), (q, k))
sim = einsum('b h i d, b h j d -> b h i j', q, k)
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.type(sim.dtype)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
**kwargs
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = ChanLayerNorm(dim)
self.nonlin = nn.GELU()
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(inner_dim, dim, 1, bias = False),
ChanLayerNorm(dim)
)
def forward(self, fmap):
h, x, y = self.heads, *fmap.shape[-2:]
seq_len = x * y
fmap = self.norm(fmap)
q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
v = l2norm(v)
k, v = map(lambda t: t / math.sqrt(seq_len), (k, v))
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
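# Channel split sketch for the cross embedding above (hypothetical numbers, kept as comments):
#   CrossEmbedLayer(dim_in = 3, kernel_sizes = (3, 7, 15), dim_out = 128, stride = 1)
#   # dim_scales = [128 // 2, 128 // 4] = [64, 32], remainder 128 - 96 = 32
#   # -> three parallel convs producing 64 + 32 + 32 = 128 channels, concatenated along dim 1
# kernel sizes must share parity with the stride (all odd here, since stride = 1)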
class UpsampleCombiner(nn.Module):
def __init__(
self,
dim,
*,
enabled = False,
dim_ins = tuple(),
dim_outs = tuple()
):
super().__init__()
assert len(dim_ins) == len(dim_outs)
self.enabled = enabled
if not self.enabled:
self.dim_out = dim
return
self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def forward(self, x, fmaps = None):
target_size = x.shape[-1]
fmaps = default(fmaps, tuple())
if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:
return x
fmaps = [resize_image_to(fmap, target_size) for fmap in fmaps]
outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]
return torch.cat((x, *outs), dim = 1)
class Unet(nn.Module):
def __init__(
self,
dim,
*,
image_embed_dim = None,
text_embed_dim = None,
cond_dim = None,
num_image_tokens = 4,
num_time_tokens = 2,
out_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
channels_out = None,
self_attn = False,
attn_dim_head = 32,
attn_heads = 16,
lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
lowres_noise_cond = False, # for conditioning on low resolution noising, based on Imagen
self_cond = False, # set this to True to use the self-conditioning technique from - https://arxiv.org/abs/2208.04202
sparse_attn = False,
cosine_sim_cross_attn = False,
cosine_sim_self_attn = False,
attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
cond_on_text_encodings = False,
max_text_len = 256,
cond_on_image_embeds = False,
add_image_embeds_to_time = True, # alerted by @mhh0318 to a phrase in the paper - "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and adding CLIP embeddings to the existing timestep embedding"
init_dim = None,
init_conv_kernel_size = 7,
resnet_groups = 8,
resnet_weight_standardization = False,
num_resnet_blocks = 2,
init_cross_embed = True,
init_cross_embed_kernel_sizes = (3, 7, 15),
cross_embed_downsample = False,
cross_embed_downsample_kernel_sizes = (2, 4),
memory_efficient = False,
scale_skip_connection = False,
pixel_shuffle_upsample = True,
final_conv_kernel_size = 1,
combine_upsample_fmaps = False, # whether to combine the outputs of all upsample blocks, as in unet squared paper
checkpoint_during_training = False,
**kwargs
):
super().__init__()
# save locals to take care of some hyperparameters for cascading DDPM
self._locals = locals()
del self._locals['self']
del self._locals['__class__']
# for eventual cascading diffusion
self.lowres_cond = lowres_cond
# whether to do self conditioning
self.self_cond = self_cond
# determine dimensions
self.channels = channels
self.channels_out = default(channels_out, channels)
# initial number of channels depends on
# (1) low resolution conditioning from cascading ddpm paper, conditioned on previous unet output in the cascade
# (2) self conditioning (bit diffusion paper)
init_channels = channels * (1 + int(lowres_cond) + int(self_cond))
init_dim = default(init_dim, dim)
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else nn.Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
num_stages = len(in_out)
# time, image embeddings, and optional text encoding
cond_dim = default(cond_dim, dim)
time_cond_dim = dim * 4
self.to_time_hiddens = nn.Sequential(
SinusoidalPosEmb(dim),
nn.Linear(dim, time_cond_dim),
nn.GELU()
)
self.to_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
self.to_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
self.image_to_tokens = nn.Sequential(
nn.Linear(image_embed_dim, cond_dim * num_image_tokens),
Rearrange('b (n d) -> b n d', n = num_image_tokens)
) if cond_on_image_embeds and image_embed_dim != cond_dim else nn.Identity()
self.to_image_hiddens = nn.Sequential(
nn.Linear(image_embed_dim, time_cond_dim),
nn.GELU()
) if cond_on_image_embeds and add_image_embeds_to_time else None
self.norm_cond = nn.LayerNorm(cond_dim)
self.norm_mid_cond = nn.LayerNorm(cond_dim)
# text encoding conditioning (optional)
self.text_to_cond = None
self.text_embed_dim = None
if cond_on_text_encodings:
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text_encodings is True'
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
self.text_embed_dim = text_embed_dim
        # low resolution noise conditioning, based on Imagen's upsampler training technique
self.lowres_noise_cond = lowres_noise_cond
self.to_lowres_noise_cond = nn.Sequential(
SinusoidalPosEmb(dim),
nn.Linear(dim, time_cond_dim),
nn.GELU(),
nn.Linear(time_cond_dim, time_cond_dim)
) if lowres_noise_cond else None
# finer control over whether to condition on image embeddings and text encodings
# so one can have the latter unets in the cascading DDPMs only focus on super-resoluting
self.cond_on_text_encodings = cond_on_text_encodings
self.cond_on_image_embeds = cond_on_image_embeds
# for classifier free guidance
self.null_image_embed = nn.Parameter(torch.randn(1, num_image_tokens, cond_dim))
self.null_image_hiddens = nn.Parameter(torch.randn(1, time_cond_dim))
self.max_text_len = max_text_len
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
# whether to scale skip connection, adopted in Imagen
self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
# attention related params
attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head, cosine_sim = cosine_sim_self_attn)
self_attn = cast_tuple(self_attn, num_stages)
create_self_attn = lambda dim: RearrangeToSequence(Residual(Attention(dim, **attn_kwargs)))
# resnet block klass
resnet_groups = cast_tuple(resnet_groups, num_stages)
top_level_resnet_group = first(resnet_groups)
num_resnet_blocks = cast_tuple(num_resnet_blocks, num_stages)
# downsample klass
downsample_klass = Downsample
if cross_embed_downsample:
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
# upsample klass
upsample_klass = NearestUpsample if not pixel_shuffle_upsample else PixelShuffleUpsample
# prepare resnet klass
resnet_block = partial(ResnetBlock, cosine_sim_cross_attn = cosine_sim_cross_attn, weight_standardization = resnet_weight_standardization)
# give memory efficient unet an initial resnet block
self.init_resnet_block = resnet_block(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = top_level_resnet_group) if memory_efficient else None
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
skip_connect_dims = [] # keeping track of skip connection dimensions
upsample_combiner_dims = [] # keeping track of dimensions for final upsample feature map combiner
for ind, ((dim_in, dim_out), groups, layer_num_resnet_blocks, layer_self_attn) in enumerate(zip(in_out, resnet_groups, num_resnet_blocks, self_attn)):
is_first = ind == 0
is_last = ind >= (num_resolutions - 1)
layer_cond_dim = cond_dim if not is_first else None
dim_layer = dim_out if memory_efficient else dim_in
skip_connect_dims.append(dim_layer)
attention = nn.Identity()
if layer_self_attn:
attention = create_self_attn(dim_layer)
elif sparse_attn:
attention = Residual(LinearAttention(dim_layer, **attn_kwargs))
self.downs.append(nn.ModuleList([
downsample_klass(dim_in, dim_out = dim_out) if memory_efficient else None,
resnet_block(dim_layer, dim_layer, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([resnet_block(dim_layer, dim_layer, cond_dim = layer_cond_dim, time_cond_dim = time_cond_dim, groups = groups) for _ in range(layer_num_resnet_blocks)]),
attention,
downsample_klass(dim_layer, dim_out = dim_out) if not is_last and not memory_efficient else nn.Conv2d(dim_layer, dim_out, 1)
]))
mid_dim = dims[-1]
self.mid_block1 = resnet_block(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
self.mid_attn = create_self_attn(mid_dim)
self.mid_block2 = resnet_block(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
for ind, ((dim_in, dim_out), groups, layer_num_resnet_blocks, layer_self_attn) in enumerate(zip(reversed(in_out), reversed(resnet_groups), reversed(num_resnet_blocks), reversed(self_attn))):
is_last = ind >= (len(in_out) - 1)
layer_cond_dim = cond_dim if not is_last else None
skip_connect_dim = skip_connect_dims.pop()
attention = nn.Identity()
if layer_self_attn:
attention = create_self_attn(dim_out)
elif sparse_attn:
attention = Residual(LinearAttention(dim_out, **attn_kwargs))
upsample_combiner_dims.append(dim_out)
self.ups.append(nn.ModuleList([
resnet_block(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([resnet_block(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, time_cond_dim = time_cond_dim, groups = groups) for _ in range(layer_num_resnet_blocks)]),
attention,
upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else nn.Identity()
]))
# whether to combine outputs from all upsample blocks for final resnet block
self.upsample_combiner = UpsampleCombiner(
dim = dim,
enabled = combine_upsample_fmaps,
dim_ins = upsample_combiner_dims,
dim_outs = (dim,) * len(upsample_combiner_dims)
)
# a final resnet block
self.final_resnet_block = resnet_block(self.upsample_combiner.dim_out + dim, dim, time_cond_dim = time_cond_dim, groups = top_level_resnet_group)
out_dim_in = dim + (channels if lowres_cond else 0)
self.to_out = nn.Conv2d(out_dim_in, self.channels_out, kernel_size = final_conv_kernel_size, padding = final_conv_kernel_size // 2)
zero_init_(self.to_out) # since both OpenAI and @crowsonkb are doing it
# whether to checkpoint during training
self.checkpoint_during_training = checkpoint_during_training
# if the current settings for the unet are not correct
# for cascading DDPM, then reinit the unet with the right settings
def cast_model_parameters(
self,
*,
lowres_cond,
lowres_noise_cond,
channels,
channels_out,
cond_on_image_embeds,
cond_on_text_encodings,
):
if lowres_cond == self.lowres_cond and \
channels == self.channels and \
cond_on_image_embeds == self.cond_on_image_embeds and \
cond_on_text_encodings == self.cond_on_text_encodings and \
lowres_noise_cond == self.lowres_noise_cond and \
channels_out == self.channels_out:
return self
updated_kwargs = dict(
lowres_cond = lowres_cond,
channels = channels,
channels_out = channels_out,
cond_on_image_embeds = cond_on_image_embeds,
cond_on_text_encodings = cond_on_text_encodings,
lowres_noise_cond = lowres_noise_cond
)
return self.__class__(**{**self._locals, **updated_kwargs})
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, text_cond_drop_prob = 1., image_cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
time,
*,
image_embed,
lowres_cond_img = None,
lowres_noise_level = None,
text_encodings = None,
image_cond_drop_prob = 0.,
text_cond_drop_prob = 0.,
blur_sigma = None,
blur_kernel_size = None,
disable_checkpoint = False,
self_cond = None
):
batch_size, device = x.shape[0], x.device
# add low resolution conditioning, if present
assert not (self.lowres_cond and not exists(lowres_cond_img)), 'low resolution conditioning image must be present'
# concat self conditioning, if needed
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x, self_cond), dim = 1)
# concat low resolution conditioning
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
# initial convolution
x = self.init_conv(x)
r = x.clone() # final residual
# time conditioning
time = time.type_as(x)
time_hiddens = self.to_time_hiddens(time)
time_tokens = self.to_time_tokens(time_hiddens)
t = self.to_time_cond(time_hiddens)
# low res noise conditioning (similar to time above)
if exists(lowres_noise_level):
            assert exists(self.to_lowres_noise_cond), 'lowres_noise_cond must be set to True on instantiation of the unet in order to condition on lowres noise'
lowres_noise_level = lowres_noise_level.type_as(x)
t = t + self.to_lowres_noise_cond(lowres_noise_level)
# conditional dropout
image_keep_mask = prob_mask_like((batch_size,), 1 - image_cond_drop_prob, device = device)
text_keep_mask = prob_mask_like((batch_size,), 1 - text_cond_drop_prob, device = device)
text_keep_mask = rearrange(text_keep_mask, 'b -> b 1 1')
# image embedding to be summed to time embedding
# discovered by @mhh0318 in the paper
if exists(image_embed) and exists(self.to_image_hiddens):
image_hiddens = self.to_image_hiddens(image_embed)
image_keep_mask_hidden = rearrange(image_keep_mask, 'b -> b 1')
null_image_hiddens = self.null_image_hiddens.to(image_hiddens.dtype)
image_hiddens = torch.where(
image_keep_mask_hidden,
image_hiddens,
null_image_hiddens
)
t = t + image_hiddens
# mask out image embedding depending on condition dropout
# for classifier free guidance
image_tokens = None
if self.cond_on_image_embeds:
image_keep_mask_embed = rearrange(image_keep_mask, 'b -> b 1 1')
image_tokens = self.image_to_tokens(image_embed)
null_image_embed = self.null_image_embed.to(image_tokens.dtype) # for some reason pytorch AMP not working
image_tokens = torch.where(
image_keep_mask_embed,
image_tokens,
null_image_embed
)
# take care of text encodings (optional)
text_tokens = None
if exists(text_encodings) and self.cond_on_text_encodings:
            assert text_encodings.shape[0] == batch_size, f'the text encodings being passed into the unet do not have the proper batch size - text encoding shape {text_encodings.shape} - required batch size is {batch_size}'
assert self.text_embed_dim == text_encodings.shape[-1], f'the text encodings you are passing in have a dimension of {text_encodings.shape[-1]}, but the unet was created with text_embed_dim of {self.text_embed_dim}.'
text_mask = torch.any(text_encodings != 0., dim = -1)
text_tokens = self.text_to_cond(text_encodings)
text_tokens = text_tokens[:, :self.max_text_len]
text_mask = text_mask[:, :self.max_text_len]
text_tokens_len = text_tokens.shape[1]
remainder = self.max_text_len - text_tokens_len
if remainder > 0:
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
text_mask = F.pad(text_mask, (0, remainder), value = False)
text_mask = rearrange(text_mask, 'b n -> b n 1')
assert text_mask.shape[0] == text_keep_mask.shape[0], f'text_mask has shape of {text_mask.shape} while text_keep_mask has shape {text_keep_mask.shape}. text encoding is of shape {text_encodings.shape}'
text_keep_mask = text_mask & text_keep_mask
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
text_tokens = torch.where(
text_keep_mask,
text_tokens,
null_text_embed
)
# main conditioning tokens (c)
c = time_tokens
if exists(image_tokens):
c = torch.cat((c, image_tokens), dim = -2)
# text and image conditioning tokens (mid_c)
# to save on compute, only do cross attention based conditioning on the inner most layers of the Unet
mid_c = c if not exists(text_tokens) else torch.cat((c, text_tokens), dim = -2)
# normalize conditioning tokens
c = self.norm_cond(c)
mid_c = self.norm_mid_cond(mid_c)
# gradient checkpointing
can_checkpoint = self.training and self.checkpoint_during_training and not disable_checkpoint
apply_checkpoint_fn = make_checkpointable if can_checkpoint else identity
# make checkpointable modules
init_resnet_block, mid_block1, mid_attn, mid_block2, final_resnet_block = [maybe(apply_checkpoint_fn)(module) for module in (self.init_resnet_block, self.mid_block1, self.mid_attn, self.mid_block2, self.final_resnet_block)]
can_checkpoint_cond = lambda m: isinstance(m, ResnetBlock)
downs, ups = [maybe(apply_checkpoint_fn)(m, condition = can_checkpoint_cond) for m in (self.downs, self.ups)]
# initial resnet block
if exists(init_resnet_block):
x = init_resnet_block(x, t)
# go through the layers of the unet, down and up
down_hiddens = []
up_hiddens = []
for pre_downsample, init_block, resnet_blocks, attn, post_downsample in downs:
if exists(pre_downsample):
x = pre_downsample(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = resnet_block(x, t, c)
down_hiddens.append(x.contiguous())
x = attn(x)
down_hiddens.append(x.contiguous())
if exists(post_downsample):
x = post_downsample(x)
x = mid_block1(x, t, mid_c)
if exists(mid_attn):
x = mid_attn(x)
x = mid_block2(x, t, mid_c)
connect_skip = lambda fmap: torch.cat((fmap, down_hiddens.pop() * self.skip_connect_scale), dim = 1)
for init_block, resnet_blocks, attn, upsample in ups:
x = connect_skip(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = connect_skip(x)
x = resnet_block(x, t, c)
x = attn(x)
up_hiddens.append(x.contiguous())
x = upsample(x)
x = self.upsample_combiner(x, up_hiddens)
x = torch.cat((x, r), dim = 1)
x = final_resnet_block(x, t)
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
return self.to_out(x)
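# Classifier free guidance for the unet above follows the usual linear extrapolation
# (a sketch of what forward_with_cond_scale computes):
#   guided = null_logits + (cond_logits - null_logits) * cond_scale
# where the null branch re-runs forward with text / image conditioning dropped
# (drop probabilities forced to 1.), and cond_scale = 1. simply returns the conditional output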
class LowresConditioner(nn.Module):
def __init__(
self,
downsample_first = True,
use_blur = True,
blur_prob = 0.5,
blur_sigma = 0.6,
blur_kernel_size = 3,
use_noise = False,
input_image_range = None,
normalize_img_fn = identity,
unnormalize_img_fn = identity
):
super().__init__()
self.downsample_first = downsample_first
self.input_image_range = input_image_range
self.use_blur = use_blur
self.blur_prob = blur_prob
self.blur_sigma = blur_sigma
self.blur_kernel_size = blur_kernel_size
self.use_noise = use_noise
self.normalize_img = normalize_img_fn
self.unnormalize_img = unnormalize_img_fn
self.noise_scheduler = NoiseScheduler(beta_schedule = 'linear', timesteps = 1000, loss_type = 'l2') if use_noise else None
def noise_image(self, cond_fmap, noise_levels = None):
assert exists(self.noise_scheduler)
batch = cond_fmap.shape[0]
cond_fmap = self.normalize_img(cond_fmap)
random_noise_levels = default(noise_levels, lambda: self.noise_scheduler.sample_random_times(batch))
cond_fmap = self.noise_scheduler.q_sample(cond_fmap, t = random_noise_levels, noise = torch.randn_like(cond_fmap))
cond_fmap = self.unnormalize_img(cond_fmap)
return cond_fmap, random_noise_levels
def forward(
self,
cond_fmap,
*,
target_image_size,
downsample_image_size = None,
should_blur = True,
blur_sigma = None,
blur_kernel_size = None
):
if self.downsample_first and exists(downsample_image_size):
cond_fmap = resize_image_to(cond_fmap, downsample_image_size, clamp_range = self.input_image_range, nearest = True)
# blur is only applied 50% of the time
# section 3.1 in https://arxiv.org/abs/2106.15282
if self.use_blur and should_blur and random.random() < self.blur_prob:
# when training, blur the low resolution conditional image
blur_sigma = default(blur_sigma, self.blur_sigma)
blur_kernel_size = default(blur_kernel_size, self.blur_kernel_size)
# allow for drawing a random sigma between lo and hi float values
if isinstance(blur_sigma, tuple):
blur_sigma = tuple(map(float, blur_sigma))
blur_sigma = random.uniform(*blur_sigma)
# allow for drawing a random kernel size between lo and hi int values
if isinstance(blur_kernel_size, tuple):
blur_kernel_size = tuple(map(int, blur_kernel_size))
kernel_size_lo, kernel_size_hi = blur_kernel_size
blur_kernel_size = random.randrange(kernel_size_lo, kernel_size_hi + 1)
cond_fmap = gaussian_blur2d(cond_fmap, cast_tuple(blur_kernel_size, 2), cast_tuple(blur_sigma, 2))
# resize to target image size
cond_fmap = resize_image_to(cond_fmap, target_image_size, clamp_range = self.input_image_range, nearest = True)
# noise conditioning, as done in Imagen
        # as a replacement for the BSR noising, and potentially replacing blurring for the first stage too
random_noise_levels = None
if self.use_noise:
cond_fmap, random_noise_levels = self.noise_image(cond_fmap)
# return conditioning feature map, as well as the augmentation noise levels
return cond_fmap, random_noise_levels
class Decoder(nn.Module):
def __init__(
self,
unet,
*,
clip = None,
image_size = None,
channels = 3,
vae = tuple(),
timesteps = 1000,
sample_timesteps = None,
image_cond_drop_prob = 0.1,
text_cond_drop_prob = 0.5,
loss_type = 'l2',
beta_schedule = None,
predict_x_start = False,
predict_v = False,
predict_x_start_for_latent_diffusion = False,
image_sizes = None, # for cascading ddpm, image size at each stage
random_crop_sizes = None, # whether to random crop the image at that stage in the cascade (super resoluting convolutions at the end may be able to generalize on smaller crops)
use_noise_for_lowres_cond = False, # whether to use Imagen-like noising for low resolution conditioning
use_blur_for_lowres_cond = True, # whether to use the blur conditioning used in the original cascading ddpm paper, as well as DALL-E2
lowres_downsample_first = True, # cascading ddpm - resizes to lower resolution, then to next conditional resolution + blur
blur_prob = 0.5, # cascading ddpm - when training, the gaussian blur is only applied 50% of the time
blur_sigma = 0.6, # cascading ddpm - blur sigma
blur_kernel_size = 3, # cascading ddpm - blur kernel size
lowres_noise_sample_level = 0.2, # in imagen paper, they use a 0.2 noise level at sample time for low resolution conditioning
clip_denoised = True,
clip_x_start = True,
clip_adapter_overrides = dict(),
learned_variance = True,
learned_variance_constrain_frac = False,
vb_loss_weight = 0.001,
unconditional = False, # set to True for generating images without conditioning
auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
use_dynamic_thres = False, # from the Imagen paper
dynamic_thres_percentile = 0.95,
p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
p2_loss_weight_k = 1,
ddim_sampling_eta = 0. # can be set to 0. for deterministic sampling afaict
):
super().__init__()
# clip
self.clip = None
if exists(clip):
assert not unconditional, 'clip must not be given if doing unconditional image training'
assert channels == clip.image_channels, f'channels of image ({channels}) should be equal to the channels that CLIP accepts ({clip.image_channels})'
if isinstance(clip, CLIP):
clip = XClipAdapter(clip, **clip_adapter_overrides)
elif isinstance(clip, CoCa):
clip = CoCaAdapter(clip, **clip_adapter_overrides)
freeze_model_and_make_eval_(clip)
assert isinstance(clip, BaseClipAdapter)
self.clip = clip
# determine image size, with image_size and image_sizes taking precedence
if exists(image_size) or exists(image_sizes):
assert exists(image_size) ^ exists(image_sizes), 'only one of image_size or image_sizes must be given'
image_size = default(image_size, lambda: image_sizes[-1])
elif exists(clip):
image_size = clip.image_size
else:
            raise ValueError('either image_size, image_sizes, or clip must be given to decoder')
# channels
self.channels = channels
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
# verify conditioning method
unets = cast_tuple(unet)
num_unets = len(unets)
self.num_unets = num_unets
self.unconditional = unconditional
# automatically take care of ensuring that first unet is unconditional
# while the rest of the unets are conditioned on the low resolution image produced by previous unet
vaes = pad_tuple_to_length(cast_tuple(vae), len(unets), fillvalue = NullVQGanVAE(channels = self.channels))
# whether to use learned variance, defaults to True for the first unet in the cascade, as in paper
learned_variance = pad_tuple_to_length(cast_tuple(learned_variance), len(unets), fillvalue = False)
self.learned_variance = learned_variance
self.learned_variance_constrain_frac = learned_variance_constrain_frac # whether to constrain the output of the network (the interpolation fraction) from 0 to 1
self.vb_loss_weight = vb_loss_weight
# default and validate conditioning parameters
use_noise_for_lowres_cond = cast_tuple(use_noise_for_lowres_cond, num_unets - 1, validate = False)
use_blur_for_lowres_cond = cast_tuple(use_blur_for_lowres_cond, num_unets - 1, validate = False)
if len(use_noise_for_lowres_cond) < num_unets:
use_noise_for_lowres_cond = (False, *use_noise_for_lowres_cond)
if len(use_blur_for_lowres_cond) < num_unets:
use_blur_for_lowres_cond = (False, *use_blur_for_lowres_cond)
assert not use_noise_for_lowres_cond[0], 'first unet will never need low res noise conditioning'
assert not use_blur_for_lowres_cond[0], 'first unet will never need low res blur conditioning'
assert num_unets == 1 or all((use_noise or use_blur) for use_noise, use_blur in zip(use_noise_for_lowres_cond[1:], use_blur_for_lowres_cond[1:]))
# construct unets and vaes
self.unets = nn.ModuleList([])
self.vaes = nn.ModuleList([])
for ind, (one_unet, one_vae, one_unet_learned_var, lowres_noise_cond) in enumerate(zip(unets, vaes, learned_variance, use_noise_for_lowres_cond)):
assert isinstance(one_unet, Unet)
assert isinstance(one_vae, (VQGanVAE, NullVQGanVAE))
is_first = ind == 0
latent_dim = one_vae.encoded_dim if exists(one_vae) else None
unet_channels = default(latent_dim, self.channels)
unet_channels_out = unet_channels * (1 if not one_unet_learned_var else 2)
one_unet = one_unet.cast_model_parameters(
lowres_cond = not is_first,
lowres_noise_cond = lowres_noise_cond,
cond_on_image_embeds = not unconditional and is_first,
cond_on_text_encodings = not unconditional and one_unet.cond_on_text_encodings,
channels = unet_channels,
channels_out = unet_channels_out
)
self.unets.append(one_unet)
self.vaes.append(one_vae.copy_for_eval())
# sampling timesteps, defaults to non-ddim with full timesteps sampling
self.sample_timesteps = cast_tuple(sample_timesteps, num_unets)
self.ddim_sampling_eta = ddim_sampling_eta
# create noise schedulers per unet
if not exists(beta_schedule):
beta_schedule = ('cosine', *(('cosine',) * max(num_unets - 2, 0)), *(('linear',) * int(num_unets > 1)))
beta_schedule = cast_tuple(beta_schedule, num_unets)
p2_loss_weight_gamma = cast_tuple(p2_loss_weight_gamma, num_unets)
self.noise_schedulers = nn.ModuleList([])
for ind, (unet_beta_schedule, unet_p2_loss_weight_gamma, sample_timesteps) in enumerate(zip(beta_schedule, p2_loss_weight_gamma, self.sample_timesteps)):
assert not exists(sample_timesteps) or sample_timesteps <= timesteps, f'sampling timesteps {sample_timesteps} must be less than or equal to the number of training timesteps {timesteps} for unet {ind + 1}'
noise_scheduler = NoiseScheduler(
beta_schedule = unet_beta_schedule,
timesteps = timesteps,
loss_type = loss_type,
p2_loss_weight_gamma = unet_p2_loss_weight_gamma,
p2_loss_weight_k = p2_loss_weight_k
)
self.noise_schedulers.append(noise_scheduler)
# unet image sizes
image_sizes = default(image_sizes, (image_size,))
image_sizes = tuple(sorted(set(image_sizes)))
assert self.num_unets == len(image_sizes), f'you did not supply the correct number of u-nets ({self.num_unets}) for resolutions {image_sizes}'
self.image_sizes = image_sizes
self.sample_channels = cast_tuple(self.channels, len(image_sizes))
# random crop sizes (for the super-resolution unets at the end of the cascade)
self.random_crop_sizes = cast_tuple(random_crop_sizes, len(image_sizes))
assert not exists(self.random_crop_sizes[0]), 'you would not need to randomly crop the image for the base unet'
# predict x0 config
self.predict_x_start = cast_tuple(predict_x_start, len(unets)) if not predict_x_start_for_latent_diffusion else tuple(map(lambda t: isinstance(t, VQGanVAE), self.vaes))
# predict v
self.predict_v = cast_tuple(predict_v, len(unets))
# input image range
self.input_image_range = (-1. if not auto_normalize_img else 0., 1.)
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'
self.lowres_conds = nn.ModuleList([])
for unet_index, use_noise, use_blur in zip(range(num_unets), use_noise_for_lowres_cond, use_blur_for_lowres_cond):
if unet_index == 0:
self.lowres_conds.append(None)
continue
lowres_cond = LowresConditioner(
downsample_first = lowres_downsample_first,
use_blur = use_blur,
use_noise = use_noise,
blur_prob = blur_prob,
blur_sigma = blur_sigma,
blur_kernel_size = blur_kernel_size,
input_image_range = self.input_image_range,
normalize_img_fn = self.normalize_img,
unnormalize_img_fn = self.unnormalize_img
)
self.lowres_conds.append(lowres_cond)
self.lowres_noise_sample_level = lowres_noise_sample_level
# classifier free guidance
self.image_cond_drop_prob = image_cond_drop_prob
self.text_cond_drop_prob = text_cond_drop_prob
self.can_classifier_guidance = image_cond_drop_prob > 0. or text_cond_drop_prob > 0.
# whether to clip when sampling
self.clip_denoised = clip_denoised
self.clip_x_start = clip_x_start
# dynamic thresholding settings, if clipping denoised during sampling
self.use_dynamic_thres = use_dynamic_thres
self.dynamic_thres_percentile = dynamic_thres_percentile
# device tracker
self.register_buffer('_dummy', torch.Tensor([True]), persistent = False)
@property
def device(self):
return self._dummy.device
@property
def condition_on_text_encodings(self):
return any([unet.cond_on_text_encodings for unet in self.unets if isinstance(unet, Unet)])
def get_unet(self, unet_number):
assert 0 < unet_number <= self.num_unets
index = unet_number - 1
return self.unets[index]
def parse_unet_output(self, learned_variance, output):
var_interp_frac_unnormalized = None
if learned_variance:
output, var_interp_frac_unnormalized = output.chunk(2, dim = 1)
return UnetOutput(output, var_interp_frac_unnormalized)
@contextmanager
def one_unet_in_gpu(self, unet_number = None, unet = None):
assert exists(unet_number) ^ exists(unet)
if exists(unet_number):
unet = self.get_unet(unet_number)
# devices
cuda, cpu = torch.device('cuda'), torch.device('cpu')
self.cuda()
devices = [module_device(unet) for unet in self.unets]
self.unets.to(cpu)
unet.to(cuda)
yield
for unet, device in zip(self.unets, devices):
unet.to(device)
def dynamic_threshold(self, x):
""" proposed in https://arxiv.org/abs/2205.11487 as an improved clamping in the setting of classifier free guidance """
# s is the threshold amount
# static thresholding would just be s = 1
s = 1.
if self.use_dynamic_thres:
s = torch.quantile(
rearrange(x, 'b ... -> b (...)').abs(),
self.dynamic_thres_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = s.view(-1, *((1,) * (x.ndim - 1)))
# clip by threshold, depending on whether static or dynamic
x = x.clamp(-s, s) / s
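# worked example: if the chosen percentile of |x_start| for one sample is 2.4, that sample is clamped to [-2.4, 2.4] and divided by 2.4, landing back in [-1, 1]; a sample whose values already lie within [-1, 1] keeps s = 1 and passes through unchanged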
return x
def p_mean_variance(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, lowres_cond_img = None, self_cond = None, clip_denoised = True, predict_x_start = False, predict_v = False, learned_variance = False, cond_scale = 1., model_output = None, lowres_noise_level = None):
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'the decoder was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
model_output = default(model_output, lambda: unet.forward_with_cond_scale(x, t, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img, self_cond = self_cond, lowres_noise_level = lowres_noise_level))
pred, var_interp_frac_unnormalized = self.parse_unet_output(learned_variance, model_output)
if predict_v:
x_start = noise_scheduler.predict_start_from_v(x, t = t, v = pred)
elif predict_x_start:
x_start = pred
else:
x_start = noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
if clip_denoised:
x_start = self.dynamic_threshold(x_start)
model_mean, posterior_variance, posterior_log_variance = noise_scheduler.q_posterior(x_start=x_start, x_t=x, t=t)
if learned_variance:
# if learned variance, posterior variance and posterior log variance are predicted by the network
# by an interpolation of the max and min log beta values
# eq 15 - https://arxiv.org/abs/2102.09672
min_log = extract(noise_scheduler.posterior_log_variance_clipped, t, x.shape)
max_log = extract(torch.log(noise_scheduler.betas), t, x.shape)
var_interp_frac = unnormalize_zero_to_one(var_interp_frac_unnormalized)
if self.learned_variance_constrain_frac:
var_interp_frac = var_interp_frac.sigmoid()
posterior_log_variance = var_interp_frac * max_log + (1 - var_interp_frac) * min_log
posterior_variance = posterior_log_variance.exp()
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.no_grad()
def p_sample(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, cond_scale = 1., lowres_cond_img = None, self_cond = None, predict_x_start = False, predict_v = False, learned_variance = False, clip_denoised = True, lowres_noise_level = None):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance, x_start = self.p_mean_variance(unet, x = x, t = t, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img, self_cond = self_cond, clip_denoised = clip_denoised, predict_x_start = predict_x_start, predict_v = predict_v, noise_scheduler = noise_scheduler, learned_variance = learned_variance, lowres_noise_level = lowres_noise_level)
noise = torch.randn_like(x)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
pred = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
return pred, x_start
@torch.no_grad()
def p_sample_loop_ddpm(
self,
unet,
shape,
image_embed,
noise_scheduler,
predict_x_start = False,
predict_v = False,
learned_variance = False,
clip_denoised = True,
lowres_cond_img = None,
text_encodings = None,
cond_scale = 1,
is_latent_diffusion = False,
lowres_noise_level = None,
inpaint_image = None,
inpaint_mask = None,
inpaint_resample_times = 5
):
device = self.device
b = shape[0]
img = torch.randn(shape, device = device)
x_start = None # for self-conditioning
is_inpaint = exists(inpaint_image)
resample_times = inpaint_resample_times if is_inpaint else 1
if is_inpaint:
inpaint_image = self.normalize_img(inpaint_image)
inpaint_image = resize_image_to(inpaint_image, shape[-1], nearest = True)
inpaint_mask = rearrange(inpaint_mask, 'b h w -> b 1 h w').float()
inpaint_mask = resize_image_to(inpaint_mask, shape[-1], nearest = True)
inpaint_mask = inpaint_mask.bool()
if not is_latent_diffusion:
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
for time in tqdm(reversed(range(0, noise_scheduler.num_timesteps)), desc = 'sampling loop time step', total = noise_scheduler.num_timesteps):
is_last_timestep = time == 0
for r in reversed(range(0, resample_times)):
is_last_resample_step = r == 0
times = torch.full((b,), time, device = device, dtype = torch.long)
if is_inpaint:
# following the repaint paper
# https://arxiv.org/abs/2201.09865
noised_inpaint_image = noise_scheduler.q_sample(inpaint_image, t = times)
img = (img * ~inpaint_mask) + (noised_inpaint_image * inpaint_mask)
self_cond = x_start if unet.self_cond else None
img, x_start = self.p_sample(
unet,
img,
times,
image_embed = image_embed,
text_encodings = text_encodings,
cond_scale = cond_scale,
self_cond = self_cond,
lowres_cond_img = lowres_cond_img,
lowres_noise_level = lowres_noise_level,
predict_x_start = predict_x_start,
predict_v = predict_v,
noise_scheduler = noise_scheduler,
learned_variance = learned_variance,
clip_denoised = clip_denoised
)
if is_inpaint and not (is_last_timestep or is_last_resample_step):
# in repaint, you renoise and resample up to 10 times every step
img = noise_scheduler.q_sample_from_to(img, times - 1, times)
if is_inpaint:
img = (img * ~inpaint_mask) + (inpaint_image * inpaint_mask)
unnormalize_img = self.unnormalize_img(img)
return unnormalize_img
@torch.no_grad()
def p_sample_loop_ddim(
self,
unet,
shape,
image_embed,
noise_scheduler,
timesteps,
eta = 1.,
predict_x_start = False,
predict_v = False,
learned_variance = False,
clip_denoised = True,
lowres_cond_img = None,
text_encodings = None,
cond_scale = 1,
is_latent_diffusion = False,
lowres_noise_level = None,
inpaint_image = None,
inpaint_mask = None,
inpaint_resample_times = 5
):
batch, device, total_timesteps, alphas, eta = shape[0], self.device, noise_scheduler.num_timesteps, noise_scheduler.alphas_cumprod, self.ddim_sampling_eta
times = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:]))
time_pairs = list(filter(lambda t: t[0] > t[1], time_pairs))
is_inpaint = exists(inpaint_image)
resample_times = inpaint_resample_times if is_inpaint else 1
if is_inpaint:
inpaint_image = self.normalize_img(inpaint_image)
inpaint_image = resize_image_to(inpaint_image, shape[-1], nearest = True)
inpaint_mask = rearrange(inpaint_mask, 'b h w -> b 1 h w').float()
inpaint_mask = resize_image_to(inpaint_mask, shape[-1], nearest = True)
inpaint_mask = inpaint_mask.bool()
img = torch.randn(shape, device = device)
x_start = None # for self-conditioning
if not is_latent_diffusion:
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
is_last_timestep = time_next == 0
for r in reversed(range(0, resample_times)):
is_last_resample_step = r == 0
alpha = alphas[time]
alpha_next = alphas[time_next]
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
if is_inpaint:
# following the repaint paper
# https://arxiv.org/abs/2201.09865
noised_inpaint_image = noise_scheduler.q_sample(inpaint_image, t = time_cond)
img = (img * ~inpaint_mask) + (noised_inpaint_image * inpaint_mask)
self_cond = x_start if unet.self_cond else None
unet_output = unet.forward_with_cond_scale(img, time_cond, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, self_cond = self_cond, lowres_cond_img = lowres_cond_img, lowres_noise_level = lowres_noise_level)
pred, _ = self.parse_unet_output(learned_variance, unet_output)
# predict x0
if predict_v:
x_start = noise_scheduler.predict_start_from_v(img, t = time_cond, v = pred)
elif predict_x_start:
x_start = pred
else:
x_start = noise_scheduler.predict_start_from_noise(img, t = time_cond, noise = pred)
# maybe clip x0
if clip_denoised:
x_start = self.dynamic_threshold(x_start)
# predict noise
pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = x_start)
c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
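# ddim update (eq. 12 of https://arxiv.org/abs/2010.02502): next image = sqrt(alpha_next) * x0 + c2 * predicted noise + c1 * fresh noise; eta = 0 makes c1 = 0 (fully deterministic sampling) while eta = 1 matches the ddpm posterior variance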
noise = torch.randn_like(img) if not is_last_timestep else 0.
img = x_start * alpha_next.sqrt() + \
c1 * noise + \
c2 * pred_noise
if is_inpaint and not (is_last_timestep or is_last_resample_step):
# in repaint, you renoise and resample up to 10 times every step
time_next_cond = torch.full((batch,), time_next, device = device, dtype = torch.long)
img = noise_scheduler.q_sample_from_to(img, time_next_cond, time_cond)
if exists(inpaint_image):
img = (img * ~inpaint_mask) + (inpaint_image * inpaint_mask)
img = self.unnormalize_img(img)
return img
@torch.no_grad()
def p_sample_loop(self, *args, noise_scheduler, timesteps = None, **kwargs):
num_timesteps = noise_scheduler.num_timesteps
timesteps = default(timesteps, num_timesteps)
assert timesteps <= num_timesteps
is_ddim = timesteps < num_timesteps
if not is_ddim:
return self.p_sample_loop_ddpm(*args, noise_scheduler = noise_scheduler, **kwargs)
return self.p_sample_loop_ddim(*args, noise_scheduler = noise_scheduler, timesteps = timesteps, **kwargs)
def p_losses(self, unet, x_start, times, *, image_embed, noise_scheduler, lowres_cond_img = None, text_encodings = None, predict_x_start = False, predict_v = False, noise = None, learned_variance = False, clip_denoised = False, is_latent_diffusion = False, lowres_noise_level = None):
noise = default(noise, lambda: torch.randn_like(x_start))
# normalize to [-1, 1]
if not is_latent_diffusion:
x_start = self.normalize_img(x_start)
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
# get x_t
x_noisy = noise_scheduler.q_sample(x_start = x_start, t = times, noise = noise)
# unet kwargs
unet_kwargs = dict(
image_embed = image_embed,
text_encodings = text_encodings,
lowres_cond_img = lowres_cond_img,
lowres_noise_level = lowres_noise_level,
)
# self conditioning
self_cond = None
if unet.self_cond and random.random() < 0.5:
with torch.no_grad():
unet_output = unet(x_noisy, times, **unet_kwargs)
self_cond, _ = self.parse_unet_output(learned_variance, unet_output)
self_cond = self_cond.detach()
# forward to get model prediction
unet_output = unet(
x_noisy,
times,
**unet_kwargs,
self_cond = self_cond,
image_cond_drop_prob = self.image_cond_drop_prob,
text_cond_drop_prob = self.text_cond_drop_prob,
)
pred, _ = self.parse_unet_output(learned_variance, unet_output)
if predict_v:
target = noise_scheduler.calculate_v(x_start, times, noise)
elif predict_x_start:
target = x_start
else:
target = noise
loss = noise_scheduler.loss_fn(pred, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = noise_scheduler.p2_reweigh_loss(loss, times)
loss = loss.mean()
if not learned_variance:
# return simple loss if not using learned variance
return loss
# most of the code below is transcribed from
# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/diffusion_utils_2.py
# the Improved DDPM paper then further modified it so that the mean is detached (shown a couple of lines below), and weighted to be smaller than the l1 or l2 "simple" loss
# it is questionable whether this is really needed, looking at some of the figures in the paper, but may as well stay faithful to their implementation
# if learning the variance, also include the extra weight kl loss
true_mean, _, true_log_variance_clipped = noise_scheduler.q_posterior(x_start = x_start, x_t = x_noisy, t = times)
model_mean, _, model_log_variance, _ = self.p_mean_variance(unet, x = x_noisy, t = times, image_embed = image_embed, noise_scheduler = noise_scheduler, clip_denoised = clip_denoised, learned_variance = True, model_output = unet_output)
# kl loss with detached model predicted mean, for stability reasons as in paper
detached_model_mean = model_mean.detach()
kl = normal_kl(true_mean, true_log_variance_clipped, detached_model_mean, model_log_variance)
kl = meanflat(kl) * NAT
decoder_nll = -discretized_gaussian_log_likelihood(x_start, means = detached_model_mean, log_scales = 0.5 * model_log_variance)
decoder_nll = meanflat(decoder_nll) * NAT
# at the first timestep return the decoder NLL, otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
vb_losses = torch.where(times == 0, decoder_nll, kl)
# weight the vb loss smaller, for stability, as in the paper (recommended 0.001)
vb_loss = vb_losses.mean() * self.vb_loss_weight
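# hybrid objective from https://arxiv.org/abs/2102.09672: total loss = simple reconstruction loss + vb_loss_weight * variational bound loss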
return loss + vb_loss
@torch.no_grad()
@eval_decorator
def sample(
self,
image = None,
image_embed = None,
text = None,
text_encodings = None,
batch_size = 1,
cond_scale = 1.,
start_at_unet_number = 1,
stop_at_unet_number = None,
distributed = False,
inpaint_image = None,
inpaint_mask = None,
inpaint_resample_times = 5,
one_unet_in_gpu_at_time = True
):
assert self.unconditional or exists(image_embed), 'image embed must be present on sampling from decoder unless if trained unconditionally'
if not self.unconditional:
batch_size = image_embed.shape[0]
if exists(text) and not exists(text_encodings) and not self.unconditional:
assert exists(self.clip)
_, text_encodings = self.clip.embed_text(text)
assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
assert not (exists(inpaint_image) ^ exists(inpaint_mask)), 'inpaint_image and inpaint_mask (boolean mask of [batch, height, width]) must be both given for inpainting'
img = None
if start_at_unet_number > 1:
# Then we are not generating the first image and one must have been passed in
assert exists(image), 'image must be passed in if starting at unet number > 1'
assert image.shape[0] == batch_size, 'image must have batch size of {} if starting at unet number > 1'.format(batch_size)
prev_unet_output_size = self.image_sizes[start_at_unet_number - 2]
img = resize_image_to(image, prev_unet_output_size, nearest = True)
is_cuda = next(self.parameters()).is_cuda
num_unets = self.num_unets
cond_scale = cast_tuple(cond_scale, num_unets)
for unet_number, unet, vae, channel, image_size, predict_x_start, predict_v, learned_variance, noise_scheduler, lowres_cond, sample_timesteps, unet_cond_scale in tqdm(zip(range(1, num_unets + 1), self.unets, self.vaes, self.sample_channels, self.image_sizes, self.predict_x_start, self.predict_v, self.learned_variance, self.noise_schedulers, self.lowres_conds, self.sample_timesteps, cond_scale)):
if unet_number < start_at_unet_number:
continue # skip the unets that come before the requested starting unet
context = self.one_unet_in_gpu(unet = unet) if is_cuda and one_unet_in_gpu_at_time else null_context()
with context:
# prepare low resolution conditioning for upsamplers
lowres_cond_img = lowres_noise_level = None
shape = (batch_size, channel, image_size, image_size)
if unet.lowres_cond:
lowres_cond_img = resize_image_to(img, target_image_size = image_size, clamp_range = self.input_image_range, nearest = True)
if lowres_cond.use_noise:
lowres_noise_level = torch.full((batch_size,), int(self.lowres_noise_sample_level * 1000), dtype = torch.long, device = self.device)
lowres_cond_img, _ = lowres_cond.noise_image(lowres_cond_img, lowres_noise_level)
# latent diffusion
is_latent_diffusion = isinstance(vae, VQGanVAE)
image_size = vae.get_encoded_fmap_size(image_size)
shape = (batch_size, vae.encoded_dim, image_size, image_size)
lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
# denoising loop for image
img = self.p_sample_loop(
unet,
shape,
image_embed = image_embed,
text_encodings = text_encodings,
cond_scale = unet_cond_scale,
predict_x_start = predict_x_start,
predict_v = predict_v,
learned_variance = learned_variance,
clip_denoised = not is_latent_diffusion,
lowres_cond_img = lowres_cond_img,
lowres_noise_level = lowres_noise_level,
is_latent_diffusion = is_latent_diffusion,
noise_scheduler = noise_scheduler,
timesteps = sample_timesteps,
inpaint_image = inpaint_image,
inpaint_mask = inpaint_mask,
inpaint_resample_times = inpaint_resample_times
)
img = vae.decode(img)
if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
break
return img
def forward(
self,
image,
text = None,
image_embed = None,
text_encodings = None,
unet_number = None,
return_lowres_cond_image = False # whether to return the low resolution conditioning images, for debugging upsampler purposes
):
assert not (self.num_unets > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {self.num_unets}, if you are training cascading DDPM (multiple unets)'
unet_number = default(unet_number, 1)
unet_index = unet_number - 1
unet = self.get_unet(unet_number)
vae = self.vaes[unet_index]
noise_scheduler = self.noise_schedulers[unet_index]
lowres_conditioner = self.lowres_conds[unet_index]
target_image_size = self.image_sizes[unet_index]
predict_x_start = self.predict_x_start[unet_index]
predict_v = self.predict_v[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
learned_variance = self.learned_variance[unet_index]
b, c, h, w, device, = *image.shape, image.device
assert image.shape[1] == self.channels
assert h >= target_image_size and w >= target_image_size
times = torch.randint(0, noise_scheduler.num_timesteps, (b,), device = device, dtype = torch.long)
if not exists(image_embed) and not self.unconditional:
assert exists(self.clip), 'if you want to derive CLIP image embeddings automatically, you must supply `clip` to the decoder on init'
image_embed, _ = self.clip.embed_image(image)
if exists(text) and not exists(text_encodings) and not self.unconditional:
assert exists(self.clip), 'if you are passing in raw text, you need to supply `clip` to the decoder'
_, text_encodings = self.clip.embed_text(text)
assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
lowres_cond_img, lowres_noise_level = lowres_conditioner(image, target_image_size = target_image_size, downsample_image_size = self.image_sizes[unet_index - 1]) if exists(lowres_conditioner) else (None, None)
image = resize_image_to(image, target_image_size, nearest = True)
if exists(random_crop_size):
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
# make sure low res conditioner and image both get augmented the same way
# detailed https://kornia.readthedocs.io/en/latest/augmentation.module.html?highlight=randomcrop#kornia.augmentation.RandomCrop
image = aug(image)
lowres_cond_img = aug(lowres_cond_img, params = aug._params)
is_latent_diffusion = not isinstance(vae, NullVQGanVAE)
vae.eval()
with torch.no_grad():
image = vae.encode(image)
lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
losses = self.p_losses(unet, image, times, image_embed = image_embed, text_encodings = text_encodings, lowres_cond_img = lowres_cond_img, predict_x_start = predict_x_start, predict_v = predict_v, learned_variance = learned_variance, is_latent_diffusion = is_latent_diffusion, noise_scheduler = noise_scheduler, lowres_noise_level = lowres_noise_level)
if not return_lowres_cond_image:
return losses
return losses, lowres_cond_img
# main class
class DALLE2(nn.Module):
def __init__(
self,
*,
prior,
decoder,
prior_num_samples = 2
):
super().__init__()
assert isinstance(prior, DiffusionPrior)
assert isinstance(decoder, Decoder)
self.prior = prior
self.decoder = decoder
self.prior_num_samples = prior_num_samples
self.decoder_need_text_cond = self.decoder.condition_on_text_encodings
self.to_pil = T.ToPILImage()
@torch.no_grad()
@eval_decorator
def forward(
self,
text,
cond_scale = 1.,
prior_cond_scale = 1.,
return_pil_images = False
):
device = module_device(self)
one_text = isinstance(text, str) or (not is_list_str(text) and text.shape[0] == 1)
if isinstance(text, str) or is_list_str(text):
text = [text] if not isinstance(text, (list, tuple)) else text
text = tokenizer.tokenize(text).to(device)
image_embed = self.prior.sample(text, num_samples_per_batch = self.prior_num_samples, cond_scale = prior_cond_scale)
text_cond = text if self.decoder_need_text_cond else None
images = self.decoder.sample(image_embed = image_embed, text = text_cond, cond_scale = cond_scale)
if return_pil_images:
images = list(map(self.to_pil, images.unbind(dim = 0)))
if one_text:
return first(images)
return images
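# illustrative end-to-end usage sketch (all hyperparameters below are placeholders and the exact constructor
# signatures should be checked against the README; this is not executed code):
#
#   clip = OpenAIClipAdapter()
#   prior_network = DiffusionPriorNetwork(dim = 512, depth = 6, dim_head = 64, heads = 8)
#   prior = DiffusionPrior(net = prior_network, clip = clip, timesteps = 1000, cond_drop_prob = 0.2)
#   unet1 = Unet(dim = 128, image_embed_dim = 512, dim_mults = (1, 2, 4, 8))
#   unet2 = Unet(dim = 64, image_embed_dim = 512, dim_mults = (1, 2, 4, 8))
#   decoder = Decoder((unet1, unet2), clip = clip, image_sizes = (64, 256), timesteps = 1000)
#   dalle2 = DALLE2(prior = prior, decoder = decoder)
#   images = dalle2(['a corgi wearing a top hat'], cond_scale = 2.)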
| DALLE2-pytorch-main | dalle2_pytorch/dalle2_pytorch.py |
import json
from torchvision import transforms as T
from pydantic import BaseModel, validator, model_validator
from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar
from x_clip import CLIP as XCLIP
from open_clip import list_pretrained
from coca_pytorch import CoCa
from dalle2_pytorch.dalle2_pytorch import (
CoCaAdapter,
OpenAIClipAdapter,
OpenClipAdapter,
Unet,
Decoder,
DiffusionPrior,
DiffusionPriorNetwork,
XClipAdapter
)
from dalle2_pytorch.trackers import Tracker, create_loader, create_logger, create_saver
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
InnerType = TypeVar('InnerType')
ListOrTuple = Union[List[InnerType], Tuple[InnerType]]
SingularOrIterable = Union[InnerType, ListOrTuple[InnerType]]
# general pydantic classes
class TrainSplitConfig(BaseModel):
train: float = 0.75
val: float = 0.15
test: float = 0.1
@model_validator(mode = 'after')
def validate_all(self, m):
actual_sum = sum([*dict(self).values()])
if actual_sum != 1.:
raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
return self
class TrackerLogConfig(BaseModel):
log_type: str = 'console'
resume: bool = False # For logs that are saved to unique locations, resume a previous run
auto_resume: bool = False # If the process crashes and restarts, resume from the run that crashed
verbose: bool = False
class Config:
# Each individual log type has its own arguments that will be passed through the config
extra = "allow"
def create(self, data_path: str):
kwargs = self.dict()
return create_logger(self.log_type, data_path, **kwargs)
class TrackerLoadConfig(BaseModel):
load_from: Optional[str] = None
only_auto_resume: bool = False # Only attempt to load if the logger is auto-resuming
class Config:
extra = "allow"
def create(self, data_path: str):
kwargs = self.dict()
if self.load_from is None:
return None
return create_loader(self.load_from, data_path, **kwargs)
class TrackerSaveConfig(BaseModel):
save_to: str = 'local'
save_all: bool = False
save_latest: bool = True
save_best: bool = True
class Config:
extra = "allow"
def create(self, data_path: str):
kwargs = self.dict()
return create_saver(self.save_to, data_path, **kwargs)
class TrackerConfig(BaseModel):
data_path: str = '.tracker_data'
overwrite_data_path: bool = False
log: TrackerLogConfig
load: Optional[TrackerLoadConfig]
save: Union[List[TrackerSaveConfig], TrackerSaveConfig]
def create(self, full_config: BaseModel, extra_config: dict, dummy_mode: bool = False) -> Tracker:
tracker = Tracker(self.data_path, dummy_mode=dummy_mode, overwrite_data_path=self.overwrite_data_path)
# Add the logger
tracker.add_logger(self.log.create(self.data_path))
# Add the loader
if self.load is not None:
tracker.add_loader(self.load.create(self.data_path))
# Add the saver or savers
if isinstance(self.save, list):
for save_config in self.save:
tracker.add_saver(save_config.create(self.data_path))
else:
tracker.add_saver(self.save.create(self.data_path))
# Initialize all the components and verify that all data is valid
tracker.init(full_config, extra_config)
return tracker
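# illustrative tracker section of a training config json (field names follow the pydantic models above,
# the concrete values are hypothetical):
#
#   "tracker": {
#       "data_path": ".tracker_data",
#       "log":  { "log_type": "wandb", "wandb_entity": "my-entity", "wandb_project": "dalle2-decoder" },
#       "load": { "load_from": "local", "file_path": "./latest_checkpoint.pth" },
#       "save": [ { "save_to": "local" } ]
#   }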
# diffusion prior pydantic classes
class AdapterConfig(BaseModel):
make: str = "openai"
model: str = "ViT-L/14"
base_model_kwargs: Dict[str, Any] = None
def create(self):
if self.make == "openai":
return OpenAIClipAdapter(self.model)
elif self.make == "open_clip":
pretrained = dict(list_pretrained())
checkpoint = pretrained[self.model]
return OpenClipAdapter(name=self.model, pretrained=checkpoint)
elif self.make == "x-clip":
return XClipAdapter(XCLIP(**self.base_model_kwargs))
elif self.make == "coca":
return CoCaAdapter(CoCa(**self.base_model_kwargs))
else:
raise AttributeError("No adapter with that name is available.")
class DiffusionPriorNetworkConfig(BaseModel):
dim: int
depth: int
max_text_len: int = None
num_timesteps: int = None
num_time_embeds: int = 1
num_image_embeds: int = 1
num_text_embeds: int = 1
dim_head: int = 64
heads: int = 8
ff_mult: int = 4
norm_in: bool = False
norm_out: bool = True
attn_dropout: float = 0.
ff_dropout: float = 0.
final_proj: bool = True
normformer: bool = False
rotary_emb: bool = True
class Config:
extra = "allow"
def create(self):
kwargs = self.dict()
return DiffusionPriorNetwork(**kwargs)
class DiffusionPriorConfig(BaseModel):
clip: AdapterConfig = None
net: DiffusionPriorNetworkConfig
image_embed_dim: int
image_size: int
image_channels: int = 3
timesteps: int = 1000
sample_timesteps: Optional[int] = None
cond_drop_prob: float = 0.
loss_type: str = 'l2'
predict_x_start: bool = True
beta_schedule: str = 'cosine'
condition_on_text_encodings: bool = True
class Config:
extra = "allow"
def create(self):
kwargs = self.dict()
has_clip = exists(kwargs.pop('clip'))
kwargs.pop('net')
clip = None
if has_clip:
clip = self.clip.create()
diffusion_prior_network = self.net.create()
return DiffusionPrior(net = diffusion_prior_network, clip = clip, **kwargs)
class DiffusionPriorTrainConfig(BaseModel):
epochs: int = 1
lr: float = 1.1e-4
wd: float = 6.02e-2
max_grad_norm: float = 0.5
use_ema: bool = True
ema_beta: float = 0.99
amp: bool = False
warmup_steps: int = None # number of warmup steps
save_every_seconds: int = 3600 # how often to save
eval_timesteps: List[int] = [64] # which sampling timesteps to evaluate with
best_validation_loss: float = 1e9 # the current best validation loss observed
current_epoch: int = 0 # the current epoch
num_samples_seen: int = 0 # the current number of samples seen
random_seed: int = 0 # manual seed for torch
class DiffusionPriorDataConfig(BaseModel):
image_url: str # path to embeddings folder
meta_url: str # path to metadata (captions) for images
splits: TrainSplitConfig # define train, validation, test splits for your dataset
batch_size: int # per-gpu batch size used to train the model
num_data_points: int = 25e7 # total number of datapoints to train on
eval_every_seconds: int = 3600 # validation statistics will be performed this often
class TrainDiffusionPriorConfig(BaseModel):
prior: DiffusionPriorConfig
data: DiffusionPriorDataConfig
train: DiffusionPriorTrainConfig
tracker: TrackerConfig
@classmethod
def from_json_path(cls, json_path):
with open(json_path) as f:
config = json.load(f)
return cls(**config)
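# illustrative usage (the json path is hypothetical):
#
#   config = TrainDiffusionPriorConfig.from_json_path('configs/train_prior.json')
#   prior = config.prior.create()               # builds the DiffusionPriorNetwork and wraps it in a DiffusionPrior
#   tracker = config.tracker.create(config, {}) # sets up logging, checkpoint loading and saving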
# decoder pydantic classes
class UnetConfig(BaseModel):
dim: int
dim_mults: ListOrTuple[int]
image_embed_dim: int = None
text_embed_dim: int = None
cond_on_text_encodings: bool = None
cond_dim: int = None
channels: int = 3
self_attn: ListOrTuple[int]
attn_dim_head: int = 32
attn_heads: int = 16
init_cross_embed: bool = True
class Config:
extra = "allow"
class DecoderConfig(BaseModel):
unets: ListOrTuple[UnetConfig]
image_size: int = None
image_sizes: ListOrTuple[int] = None
clip: Optional[AdapterConfig] # The clip model to use if embeddings are not provided
channels: int = 3
timesteps: int = 1000
sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
loss_type: str = 'l2'
beta_schedule: ListOrTuple[str] = None # None means all cosine
learned_variance: SingularOrIterable[bool] = True
image_cond_drop_prob: float = 0.1
text_cond_drop_prob: float = 0.5
def create(self):
decoder_kwargs = self.dict()
unet_configs = decoder_kwargs.pop('unets')
unets = [Unet(**config) for config in unet_configs]
has_clip = exists(decoder_kwargs.pop('clip'))
clip = None
if has_clip:
clip = self.clip.create()
return Decoder(unets, clip=clip, **decoder_kwargs)
@validator('image_sizes')
def check_image_sizes(cls, image_sizes, values):
if exists(values.get('image_size')) ^ exists(image_sizes):
return image_sizes
raise ValueError('either image_size or image_sizes is required, but not both')
class Config:
extra = "allow"
class DecoderDataConfig(BaseModel):
webdataset_base_url: str # path to a webdataset with jpg images
img_embeddings_url: Optional[str] = None # path to .npy files with embeddings
text_embeddings_url: Optional[str] = None # path to .npy files with embeddings
num_workers: int = 4
batch_size: int = 64
start_shard: int = 0
end_shard: int = 9999999
shard_width: int = 6
index_width: int = 4
splits: TrainSplitConfig
shuffle_train: bool = True
resample_train: bool = False
preprocessing: Dict[str, Any] = {'ToTensor': True}
@property
def img_preproc(self):
def _get_transformation(transformation_name, **kwargs):
if transformation_name == "RandomResizedCrop":
return T.RandomResizedCrop(**kwargs)
elif transformation_name == "RandomHorizontalFlip":
return T.RandomHorizontalFlip()
elif transformation_name == "ToTensor":
return T.ToTensor()
transforms = []
for transform_name, transform_kwargs_or_bool in self.preprocessing.items():
transform_kwargs = {} if not isinstance(transform_kwargs_or_bool, dict) else transform_kwargs_or_bool
transforms.append(_get_transformation(transform_name, **transform_kwargs))
return T.Compose(transforms)
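# e.g. a (hypothetical) preprocessing entry of
#   {"RandomResizedCrop": {"size": 256, "scale": [0.75, 1.0]}, "RandomHorizontalFlip": true, "ToTensor": true}
# composes those three torchvision transforms in that order; a boolean value means "use the transform with default arguments"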
class DecoderTrainConfig(BaseModel):
epochs: int = 20
lr: SingularOrIterable[float] = 1e-4
wd: SingularOrIterable[float] = 0.01
warmup_steps: Optional[SingularOrIterable[int]] = None
find_unused_parameters: bool = True
static_graph: bool = True
max_grad_norm: SingularOrIterable[float] = 0.5
save_every_n_samples: int = 100000
n_sample_images: int = 6 # The number of example images to produce when sampling the train and test dataset
cond_scale: Union[float, List[float]] = 1.0
device: str = 'cuda:0'
epoch_samples: int = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
validation_samples: int = None # Same as above but for validation.
save_immediately: bool = False
use_ema: bool = True
ema_beta: float = 0.999
amp: bool = False
unet_training_mask: ListOrTuple[bool] = None # If None, use all unets
class DecoderEvaluateConfig(BaseModel):
n_evaluation_samples: int = 1000
FID: Dict[str, Any] = None
IS: Dict[str, Any] = None
KID: Dict[str, Any] = None
LPIPS: Dict[str, Any] = None
class TrainDecoderConfig(BaseModel):
decoder: DecoderConfig
data: DecoderDataConfig
train: DecoderTrainConfig
evaluate: DecoderEvaluateConfig
tracker: TrackerConfig
seed: int = 0
@classmethod
def from_json_path(cls, json_path):
with open(json_path) as f:
config = json.load(f)
print(config)
return cls(**config)
@model_validator(mode = 'after')
def check_has_embeddings(self, m):
# Makes sure that enough information is provided to get the embeddings specified for training
values = dict(self)
data_config, decoder_config = values.get('data'), values.get('decoder')
if not exists(data_config) or not exists(decoder_config):
# Then something else errored and we should just pass through
return self
using_text_embeddings = any([unet.cond_on_text_encodings for unet in decoder_config.unets])
using_clip = exists(decoder_config.clip)
img_emb_url = data_config.img_embeddings_url
text_emb_url = data_config.text_embeddings_url
if using_text_embeddings:
# Then we need some way to get the embeddings
assert using_clip or exists(text_emb_url), 'If text conditioning, either clip or text_embeddings_url must be provided'
if using_clip:
if using_text_embeddings:
assert not exists(text_emb_url) or not exists(img_emb_url), 'Loaded clip, but also provided text_embeddings_url and img_embeddings_url. This is redundant. Remove the clip model or the text embeddings'
else:
assert not exists(img_emb_url), 'Loaded clip, but also provided img_embeddings_url. This is redundant. Remove the clip model or the embeddings'
if text_emb_url:
assert using_text_embeddings, "Text embeddings are being loaded, but text embeddings are not being conditioned on. This will slow down the dataloader for no reason."
return self
| DALLE2-pytorch-main | dalle2_pytorch/train_configs.py |
__version__ = '1.15.1'
| DALLE2-pytorch-main | dalle2_pytorch/version.py |
from math import sqrt
import copy
from random import choice
from pathlib import Path
from shutil import rmtree
from PIL import Image
import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
from einops import rearrange
from dalle2_pytorch.vqgan_vae import VQGanVAE
from dalle2_pytorch.optimizer import get_optimizer
from ema_pytorch import EMA
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# classes
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
print(f'{len(self.paths)} training samples found at {folder}')
self.transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(image_size),
T.RandomHorizontalFlip(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# main trainer class
class VQGanVAETrainer(nn.Module):
def __init__(
self,
vae,
*,
num_train_steps,
lr,
batch_size,
folder,
grad_accum_every,
wd = 0.,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
valid_frac = 0.05,
random_split_seed = 42,
ema_beta = 0.995,
ema_update_after_step = 500,
ema_update_every = 10,
apply_grad_penalty_every = 4,
amp = False
):
super().__init__()
assert isinstance(vae, VQGanVAE), 'vae must be instance of VQGanVAE'
image_size = vae.image_size
self.vae = vae
self.ema_vae = EMA(vae, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
all_parameters = set(vae.parameters())
discr_parameters = set(vae.discr.parameters())
vae_parameters = all_parameters - discr_parameters
self.optim = get_optimizer(vae_parameters, lr = lr, wd = wd)
self.discr_optim = get_optimizer(discr_parameters, lr = lr, wd = wd)
self.amp = amp
self.scaler = GradScaler(enabled = amp)
self.discr_scaler = GradScaler(enabled = amp)
# create dataset
self.ds = ImageDataset(folder, image_size = image_size)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
print(f'training with shared training and valid dataset of {len(self.ds)} samples')
# dataloader
self.dl = cycle(DataLoader(
self.ds,
batch_size = batch_size,
shuffle = True
))
self.valid_dl = cycle(DataLoader(
self.valid_ds,
batch_size = batch_size,
shuffle = True
))
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
def train_step(self):
device = next(self.vae.parameters()).device
steps = int(self.steps.item())
apply_grad_penalty = not (steps % self.apply_grad_penalty_every)
self.vae.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
img = next(self.dl)
img = img.to(device)
with autocast(enabled = self.amp):
loss = self.vae(
img,
return_loss = True,
apply_grad_penalty = apply_grad_penalty
)
self.scaler.scale(loss / self.grad_accum_every).backward()
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
self.scaler.step(self.optim)
self.scaler.update()
self.optim.zero_grad()
# update discriminator
if exists(self.vae.discr):
discr_loss = 0
for _ in range(self.grad_accum_every):
img = next(self.dl)
img = img.to(device)
with autocast(enabled = self.amp):
loss = self.vae(img, return_discr_loss = True)
self.discr_scaler.scale(loss / self.grad_accum_every).backward()
accum_log(logs, {'discr_loss': loss.item() / self.grad_accum_every})
self.discr_scaler.step(self.discr_optim)
self.discr_scaler.update()
self.discr_optim.zero_grad()
# log
print(f"{steps}: vae loss: {logs['loss']} - discr loss: {logs['discr_loss']}")
# update exponential moving averaged generator
self.ema_vae.update()
# sample results every so often
if not (steps % self.save_results_every):
for model, filename in ((self.ema_vae.ema_model, f'{steps}.ema'), (self.vae, str(steps))):
model.eval()
imgs = next(self.dl)
imgs = imgs.to(device)
recons = model(imgs)
nrows = int(sqrt(self.batch_size))
imgs_and_recons = torch.stack((imgs, recons), dim = 0)
imgs_and_recons = rearrange(imgs_and_recons, 'r b ... -> (b r) ...')
imgs_and_recons = imgs_and_recons.detach().cpu().float().clamp(0., 1.)
grid = make_grid(imgs_and_recons, nrow = 2, normalize = True, value_range = (0, 1))
logs['reconstructions'] = grid
save_image(grid, str(self.results_folder / f'{filename}.png'))
print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
if not (steps % self.save_model_every):
state_dict = self.vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.pt')
torch.save(state_dict, model_path)
ema_state_dict = self.ema_vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.ema.pt')
torch.save(ema_state_dict, model_path)
print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
device = next(self.vae.parameters()).device
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
print('training complete')
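# illustrative usage sketch (the VQGanVAE constructor arguments are assumptions, not verified defaults):
#
#   vae = VQGanVAE(dim = 32, image_size = 256)
#   trainer = VQGanVAETrainer(vae, folder = './path/to/images', num_train_steps = 50000, lr = 3e-4, batch_size = 4, grad_accum_every = 8)
#   trainer.train()   # runs train_step() until num_train_steps is reached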
| DALLE2-pytorch-main | dalle2_pytorch/vqgan_vae_trainer.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from dalle2_pytorch.version import __version__
from dalle2_pytorch.dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder
from dalle2_pytorch.dalle2_pytorch import OpenAIClipAdapter, OpenClipAdapter
from dalle2_pytorch.trainer import DecoderTrainer, DiffusionPriorTrainer
from dalle2_pytorch.vqgan_vae import VQGanVAE
from x_clip import CLIP
| DALLE2-pytorch-main | dalle2_pytorch/__init__.py |
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import html
import os
import ftfy
import regex as re
from functools import lru_cache
from pathlib import Path
from dalle2_pytorch.utils import import_or_print_error
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.vocab_size = 49408
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens, remove_start_end = True, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
if remove_start_end:
tokens = [token for token in tokens if token not in (49406, 49407, 0)]
text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
tokenizer = SimpleTokenizer()
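# quick round trip with the default tokenizer (illustrative):
#
#   ids = tokenizer.tokenize(['a corgi riding a skateboard'], context_length = 77)  # LongTensor of shape (1, 77), zero padded
#   text = tokenizer.decode(ids[0])                                                 # roughly recovers the original caption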
# YTTM tokenizer
class YttmTokenizer:
def __init__(self, bpe_path = None):
bpe_path = Path(bpe_path)
assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
self.yttm = import_or_print_error('youtokentome', 'you need to install youtokentome by `pip install youtokentome`')
tokenizer = self.yttm.BPE(model = str(bpe_path))
self.tokenizer = tokenizer
self.vocab_size = tokenizer.vocab_size()
def decode(self, tokens, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
return self.tokenizer.decode(tokens, ignore_ids = pad_tokens.union({0}))
def encode(self, texts):
encoded = self.tokenizer.encode(texts, output_type = self.yttm.OutputType.ID)
return list(map(torch.tensor, encoded))
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = self.encode(texts)
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
| DALLE2-pytorch-main | dalle2_pytorch/tokenizer.py |
import click
import torch
import torchvision.transforms as T
from functools import reduce
from pathlib import Path
from dalle2_pytorch import DALLE2, Decoder, DiffusionPrior
def safeget(dictionary, keys, default = None):
return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split('.'), dictionary)
def simple_slugify(text, max_length = 255):
return text.replace("-", "_").replace(",", "").replace(" ", "_").replace("|", "--").strip('-_')[:max_length]
def get_pkg_version():
from pkg_resources import get_distribution
return get_distribution('dalle2_pytorch').version
def main():
pass
@click.command()
@click.option('--model', default = './dalle2.pt', help = 'path to trained DALL-E2 model')
@click.option('--cond_scale', default = 2, help = 'conditioning scale (classifier free guidance) in decoder')
@click.argument('text')
def dream(
model,
cond_scale,
text
):
model_path = Path(model)
full_model_path = str(model_path.resolve())
assert model_path.exists(), f'model not found at {full_model_path}'
loaded = torch.load(str(model_path))
version = safeget(loaded, 'version')
print(f'loading DALL-E2 from {full_model_path}, saved at version {version} - current package version is {get_pkg_version()}')
prior_init_params = safeget(loaded, 'init_params.prior')
decoder_init_params = safeget(loaded, 'init_params.decoder')
model_params = safeget(loaded, 'model_params')
prior = DiffusionPrior(**prior_init_params)
decoder = Decoder(**decoder_init_params)
dalle2 = DALLE2(prior, decoder)
dalle2.load_state_dict(model_params)
image = dalle2(text, cond_scale = cond_scale)
pil_image = T.ToPILImage()(image)
return pil_image.save(f'./{simple_slugify(text)}.png')
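# example invocation, assuming the `dream` command above is exposed as a console script entry point:
#
#   $ dream --model ./dalle2.pt --cond_scale 2 "a matte painting of a city at dusk"
#
# which writes ./a_matte_painting_of_a_city_at_dusk.png to the current working directory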
| DALLE2-pytorch-main | dalle2_pytorch/cli.py |
import urllib.request
import os
import json
from pathlib import Path
import shutil
from itertools import zip_longest
from typing import Any, Optional, List, Union
from pydantic import BaseModel
import torch
from dalle2_pytorch.dalle2_pytorch import Decoder, DiffusionPrior
from dalle2_pytorch.utils import import_or_print_error
from dalle2_pytorch.trainer import DecoderTrainer, DiffusionPriorTrainer
from dalle2_pytorch.version import __version__
from packaging import version
# constants
DEFAULT_DATA_PATH = './.tracker-data'
# helper functions
def exists(val):
return val is not None
class BaseLogger:
"""
An abstract class representing an object that can log data.
Parameters:
data_path (str): A file path for storing temporary data.
verbose (bool): Whether or not to always print logs to the console.
"""
def __init__(self, data_path: str, resume: bool = False, auto_resume: bool = False, verbose: bool = False, **kwargs):
self.data_path = Path(data_path)
self.resume = resume
self.auto_resume = auto_resume
self.verbose = verbose
def init(self, full_config: BaseModel, extra_config: dict, **kwargs) -> None:
"""
Initializes the logger.
Errors if the logger is invalid.
full_config is the config file dict while extra_config is anything else from the script that is not defined in the config file.
"""
raise NotImplementedError
def log(self, log, **kwargs) -> None:
raise NotImplementedError
def log_images(self, images, captions=[], image_section="images", **kwargs) -> None:
raise NotImplementedError
def log_file(self, file_path, **kwargs) -> None:
raise NotImplementedError
def log_error(self, error_string, **kwargs) -> None:
raise NotImplementedError
def get_resume_data(self, **kwargs) -> dict:
"""
Returns the data that, along with { "resume": True }, will be used to resume training.
It is assumed that after init is called this data will be complete.
If the logger does not have any resume functionality, it should return an empty dict.
"""
raise NotImplementedError
class ConsoleLogger(BaseLogger):
def init(self, full_config: BaseModel, extra_config: dict, **kwargs) -> None:
print("Logging to console")
def log(self, log, **kwargs) -> None:
print(log)
def log_images(self, images, captions=[], image_section="images", **kwargs) -> None:
pass
def log_file(self, file_path, **kwargs) -> None:
pass
def log_error(self, error_string, **kwargs) -> None:
print(error_string)
def get_resume_data(self, **kwargs) -> dict:
return {}
class WandbLogger(BaseLogger):
"""
Logs to a wandb run.
Parameters:
data_path (str): A file path for storing temporary data.
wandb_entity (str): The wandb entity to log to.
wandb_project (str): The wandb project to log to.
wandb_run_id (str): The wandb run id to resume.
wandb_run_name (str): The wandb run name to use.
"""
def __init__(self,
data_path: str,
wandb_entity: str,
wandb_project: str,
wandb_run_id: Optional[str] = None,
wandb_run_name: Optional[str] = None,
**kwargs
):
super().__init__(data_path, **kwargs)
self.entity = wandb_entity
self.project = wandb_project
self.run_id = wandb_run_id
self.run_name = wandb_run_name
def init(self, full_config: BaseModel, extra_config: dict, **kwargs) -> None:
assert self.entity is not None, "wandb_entity must be specified for wandb logger"
assert self.project is not None, "wandb_project must be specified for wandb logger"
self.wandb = import_or_print_error('wandb', '`pip install wandb` to use the wandb logger')
os.environ["WANDB_SILENT"] = "true"
# Initializes the wandb run
init_object = {
"entity": self.entity,
"project": self.project,
"config": {**full_config.dict(), **extra_config}
}
if self.run_name is not None:
init_object['name'] = self.run_name
if self.resume:
assert self.run_id is not None, '`wandb_run_id` must be provided if `wandb_resume` is True'
if self.run_name is not None:
print("You are renaming a run. I hope that is what you intended.")
init_object['resume'] = 'must'
init_object['id'] = self.run_id
self.wandb.init(**init_object)
print(f"Logging to wandb run {self.wandb.run.path}-{self.wandb.run.name}")
def log(self, log, **kwargs) -> None:
if self.verbose:
print(log)
self.wandb.log(log, **kwargs)
def log_images(self, images, captions=[], image_section="images", **kwargs) -> None:
"""
Takes a tensor of images and a list of captions and logs them to wandb.
"""
wandb_images = [self.wandb.Image(image, caption=caption) for image, caption in zip_longest(images, captions)]
self.wandb.log({ image_section: wandb_images }, **kwargs)
def log_file(self, file_path, base_path: Optional[str] = None, **kwargs) -> None:
if base_path is None:
# Then we take the basepath as the parent of the file_path
base_path = Path(file_path).parent
self.wandb.save(str(file_path), base_path = str(base_path))
def log_error(self, error_string, step=None, **kwargs) -> None:
if self.verbose:
print(error_string)
self.wandb.log({"error": error_string, **kwargs}, step=step)
def get_resume_data(self, **kwargs) -> dict:
# In order to resume, we need wandb_entity, wandb_project, and wandb_run_id
return {
"entity": self.entity,
"project": self.project,
"run_id": self.wandb.run.id
}
logger_type_map = {
'console': ConsoleLogger,
'wandb': WandbLogger,
}
def create_logger(logger_type: str, data_path: str, **kwargs) -> BaseLogger:
if logger_type == 'custom':
raise NotImplementedError('Custom loggers are not supported yet. Please use a different logger type.')
try:
logger_class = logger_type_map[logger_type]
except KeyError:
raise ValueError(f'Unknown logger type: {logger_type}. Must be one of {list(logger_type_map.keys())}')
return logger_class(data_path, **kwargs)
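# illustrative usage (entity and project names are hypothetical; `full_config` is the parsed training config):
#
#   logger = create_logger('wandb', '.tracker-data', wandb_entity = 'my-team', wandb_project = 'dalle2-decoder')
#   logger.init(full_config, {'world_size': 8})
#   logger.log({'loss': 0.123}, step = 100)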
class BaseLoader:
"""
An abstract class representing an object that can load a model checkpoint.
Parameters:
data_path (str): A file path for storing temporary data.
"""
def __init__(self, data_path: str, only_auto_resume: bool = False, **kwargs):
self.data_path = Path(data_path)
self.only_auto_resume = only_auto_resume
def init(self, logger: BaseLogger, **kwargs) -> None:
raise NotImplementedError
def recall(self) -> dict:
raise NotImplementedError
class UrlLoader(BaseLoader):
"""
A loader that downloads the file from a url and loads it
Parameters:
data_path (str): A file path for storing temporary data.
url (str): The url to download the file from.
"""
def __init__(self, data_path: str, url: str, **kwargs):
super().__init__(data_path, **kwargs)
self.url = url
def init(self, logger: BaseLogger, **kwargs) -> None:
# Makes sure the file exists to be downloaded
pass # TODO: Actually implement that
def recall(self) -> dict:
# Download the file
save_path = self.data_path / 'loaded_checkpoint.pth'
urllib.request.urlretrieve(self.url, str(save_path))
# Load the file
return torch.load(str(save_path), map_location='cpu')
class LocalLoader(BaseLoader):
"""
A loader that loads a file from a local path
Parameters:
data_path (str): A file path for storing temporary data.
file_path (str): The path to the file to load.
"""
def __init__(self, data_path: str, file_path: str, **kwargs):
super().__init__(data_path, **kwargs)
self.file_path = Path(file_path)
def init(self, logger: BaseLogger, **kwargs) -> None:
# Makes sure the file exists to be loaded
if not self.file_path.exists() and not self.only_auto_resume:
raise FileNotFoundError(f'Model not found at {self.file_path}')
def recall(self) -> dict:
# Load the file
return torch.load(str(self.file_path), map_location='cpu')
class WandbLoader(BaseLoader):
"""
A loader that loads a model from an existing wandb run
"""
def __init__(self, data_path: str, wandb_file_path: str, wandb_run_path: Optional[str] = None, **kwargs):
super().__init__(data_path, **kwargs)
self.run_path = wandb_run_path
self.file_path = wandb_file_path
def init(self, logger: BaseLogger, **kwargs) -> None:
self.wandb = import_or_print_error('wandb', '`pip install wandb` to use the wandb recall function')
# Make sure the file can be downloaded
if self.wandb.run is not None and self.run_path is None:
self.run_path = self.wandb.run.path
        assert self.run_path is not None, 'wandb run was not found to load from. If not using the wandb logger, `wandb_run_path` must be specified for the wandb loader'
assert self.file_path is not None, '`wandb_file_path` must be provided for the wandb loader'
os.environ["WANDB_SILENT"] = "true"
pass # TODO: Actually implement that
def recall(self) -> dict:
file_reference = self.wandb.restore(self.file_path, run_path=self.run_path)
return torch.load(file_reference.name, map_location='cpu')
loader_type_map = {
'url': UrlLoader,
'local': LocalLoader,
'wandb': WandbLoader,
}
def create_loader(loader_type: str, data_path: str, **kwargs) -> BaseLoader:
if loader_type == 'custom':
raise NotImplementedError('Custom loaders are not supported yet. Please use a different loader type.')
try:
loader_class = loader_type_map[loader_type]
except KeyError:
raise ValueError(f'Unknown loader type: {loader_type}. Must be one of {list(loader_type_map.keys())}')
return loader_class(data_path, **kwargs)
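# Added illustrative sketch (not part of the original file): building a local loader through
# the factory above. The paths are hypothetical examples; recall() would torch.load the file.
def _example_create_loader():
    loader = create_loader('local', data_path = './.tracker-data', file_path = './checkpoint.pth')
    assert isinstance(loader, LocalLoader)
    return loader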
class BaseSaver:
def __init__(self,
data_path: str,
save_latest_to: Optional[Union[str, bool]] = None,
save_best_to: Optional[Union[str, bool]] = None,
save_meta_to: Optional[str] = None,
save_type: str = 'checkpoint',
**kwargs
):
self.data_path = Path(data_path)
self.save_latest_to = save_latest_to
self.saving_latest = save_latest_to is not None and save_latest_to is not False
self.save_best_to = save_best_to
self.saving_best = save_best_to is not None and save_best_to is not False
self.save_meta_to = save_meta_to
self.saving_meta = save_meta_to is not None
self.save_type = save_type
assert save_type in ['checkpoint', 'model'], '`save_type` must be one of `checkpoint` or `model`'
assert self.saving_latest or self.saving_best or self.saving_meta, 'At least one saving option must be specified'
def init(self, logger: BaseLogger, **kwargs) -> None:
raise NotImplementedError
def save_file(self, local_path: Path, save_path: str, is_best=False, is_latest=False, **kwargs) -> None:
"""
Save a general file under save_meta_to
"""
raise NotImplementedError
class LocalSaver(BaseSaver):
def __init__(self,
data_path: str,
**kwargs
):
super().__init__(data_path, **kwargs)
def init(self, logger: BaseLogger, **kwargs) -> None:
# Makes sure the directory exists to be saved to
print(f"Saving {self.save_type} locally")
if not self.data_path.exists():
self.data_path.mkdir(parents=True)
def save_file(self, local_path: str, save_path: str, **kwargs) -> None:
# Copy the file to save_path
save_path_file_name = Path(save_path).name
# Make sure parent directory exists
save_path_parent = Path(save_path).parent
if not save_path_parent.exists():
save_path_parent.mkdir(parents=True)
print(f"Saving {save_path_file_name} {self.save_type} to local path {save_path}")
shutil.copy(local_path, save_path)
class WandbSaver(BaseSaver):
def __init__(self, data_path: str, wandb_run_path: Optional[str] = None, **kwargs):
super().__init__(data_path, **kwargs)
self.run_path = wandb_run_path
def init(self, logger: BaseLogger, **kwargs) -> None:
self.wandb = import_or_print_error('wandb', '`pip install wandb` to use the wandb logger')
os.environ["WANDB_SILENT"] = "true"
        # Makes sure that the user can upload to this run
if self.run_path is not None:
entity, project, run_id = self.run_path.split("/")
self.run = self.wandb.init(entity=entity, project=project, id=run_id)
else:
assert self.wandb.run is not None, 'You must be using the wandb logger if you are saving to wandb and have not set `wandb_run_path`'
self.run = self.wandb.run
# TODO: Now actually check if upload is possible
print(f"Saving to wandb run {self.run.path}-{self.run.name}")
def save_file(self, local_path: Path, save_path: str, **kwargs) -> None:
# In order to log something in the correct place in wandb, we need to have the same file structure here
save_path_file_name = Path(save_path).name
print(f"Saving {save_path_file_name} {self.save_type} to wandb run {self.run.path}-{self.run.name}")
save_path = Path(self.data_path) / save_path
save_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(local_path, save_path)
self.run.save(str(save_path), base_path = str(self.data_path), policy='now')
class HuggingfaceSaver(BaseSaver):
def __init__(self, data_path: str, huggingface_repo: str, token_path: Optional[str] = None, **kwargs):
super().__init__(data_path, **kwargs)
self.huggingface_repo = huggingface_repo
self.token_path = token_path
def init(self, logger: BaseLogger, **kwargs):
# Makes sure this user can upload to the repo
self.hub = import_or_print_error('huggingface_hub', '`pip install huggingface_hub` to use the huggingface saver')
try:
identity = self.hub.whoami() # Errors if not logged in
# Then we are logged in
        except Exception:
            # We are not logged in. Use the token_path to set the token.
            if self.token_path is None or not os.path.exists(self.token_path):
raise Exception("Not logged in to huggingface and no token_path specified. Please login with `huggingface-cli login` or if that does not work set the token_path.")
with open(self.token_path, "r") as f:
token = f.read().strip()
self.hub.HfApi.set_access_token(token)
identity = self.hub.whoami()
print(f"Saving to huggingface repo {self.huggingface_repo}")
def save_file(self, local_path: Path, save_path: str, **kwargs) -> None:
# Saving to huggingface is easy, we just need to upload the file with the correct name
save_path_file_name = Path(save_path).name
print(f"Saving {save_path_file_name} {self.save_type} to huggingface repo {self.huggingface_repo}")
self.hub.upload_file(
path_or_fileobj=str(local_path),
path_in_repo=str(save_path),
repo_id=self.huggingface_repo
)
saver_type_map = {
'local': LocalSaver,
'wandb': WandbSaver,
'huggingface': HuggingfaceSaver
}
def create_saver(saver_type: str, data_path: str, **kwargs) -> BaseSaver:
if saver_type == 'custom':
raise NotImplementedError('Custom savers are not supported yet. Please use a different saver type.')
try:
saver_class = saver_type_map[saver_type]
except KeyError:
raise ValueError(f'Unknown saver type: {saver_type}. Must be one of {list(saver_type_map.keys())}')
return saver_class(data_path, **kwargs)
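# Added illustrative sketch (not part of the original file): building a local saver that keeps
# the latest checkpoint at a hypothetical path template.
def _example_create_saver():
    saver = create_saver('local', data_path = './.tracker-data', save_latest_to = './checkpoints/latest.pth')
    assert isinstance(saver, LocalSaver) and saver.saving_latest
    return saver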
class Tracker:
def __init__(self, data_path: Optional[str] = DEFAULT_DATA_PATH, overwrite_data_path: bool = False, dummy_mode: bool = False):
self.data_path = Path(data_path)
if not dummy_mode:
if not overwrite_data_path:
assert not self.data_path.exists(), f'Data path {self.data_path} already exists. Set overwrite_data_path to True to overwrite.'
if not self.data_path.exists():
self.data_path.mkdir(parents=True)
        self.logger: Optional[BaseLogger] = None
        self.loader: Optional[BaseLoader] = None
        self.savers: List[BaseSaver] = []
self.dummy_mode = dummy_mode
def _load_auto_resume(self) -> bool:
# If the file does not exist, we return False. If autoresume is enabled we print a warning so that the user can know that this is the first run.
if not self.auto_resume_path.exists():
if self.logger.auto_resume:
print("Auto_resume is enabled but no auto_resume.json file exists. Assuming this is the first run.")
return False
# Now we know that the autoresume file exists, but if we are not auto resuming we should remove it so that we don't accidentally load it next time
if not self.logger.auto_resume:
print(f'Removing auto_resume.json because auto_resume is not enabled in the config')
self.auto_resume_path.unlink()
return False
        # Otherwise we read the json into a dictionary that will override parts of logger.__dict__
with open(self.auto_resume_path, 'r') as f:
auto_resume_dict = json.load(f)
# Check if the logger is of the same type as the autoresume save
if auto_resume_dict["logger_type"] != self.logger.__class__.__name__:
raise Exception(f'The logger type in the auto_resume file is {auto_resume_dict["logger_type"]} but the current logger is {self.logger.__class__.__name__}. Either use the original logger type, set `auto_resume` to `False`, or delete your existing tracker-data folder.')
# Then we are ready to override the logger with the autoresume save
self.logger.__dict__["resume"] = True
print(f"Updating {self.logger.__dict__} with {auto_resume_dict}")
self.logger.__dict__.update(auto_resume_dict)
return True
def _save_auto_resume(self):
# Gets the autoresume dict from the logger and adds "logger_type" to it then saves it to the auto_resume file
auto_resume_dict = self.logger.get_resume_data()
auto_resume_dict['logger_type'] = self.logger.__class__.__name__
with open(self.auto_resume_path, 'w') as f:
json.dump(auto_resume_dict, f)
def init(self, full_config: BaseModel, extra_config: dict):
self.auto_resume_path = self.data_path / 'auto_resume.json'
# Check for resuming the run
self.did_auto_resume = self._load_auto_resume()
if self.did_auto_resume:
print(f'\n\nWARNING: RUN HAS BEEN AUTO-RESUMED WITH THE LOGGER TYPE {self.logger.__class__.__name__}.\nIf this was not your intention, stop this run and set `auto_resume` to `False` in the config.\n\n')
print(f"New logger config: {self.logger.__dict__}")
self.save_metadata = dict(
version = version.parse(__version__)
) # Data that will be saved alongside the checkpoint or model
self.blacklisted_checkpoint_metadata_keys = ['scaler', 'optimizer', 'model', 'version', 'step', 'steps'] # These keys would cause us to error if we try to save them as metadata
assert self.logger is not None, '`logger` must be set before `init` is called'
if self.dummy_mode:
# The only thing we need is a loader
if self.loader is not None:
self.loader.init(self.logger)
return
assert len(self.savers) > 0, '`savers` must be set before `init` is called'
self.logger.init(full_config, extra_config)
if self.loader is not None:
self.loader.init(self.logger)
for saver in self.savers:
saver.init(self.logger)
if self.logger.auto_resume:
# Then we need to save the autoresume file. It is assumed after logger.init is called that the logger is ready to be saved.
self._save_auto_resume()
def add_logger(self, logger: BaseLogger):
self.logger = logger
def add_loader(self, loader: BaseLoader):
self.loader = loader
def add_saver(self, saver: BaseSaver):
self.savers.append(saver)
def log(self, *args, **kwargs):
if self.dummy_mode:
return
self.logger.log(*args, **kwargs)
def log_images(self, *args, **kwargs):
if self.dummy_mode:
return
self.logger.log_images(*args, **kwargs)
def log_file(self, *args, **kwargs):
if self.dummy_mode:
return
self.logger.log_file(*args, **kwargs)
def save_config(self, current_config_path: str, config_name = 'config.json'):
if self.dummy_mode:
return
# Save the config under config_name in the root folder of data_path
shutil.copy(current_config_path, self.data_path / config_name)
for saver in self.savers:
if saver.saving_meta:
remote_path = Path(saver.save_meta_to) / config_name
saver.save_file(current_config_path, str(remote_path))
def add_save_metadata(self, state_dict_key: str, metadata: Any):
"""
Adds a new piece of metadata that will be saved along with the model or decoder.
"""
self.save_metadata[state_dict_key] = metadata
def _save_state_dict(self, trainer: Union[DiffusionPriorTrainer, DecoderTrainer], save_type: str, file_path: str, **kwargs) -> Path:
"""
Gets the state dict to be saved and writes it to file_path.
If save_type is 'checkpoint', we save the entire trainer state dict.
If save_type is 'model', we save only the model state dict.
"""
assert save_type in ['checkpoint', 'model']
if save_type == 'checkpoint':
# Create a metadata dict without the blacklisted keys so we do not error when we create the state dict
metadata = {k: v for k, v in self.save_metadata.items() if k not in self.blacklisted_checkpoint_metadata_keys}
trainer.save(file_path, overwrite=True, **kwargs, **metadata)
elif save_type == 'model':
if isinstance(trainer, DiffusionPriorTrainer):
prior = trainer.ema_diffusion_prior.ema_model if trainer.use_ema else trainer.diffusion_prior
prior: DiffusionPrior = trainer.accelerator.unwrap_model(prior)
# Remove CLIP if it is part of the model
original_clip = prior.clip
prior.clip = None
model_state_dict = prior.state_dict()
prior.clip = original_clip
elif isinstance(trainer, DecoderTrainer):
decoder: Decoder = trainer.accelerator.unwrap_model(trainer.decoder)
# Remove CLIP if it is part of the model
original_clip = decoder.clip
decoder.clip = None
if trainer.use_ema:
trainable_unets = decoder.unets
decoder.unets = trainer.unets # Swap EMA unets in
model_state_dict = decoder.state_dict()
decoder.unets = trainable_unets # Swap back
else:
model_state_dict = decoder.state_dict()
decoder.clip = original_clip
else:
raise NotImplementedError('Saving this type of model with EMA mode enabled is not yet implemented. Actually, how did you get here?')
state_dict = {
**self.save_metadata,
'model': model_state_dict
}
torch.save(state_dict, file_path)
return Path(file_path)
def save(self, trainer, is_best: bool, is_latest: bool, **kwargs):
if self.dummy_mode:
return
if not is_best and not is_latest:
# Nothing to do
return
# Save the checkpoint and model to data_path
checkpoint_path = self.data_path / 'checkpoint.pth'
self._save_state_dict(trainer, 'checkpoint', checkpoint_path, **kwargs)
model_path = self.data_path / 'model.pth'
self._save_state_dict(trainer, 'model', model_path, **kwargs)
print("Saved cached models")
# Call the save methods on the savers
for saver in self.savers:
local_path = checkpoint_path if saver.save_type == 'checkpoint' else model_path
if saver.saving_latest and is_latest:
latest_checkpoint_path = saver.save_latest_to.format(**kwargs)
try:
saver.save_file(local_path, latest_checkpoint_path, is_latest=True, **kwargs)
except Exception as e:
self.logger.log_error(f'Error saving checkpoint: {e}', **kwargs)
print(f'Error saving checkpoint: {e}')
if saver.saving_best and is_best:
best_checkpoint_path = saver.save_best_to.format(**kwargs)
try:
saver.save_file(local_path, best_checkpoint_path, is_best=True, **kwargs)
except Exception as e:
self.logger.log_error(f'Error saving checkpoint: {e}', **kwargs)
print(f'Error saving checkpoint: {e}')
@property
def can_recall(self):
# Defines whether a recall can be performed.
return self.loader is not None and (not self.loader.only_auto_resume or self.did_auto_resume)
def recall(self):
if self.can_recall:
return self.loader.recall()
else:
raise ValueError('Tried to recall, but no loader was set or auto-resume was not performed.')
| DALLE2-pytorch-main | dalle2_pytorch/trackers.py |
import time
import importlib
# helper functions
def exists(val):
return val is not None
# time helpers
class Timer:
def __init__(self):
self.reset()
def reset(self):
self.last_time = time.time()
def elapsed(self):
return time.time() - self.last_time
# print helpers
def print_ribbon(s, symbol = '=', repeat = 40):
flank = symbol * repeat
return f'{flank} {s} {flank}'
# import helpers
def import_or_print_error(pkg_name, err_str = None):
try:
return importlib.import_module(pkg_name)
except ModuleNotFoundError as e:
if exists(err_str):
print(err_str)
exit()
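# Added illustrative sketch (not part of the original file): how the helpers above might be
# used together when timing a training step.
def _example_utils():
    timer = Timer()
    time.sleep(0.01)
    assert timer.elapsed() >= 0.01
    assert print_ribbon('epoch 1', symbol = '-', repeat = 3) == '--- epoch 1 ---'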
| DALLE2-pytorch-main | dalle2_pytorch/utils.py |
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
if group_wd_params:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
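# Added illustrative sketch (not part of the original file): with weight decay enabled,
# parameters with fewer than 2 dimensions (e.g. biases) are routed to a group with weight decay 0.
def _example_get_optimizer():
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = get_optimizer(model.parameters(), lr = 1e-4, wd = 1e-2)
    assert len(optimizer.param_groups) == 2
    assert optimizer.param_groups[1]['weight_decay'] == 0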
| DALLE2-pytorch-main | dalle2_pytorch/optimizer.py |
import copy
import math
from math import sqrt
from functools import partial, wraps
from vector_quantize_pytorch import VectorQuantize as VQ
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torchvision
from einops import rearrange, reduce, repeat, pack, unpack
from einops.layers.torch import Rearrange
# constants
MList = nn.ModuleList
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def remove_vgg(fn):
@wraps(fn)
def inner(self, *args, **kwargs):
has_vgg = hasattr(self, 'vgg')
if has_vgg:
vgg = self.vgg
delattr(self, 'vgg')
out = fn(self, *args, **kwargs)
if has_vgg:
self.vgg = vgg
return out
return inner
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, string_input):
return string_input.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# tensor helper functions
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(outputs = output, inputs = images,
grad_outputs = torch.ones(output.size(), device = images.device),
create_graph = True, retain_graph = True, only_inputs = True)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
def l2norm(t):
return F.normalize(t, dim = -1)
def leaky_relu(p = 0.1):
    return nn.LeakyReLU(p)
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
t = t / alpha
t = t - torch.amax(t, dim = dim, keepdim = True).detach()
return (t * alpha).softmax(dim = dim)
def safe_div(numer, denom, eps = 1e-8):
return numer / (denom + eps)
# gan losses
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def bce_discr_loss(fake, real):
return (-log(1 - torch.sigmoid(fake)) - log(torch.sigmoid(real))).mean()
def bce_gen_loss(fake):
return -log(torch.sigmoid(fake)).mean()
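# Added illustrative sketch (not part of the original file): the hinge discriminator loss is zero
# once real logits reach +1 and fake logits reach -1, while the generator loss simply pushes fake
# logits upward.
def _example_hinge_losses():
    fake, real = torch.full((2,), -2.), torch.full((2,), 2.)
    assert hinge_discr_loss(fake, real).item() == 0.
    assert hinge_gen_loss(fake).item() == 2.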
def grad_layer_wrt_loss(loss, layer):
return torch_grad(
outputs = loss,
inputs = layer,
grad_outputs = torch.ones_like(loss),
retain_graph = True
)[0].detach()
# vqgan vae
class LayerNormChan(nn.Module):
def __init__(
self,
dim,
eps = 1e-5
):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (var + self.eps).sqrt() * self.gamma
# discriminator
class Discriminator(nn.Module):
def __init__(
self,
dims,
channels = 3,
groups = 16,
init_kernel_size = 5
):
super().__init__()
dim_pairs = zip(dims[:-1], dims[1:])
self.layers = MList([nn.Sequential(nn.Conv2d(channels, dims[0], init_kernel_size, padding = init_kernel_size // 2), leaky_relu())])
for dim_in, dim_out in dim_pairs:
self.layers.append(nn.Sequential(
nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1),
nn.GroupNorm(groups, dim_out),
leaky_relu()
))
dim = dims[-1]
self.to_logits = nn.Sequential( # return 5 x 5, for PatchGAN-esque training
nn.Conv2d(dim, dim, 1),
leaky_relu(),
nn.Conv2d(dim, 1, 4)
)
def forward(self, x):
for net in self.layers:
x = net(x)
return self.to_logits(x)
# positional encoding
class ContinuousPositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(self, *, dim, heads, layers = 2):
super().__init__()
self.net = MList([])
self.net.append(nn.Sequential(nn.Linear(2, dim), leaky_relu()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), leaky_relu()))
self.net.append(nn.Linear(dim, heads))
self.register_buffer('rel_pos', None, persistent = False)
def forward(self, x):
n, device = x.shape[-1], x.device
fmap_size = int(sqrt(n))
if not exists(self.rel_pos):
pos = torch.arange(fmap_size, device = device)
grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij'))
grid = rearrange(grid, 'c i j -> (i j) c')
rel_pos = rearrange(grid, 'i c -> i 1 c') - rearrange(grid, 'j c -> 1 j c')
rel_pos = torch.sign(rel_pos) * torch.log(rel_pos.abs() + 1)
self.register_buffer('rel_pos', rel_pos, persistent = False)
rel_pos = self.rel_pos.float()
for layer in self.net:
rel_pos = layer(rel_pos)
bias = rearrange(rel_pos, 'i j h -> h i j')
return x + bias
# resnet encoder / decoder
class ResnetEncDec(nn.Module):
def __init__(
self,
dim,
*,
channels = 3,
layers = 4,
layer_mults = None,
num_resnet_blocks = 1,
resnet_groups = 16,
first_conv_kernel_size = 5,
use_attn = True,
attn_dim_head = 64,
attn_heads = 8,
attn_dropout = 0.,
):
super().__init__()
assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'
self.layers = layers
self.encoders = MList([])
self.decoders = MList([])
layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
assert len(layer_mults) == layers, 'layer multipliers must be equal to designated number of layers'
layer_dims = [dim * mult for mult in layer_mults]
dims = (dim, *layer_dims)
self.encoded_dim = dims[-1]
dim_pairs = zip(dims[:-1], dims[1:])
append = lambda arr, t: arr.append(t)
prepend = lambda arr, t: arr.insert(0, t)
if not isinstance(num_resnet_blocks, tuple):
num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)
if not isinstance(use_attn, tuple):
use_attn = (*((False,) * (layers - 1)), use_attn)
assert len(num_resnet_blocks) == layers, 'number of resnet blocks config must be equal to number of layers'
assert len(use_attn) == layers
for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(range(layers), dim_pairs, num_resnet_blocks, use_attn):
append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))
prepend(self.decoders, nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()))
if layer_use_attn:
prepend(self.decoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
for _ in range(layer_num_resnet_blocks):
append(self.encoders, ResBlock(dim_out, groups = resnet_groups))
prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))
if layer_use_attn:
append(self.encoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
prepend(self.encoders, nn.Conv2d(channels, dim, first_conv_kernel_size, padding = first_conv_kernel_size // 2))
append(self.decoders, nn.Conv2d(dim, channels, 1))
def get_encoded_fmap_size(self, image_size):
return image_size // (2 ** self.layers)
@property
def last_dec_layer(self):
return self.decoders[-1].weight
def encode(self, x):
for enc in self.encoders:
x = enc(x)
return x
def decode(self, x):
for dec in self.decoders:
x = dec(x)
return x
class GLUResBlock(nn.Module):
def __init__(self, chan, groups = 16):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan * 2, 3, padding = 1),
nn.GLU(dim = 1),
nn.GroupNorm(groups, chan),
nn.Conv2d(chan, chan * 2, 3, padding = 1),
nn.GLU(dim = 1),
nn.GroupNorm(groups, chan),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class ResBlock(nn.Module):
def __init__(self, chan, groups = 16):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.GroupNorm(groups, chan),
leaky_relu(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.GroupNorm(groups, chan),
leaky_relu(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
# vqgan attention layer
class VQGanAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.dropout = nn.Dropout(dropout)
self.pre_norm = LayerNormChan(dim)
self.cpb = ContinuousPositionBias(dim = dim // 4, heads = heads)
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(inner_dim, dim, 1, bias = False)
def forward(self, x):
h = self.heads
height, width, residual = *x.shape[-2:], x.clone()
x = self.pre_norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = h), (q, k, v))
sim = einsum('b h c i, b h c j -> b h i j', q, k) * self.scale
sim = self.cpb(sim)
attn = stable_softmax(sim, dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h c j -> b h c i', attn, v)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', x = height, y = width)
out = self.to_out(out)
return out + residual
# ViT encoder / decoder
class RearrangeImage(nn.Module):
def forward(self, x):
n = x.shape[1]
w = h = int(sqrt(n))
return rearrange(x, 'b (h w) ... -> b h w ...', h = h, w = w)
class Attention(nn.Module):
def __init__(
self,
dim,
*,
heads = 8,
dim_head = 32
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x):
h = self.heads
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
def FeedForward(dim, mult = 4):
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * mult, bias = False),
nn.GELU(),
nn.Linear(dim * mult, dim, bias = False)
)
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
layers,
dim_head = 32,
heads = 8,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = nn.LayerNorm(dim)
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
class ViTEncDec(nn.Module):
def __init__(
self,
dim,
channels = 3,
layers = 4,
patch_size = 8,
dim_head = 32,
heads = 8,
ff_mult = 4
):
super().__init__()
self.encoded_dim = dim
self.patch_size = patch_size
input_dim = channels * (patch_size ** 2)
self.encoder = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(input_dim, dim),
Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
layers = layers
),
RearrangeImage(),
Rearrange('b h w c -> b c h w')
)
self.decoder = nn.Sequential(
Rearrange('b c h w -> b (h w) c'),
Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
layers = layers
),
nn.Sequential(
nn.Linear(dim, dim * 4, bias = False),
nn.Tanh(),
nn.Linear(dim * 4, input_dim, bias = False),
),
RearrangeImage(),
Rearrange('b h w (p1 p2 c) -> b c (h p1) (w p2)', p1 = patch_size, p2 = patch_size)
)
def get_encoded_fmap_size(self, image_size):
return image_size // self.patch_size
@property
def last_dec_layer(self):
return self.decoder[-3][-1].weight
def encode(self, x):
return self.encoder(x)
def decode(self, x):
return self.decoder(x)
# main vqgan-vae classes
class NullVQGanVAE(nn.Module):
def __init__(
self,
*,
channels
):
super().__init__()
self.encoded_dim = channels
self.layers = 0
def get_encoded_fmap_size(self, size):
return size
def copy_for_eval(self):
return self
def encode(self, x):
return x
def decode(self, x):
return x
class VQGanVAE(nn.Module):
def __init__(
self,
*,
dim,
image_size,
channels = 3,
layers = 4,
l2_recon_loss = False,
use_hinge_loss = True,
vgg = None,
vq_codebook_dim = 256,
vq_codebook_size = 512,
vq_decay = 0.8,
vq_commitment_weight = 1.,
vq_kmeans_init = True,
vq_use_cosine_sim = True,
use_vgg_and_gan = True,
vae_type = 'resnet',
discr_layers = 4,
**kwargs
):
super().__init__()
vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)
encdec_kwargs, kwargs = groupby_prefix_and_trim('encdec_', kwargs)
self.image_size = image_size
self.channels = channels
self.codebook_size = vq_codebook_size
if vae_type == 'resnet':
enc_dec_klass = ResnetEncDec
elif vae_type == 'vit':
enc_dec_klass = ViTEncDec
else:
raise ValueError(f'{vae_type} not valid')
self.enc_dec = enc_dec_klass(
dim = dim,
channels = channels,
layers = layers,
**encdec_kwargs
)
self.vq = VQ(
dim = self.enc_dec.encoded_dim,
codebook_dim = vq_codebook_dim,
codebook_size = vq_codebook_size,
decay = vq_decay,
commitment_weight = vq_commitment_weight,
accept_image_fmap = True,
kmeans_init = vq_kmeans_init,
use_cosine_sim = vq_use_cosine_sim,
**vq_kwargs
)
# reconstruction loss
self.recon_loss_fn = F.mse_loss if l2_recon_loss else F.l1_loss
# turn off GAN and perceptual loss if grayscale
self.vgg = None
self.discr = None
self.use_vgg_and_gan = use_vgg_and_gan
if not use_vgg_and_gan:
return
        # perceptual loss
if exists(vgg):
self.vgg = vgg
else:
self.vgg = torchvision.models.vgg16(pretrained = True)
self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])
# gan related losses
layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
layer_dims = [dim * mult for mult in layer_mults]
dims = (dim, *layer_dims)
self.discr = Discriminator(dims = dims, channels = channels)
self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
@property
def encoded_dim(self):
return self.enc_dec.encoded_dim
def get_encoded_fmap_size(self, image_size):
return self.enc_dec.get_encoded_fmap_size(image_size)
def copy_for_eval(self):
device = next(self.parameters()).device
vae_copy = copy.deepcopy(self.cpu())
if vae_copy.use_vgg_and_gan:
del vae_copy.discr
del vae_copy.vgg
vae_copy.eval()
return vae_copy.to(device)
@remove_vgg
def state_dict(self, *args, **kwargs):
return super().state_dict(*args, **kwargs)
@remove_vgg
def load_state_dict(self, *args, **kwargs):
return super().load_state_dict(*args, **kwargs)
@property
def codebook(self):
return self.vq.codebook
def encode(self, fmap):
fmap = self.enc_dec.encode(fmap)
return fmap
def decode(self, fmap, return_indices_and_loss = False):
fmap, indices, commit_loss = self.vq(fmap)
fmap = self.enc_dec.decode(fmap)
if not return_indices_and_loss:
return fmap
return fmap, indices, commit_loss
def forward(
self,
img,
return_loss = False,
return_discr_loss = False,
return_recons = False,
add_gradient_penalty = True
):
batch, channels, height, width, device = *img.shape, img.device
        assert height == self.image_size and width == self.image_size, f'height and width of input image must be equal to {self.image_size}'
assert channels == self.channels, 'number of channels on image or sketch is not equal to the channels set on this VQGanVAE'
fmap = self.encode(img)
fmap, indices, commit_loss = self.decode(fmap, return_indices_and_loss = True)
if not return_loss and not return_discr_loss:
return fmap
assert return_loss ^ return_discr_loss, 'you should either return autoencoder loss or discriminator loss, but not both'
# whether to return discriminator loss
if return_discr_loss:
assert exists(self.discr), 'discriminator must exist to train it'
fmap.detach_()
img.requires_grad_()
fmap_discr_logits, img_discr_logits = map(self.discr, (fmap, img))
discr_loss = self.discr_loss(fmap_discr_logits, img_discr_logits)
if add_gradient_penalty:
gp = gradient_penalty(img, img_discr_logits)
loss = discr_loss + gp
if return_recons:
return loss, fmap
return loss
# reconstruction loss
recon_loss = self.recon_loss_fn(fmap, img)
# early return if training on grayscale
if not self.use_vgg_and_gan:
if return_recons:
return recon_loss, fmap
return recon_loss
# perceptual loss
img_vgg_input = img
fmap_vgg_input = fmap
if img.shape[1] == 1:
# handle grayscale for vgg
img_vgg_input, fmap_vgg_input = map(lambda t: repeat(t, 'b 1 ... -> b c ...', c = 3), (img_vgg_input, fmap_vgg_input))
img_vgg_feats = self.vgg(img_vgg_input)
recon_vgg_feats = self.vgg(fmap_vgg_input)
perceptual_loss = F.mse_loss(img_vgg_feats, recon_vgg_feats)
# generator loss
gen_loss = self.gen_loss(self.discr(fmap))
# calculate adaptive weight
last_dec_layer = self.enc_dec.last_dec_layer
norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p = 2)
norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p = 2)
adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)
adaptive_weight.clamp_(max = 1e4)
# combine losses
loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss
if return_recons:
return loss, fmap
return loss
| DALLE2-pytorch-main | dalle2_pytorch/vqgan_vae.py |
import time
import copy
from pathlib import Path
from math import ceil
from functools import partial, wraps
from contextlib import nullcontext
from collections.abc import Iterable
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR
from torch.cuda.amp import autocast, GradScaler
from dalle2_pytorch.dalle2_pytorch import Decoder, DiffusionPrior
from dalle2_pytorch.optimizer import get_optimizer
from dalle2_pytorch.version import __version__
from packaging import version
import pytorch_warmup as warmup
from ema_pytorch import EMA
from accelerate import Accelerator, DistributedType
import numpy as np
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
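# Added illustrative sketch (not part of the original file): splitting a total batch size into
# sub-batches no larger than the divisor, with a smaller remainder group at the end.
def _example_num_to_groups():
    assert num_to_groups(10, 4) == [4, 4, 2]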
# decorators
def cast_torch_tensor(fn):
@wraps(fn)
def inner(model, *args, **kwargs):
device = kwargs.pop('_device', next(model.parameters()).device)
cast_device = kwargs.pop('_cast_device', True)
cast_deepspeed_precision = kwargs.pop('_cast_deepspeed_precision', True)
kwargs_keys = kwargs.keys()
all_args = (*args, *kwargs.values())
split_kwargs_index = len(all_args) - len(kwargs_keys)
all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))
if cast_device:
all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))
if cast_deepspeed_precision:
try:
accelerator = model.accelerator
if accelerator is not None and accelerator.distributed_type == DistributedType.DEEPSPEED:
cast_type_map = {
"fp16": torch.half,
"bf16": torch.bfloat16,
"no": torch.float
}
precision_type = cast_type_map[accelerator.mixed_precision]
all_args = tuple(map(lambda t: t.to(precision_type) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))
except AttributeError:
# Then this model doesn't have an accelerator
pass
args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]
kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))
out = fn(model, *args, **kwargs)
return out
return inner
# gradient accumulation functions
def split_iterable(it, split_size):
accum = []
for ind in range(ceil(len(it) / split_size)):
start_index = ind * split_size
accum.append(it[start_index: (start_index + split_size)])
return accum
def split(t, split_size = None):
if not exists(split_size):
return t
if isinstance(t, torch.Tensor):
return t.split(split_size, dim = 0)
if isinstance(t, Iterable):
return split_iterable(t, split_size)
    raise TypeError(f'unable to split object of type {type(t)}')
def find_first(cond, arr):
for el in arr:
if cond(el):
return el
return None
def split_args_and_kwargs(*args, split_size = None, **kwargs):
all_args = (*args, *kwargs.values())
len_all_args = len(all_args)
first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)
assert exists(first_tensor)
batch_size = len(first_tensor)
split_size = default(split_size, batch_size)
num_chunks = ceil(batch_size / split_size)
dict_len = len(kwargs)
dict_keys = kwargs.keys()
split_kwargs_index = len_all_args - dict_len
split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]
chunk_sizes = tuple(map(len, split_all_args[0]))
for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):
chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]
chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))
chunk_size_frac = chunk_size / batch_size
yield chunk_size_frac, (chunked_args, chunked_kwargs)
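# Added illustrative sketch (not part of the original file): a batch of 6 with a max chunk size
# of 4 yields two chunks with fractions 4/6 and 2/6, which the trainers below use to weight each
# chunk's loss during gradient accumulation.
def _example_split_args_and_kwargs():
    batch = torch.zeros(6, 3)
    fracs = [frac for frac, _ in split_args_and_kwargs(batch, split_size = 4)]
    assert [round(f, 2) for f in fracs] == [0.67, 0.33]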
# diffusion prior trainer
def prior_sample_in_chunks(fn):
@wraps(fn)
def inner(self, *args, max_batch_size = None, **kwargs):
if not exists(max_batch_size):
return fn(self, *args, **kwargs)
outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]
return torch.cat(outputs, dim = 0)
return inner
class DiffusionPriorTrainer(nn.Module):
def __init__(
self,
diffusion_prior,
accelerator = None,
use_ema = True,
lr = 3e-4,
wd = 1e-2,
eps = 1e-6,
max_grad_norm = None,
group_wd_params = True,
warmup_steps = None,
cosine_decay_max_steps = None,
**kwargs
):
super().__init__()
assert isinstance(diffusion_prior, DiffusionPrior)
ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)
accelerator_kwargs, kwargs = groupby_prefix_and_trim('accelerator_', kwargs)
if not exists(accelerator):
accelerator = Accelerator(**accelerator_kwargs)
# assign some helpful member vars
self.accelerator = accelerator
self.text_conditioned = diffusion_prior.condition_on_text_encodings
# setting the device
self.device = accelerator.device
diffusion_prior.to(self.device)
# save model
self.diffusion_prior = diffusion_prior
# mixed precision checks
if (
exists(self.accelerator)
and self.accelerator.distributed_type == DistributedType.DEEPSPEED
and self.diffusion_prior.clip is not None
):
# Then we need to make sure clip is using the correct precision or else deepspeed will error
cast_type_map = {
"fp16": torch.half,
"bf16": torch.bfloat16,
"no": torch.float
}
precision_type = cast_type_map[accelerator.mixed_precision]
assert precision_type == torch.float, "DeepSpeed currently only supports float32 precision when using on the fly embedding generation from clip"
self.diffusion_prior.clip.to(precision_type)
# optimizer stuff
self.optim_kwargs = dict(lr=lr, wd=wd, eps=eps, group_wd_params=group_wd_params)
self.optimizer = get_optimizer(
self.diffusion_prior.parameters(),
**self.optim_kwargs,
**kwargs
)
if exists(cosine_decay_max_steps):
self.scheduler = CosineAnnealingLR(self.optimizer, T_max = cosine_decay_max_steps)
else:
self.scheduler = LambdaLR(self.optimizer, lr_lambda = lambda _: 1.0)
self.warmup_scheduler = warmup.LinearWarmup(self.optimizer, warmup_period = warmup_steps) if exists(warmup_steps) else None
# distribute the model if using HFA
self.diffusion_prior, self.optimizer, self.scheduler = self.accelerator.prepare(self.diffusion_prior, self.optimizer, self.scheduler)
# exponential moving average stuff
self.use_ema = use_ema
if self.use_ema:
self.ema_diffusion_prior = EMA(self.accelerator.unwrap_model(self.diffusion_prior), **ema_kwargs)
# gradient clipping if needed
self.max_grad_norm = max_grad_norm
# track steps internally
self.register_buffer('step', torch.tensor([0], device = self.device))
# utility
def save(self, path, overwrite = True, **kwargs):
# only save on the main process
if self.accelerator.is_main_process:
print(f"Saving checkpoint at step: {self.step.item()}")
path = Path(path)
assert not (path.exists() and not overwrite)
path.parent.mkdir(parents = True, exist_ok = True)
# FIXME: LambdaLR can't be saved due to pickling issues
save_obj = dict(
optimizer = self.optimizer.state_dict(),
scheduler = self.scheduler.state_dict(),
warmup_scheduler = self.warmup_scheduler,
model = self.accelerator.unwrap_model(self.diffusion_prior).state_dict(),
version = version.parse(__version__),
step = self.step,
**kwargs
)
if self.use_ema:
save_obj = {
**save_obj,
'ema': self.ema_diffusion_prior.state_dict(),
'ema_model': self.ema_diffusion_prior.ema_model.state_dict() # save the ema model specifically for easy ema-only reload
}
torch.save(save_obj, str(path))
def load(self, path_or_state, overwrite_lr = True, strict = True):
"""
Load a checkpoint of a diffusion prior trainer.
Will load the entire trainer, including the optimizer and EMA.
Params:
            - path_or_state (str | dict): a path to the DiffusionPriorTrainer checkpoint file, or an already loaded checkpoint dictionary
            - overwrite_lr (bool): whether or not to overwrite the stored LR with the LR specified in the new trainer
- strict (bool): kwarg for `torch.nn.Module.load_state_dict`, will force an exact checkpoint match
Returns:
loaded_obj (dict): The loaded checkpoint dictionary
"""
# all processes need to load checkpoint. no restriction here
if isinstance(path_or_state, str):
path = Path(path_or_state)
assert path.exists()
loaded_obj = torch.load(str(path), map_location=self.device)
elif isinstance(path_or_state, dict):
loaded_obj = path_or_state
if version.parse(__version__) != loaded_obj['version']:
print(f'loading saved diffusion prior at version {loaded_obj["version"]} but current package version is at {__version__}')
# unwrap the model when loading from checkpoint
self.accelerator.unwrap_model(self.diffusion_prior).load_state_dict(loaded_obj['model'], strict = strict)
self.step.copy_(torch.ones_like(self.step, device=self.device) * loaded_obj['step'].to(self.device))
self.optimizer.load_state_dict(loaded_obj['optimizer'])
self.scheduler.load_state_dict(loaded_obj['scheduler'])
# set warmupstep
if exists(self.warmup_scheduler):
self.warmup_scheduler.last_step = self.step.item()
# ensure new lr is used if different from old one
if overwrite_lr:
new_lr = self.optim_kwargs["lr"]
for group in self.optimizer.param_groups:
group["lr"] = new_lr if group["lr"] > 0.0 else 0.0
if self.use_ema:
assert 'ema' in loaded_obj
self.ema_diffusion_prior.load_state_dict(loaded_obj['ema'], strict = strict)
# below might not be necessary, but I had a suspicion that this wasn't being loaded correctly
self.ema_diffusion_prior.ema_model.load_state_dict(loaded_obj["ema_model"])
return loaded_obj
# model functionality
def update(self):
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.diffusion_prior.parameters(), self.max_grad_norm)
self.optimizer.step()
self.optimizer.zero_grad()
        # accelerator will occasionally skip optimizer steps in a "dynamic loss scaling strategy"
if not self.accelerator.optimizer_step_was_skipped:
sched_context = self.warmup_scheduler.dampening if exists(self.warmup_scheduler) else nullcontext
with sched_context():
self.scheduler.step()
if self.use_ema:
self.ema_diffusion_prior.update()
self.step += 1
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def p_sample_loop(self, *args, **kwargs):
model = self.ema_diffusion_prior.ema_model if self.use_ema else self.diffusion_prior
return model.p_sample_loop(*args, **kwargs)
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def sample(self, *args, **kwargs):
model = self.ema_diffusion_prior.ema_model if self.use_ema else self.diffusion_prior
return model.sample(*args, **kwargs)
@torch.no_grad()
def sample_batch_size(self, *args, **kwargs):
model = self.ema_diffusion_prior.ema_model if self.use_ema else self.diffusion_prior
return model.sample_batch_size(*args, **kwargs)
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def embed_text(self, *args, **kwargs):
return self.accelerator.unwrap_model(self.diffusion_prior).clip.embed_text(*args, **kwargs)
@cast_torch_tensor
def forward(
self,
*args,
max_batch_size = None,
**kwargs
):
total_loss = 0.
for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):
with self.accelerator.autocast():
loss = self.diffusion_prior(*chunked_args, **chunked_kwargs)
loss = loss * chunk_size_frac
total_loss += loss.item()
if self.training:
self.accelerator.backward(loss)
return total_loss
# decoder trainer
def decoder_sample_in_chunks(fn):
@wraps(fn)
def inner(self, *args, max_batch_size = None, **kwargs):
if not exists(max_batch_size):
return fn(self, *args, **kwargs)
if self.decoder.unconditional:
batch_size = kwargs.get('batch_size')
batch_sizes = num_to_groups(batch_size, max_batch_size)
outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]
else:
outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]
return torch.cat(outputs, dim = 0)
return inner
class DecoderTrainer(nn.Module):
def __init__(
self,
decoder,
accelerator = None,
dataloaders = None,
use_ema = True,
lr = 1e-4,
wd = 1e-2,
eps = 1e-8,
warmup_steps = None,
cosine_decay_max_steps = None,
max_grad_norm = 0.5,
amp = False,
group_wd_params = True,
**kwargs
):
super().__init__()
assert isinstance(decoder, Decoder)
ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)
self.accelerator = default(accelerator, Accelerator)
self.num_unets = len(decoder.unets)
self.use_ema = use_ema
self.ema_unets = nn.ModuleList([])
self.amp = amp
# be able to finely customize learning rate, weight decay
# per unet
lr, wd, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, wd, eps, warmup_steps, cosine_decay_max_steps))
assert all([unet_lr <= 1e-2 for unet_lr in lr]), 'your learning rate is too high, recommend sticking with 1e-4, at most 5e-4'
optimizers = []
schedulers = []
warmup_schedulers = []
for unet, unet_lr, unet_wd, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps in zip(decoder.unets, lr, wd, eps, warmup_steps, cosine_decay_max_steps):
if isinstance(unet, nn.Identity):
optimizers.append(None)
schedulers.append(None)
warmup_schedulers.append(None)
else:
optimizer = get_optimizer(
unet.parameters(),
lr = unet_lr,
wd = unet_wd,
eps = unet_eps,
group_wd_params = group_wd_params,
**kwargs
)
optimizers.append(optimizer)
if exists(unet_cosine_decay_max_steps):
scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)
else:
scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)
warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps) if exists(unet_warmup_steps) else None
warmup_schedulers.append(warmup_scheduler)
schedulers.append(scheduler)
if self.use_ema:
self.ema_unets.append(EMA(unet, **ema_kwargs))
# gradient clipping if needed
self.max_grad_norm = max_grad_norm
self.register_buffer('steps', torch.tensor([0] * self.num_unets))
if self.accelerator.distributed_type == DistributedType.DEEPSPEED and decoder.clip is not None:
# Then we need to make sure clip is using the correct precision or else deepspeed will error
cast_type_map = {
"fp16": torch.half,
"bf16": torch.bfloat16,
"no": torch.float
}
            precision_type = cast_type_map[self.accelerator.mixed_precision]
assert precision_type == torch.float, "DeepSpeed currently only supports float32 precision when using on the fly embedding generation from clip"
clip = decoder.clip
clip.to(precision_type)
decoder, *optimizers = list(self.accelerator.prepare(decoder, *optimizers))
self.decoder = decoder
# prepare dataloaders
train_loader = val_loader = None
if exists(dataloaders):
train_loader, val_loader = self.accelerator.prepare(dataloaders["train"], dataloaders["val"])
self.train_loader = train_loader
self.val_loader = val_loader
# store optimizers
for opt_ind, optimizer in zip(range(len(optimizers)), optimizers):
setattr(self, f'optim{opt_ind}', optimizer)
# store schedulers
for sched_ind, scheduler in zip(range(len(schedulers)), schedulers):
setattr(self, f'sched{sched_ind}', scheduler)
# store warmup schedulers
self.warmup_schedulers = warmup_schedulers
def validate_and_return_unet_number(self, unet_number = None):
if self.num_unets == 1:
unet_number = default(unet_number, 1)
assert exists(unet_number) and 1 <= unet_number <= self.num_unets
return unet_number
def num_steps_taken(self, unet_number = None):
unet_number = self.validate_and_return_unet_number(unet_number)
return self.steps[unet_number - 1].item()
def save(self, path, overwrite = True, **kwargs):
path = Path(path)
assert not (path.exists() and not overwrite)
path.parent.mkdir(parents = True, exist_ok = True)
save_obj = dict(
model = self.accelerator.unwrap_model(self.decoder).state_dict(),
version = __version__,
steps = self.steps.cpu(),
**kwargs
)
for ind in range(0, self.num_unets):
optimizer_key = f'optim{ind}'
scheduler_key = f'sched{ind}'
optimizer = getattr(self, optimizer_key)
scheduler = getattr(self, scheduler_key)
optimizer_state_dict = optimizer.state_dict() if exists(optimizer) else None
scheduler_state_dict = scheduler.state_dict() if exists(scheduler) else None
save_obj = {**save_obj, optimizer_key: optimizer_state_dict, scheduler_key: scheduler_state_dict}
if self.use_ema:
save_obj = {**save_obj, 'ema': self.ema_unets.state_dict()}
self.accelerator.save(save_obj, str(path))
def load_state_dict(self, loaded_obj, only_model = False, strict = True):
if version.parse(__version__) != version.parse(loaded_obj['version']):
self.accelerator.print(f'loading saved decoder at version {loaded_obj["version"]}, but current package version is {__version__}')
self.accelerator.unwrap_model(self.decoder).load_state_dict(loaded_obj['model'], strict = strict)
self.steps.copy_(loaded_obj['steps'])
if only_model:
return loaded_obj
for ind, last_step in zip(range(0, self.num_unets), self.steps.tolist()):
optimizer_key = f'optim{ind}'
optimizer = getattr(self, optimizer_key)
scheduler_key = f'sched{ind}'
scheduler = getattr(self, scheduler_key)
warmup_scheduler = self.warmup_schedulers[ind]
if exists(optimizer):
optimizer.load_state_dict(loaded_obj[optimizer_key])
if exists(scheduler):
scheduler.load_state_dict(loaded_obj[scheduler_key])
if exists(warmup_scheduler):
warmup_scheduler.last_step = last_step
if self.use_ema:
assert 'ema' in loaded_obj
self.ema_unets.load_state_dict(loaded_obj['ema'], strict = strict)
def load(self, path, only_model = False, strict = True):
path = Path(path)
assert path.exists()
loaded_obj = torch.load(str(path), map_location = 'cpu')
self.load_state_dict(loaded_obj, only_model = only_model, strict = strict)
return loaded_obj
@property
def unets(self):
return nn.ModuleList([ema.ema_model for ema in self.ema_unets])
def increment_step(self, unet_number):
assert 1 <= unet_number <= self.num_unets
unet_index_tensor = torch.tensor(unet_number - 1, device = self.steps.device)
self.steps += F.one_hot(unet_index_tensor, num_classes = len(self.steps))
def update(self, unet_number = None):
unet_number = self.validate_and_return_unet_number(unet_number)
index = unet_number - 1
optimizer = getattr(self, f'optim{index}')
scheduler = getattr(self, f'sched{index}')
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.decoder.parameters(), self.max_grad_norm) # Automatically unscales gradients
optimizer.step()
optimizer.zero_grad()
warmup_scheduler = self.warmup_schedulers[index]
scheduler_context = warmup_scheduler.dampening if exists(warmup_scheduler) else nullcontext
with scheduler_context():
scheduler.step()
if self.use_ema:
ema_unet = self.ema_unets[index]
ema_unet.update()
self.increment_step(unet_number)
@torch.no_grad()
@cast_torch_tensor
@decoder_sample_in_chunks
def sample(self, *args, **kwargs):
distributed = self.accelerator.num_processes > 1
base_decoder = self.accelerator.unwrap_model(self.decoder)
was_training = base_decoder.training
base_decoder.eval()
if kwargs.pop('use_non_ema', False) or not self.use_ema:
out = base_decoder.sample(*args, **kwargs, distributed = distributed)
base_decoder.train(was_training)
return out
trainable_unets = self.accelerator.unwrap_model(self.decoder).unets
base_decoder.unets = self.unets # swap in exponential moving averaged unets for sampling
output = base_decoder.sample(*args, **kwargs, distributed = distributed)
base_decoder.unets = trainable_unets # restore original training unets
# cast the ema_model unets back to original device
for ema in self.ema_unets:
ema.restore_ema_model_device()
base_decoder.train(was_training)
return output
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def embed_text(self, *args, **kwargs):
return self.accelerator.unwrap_model(self.decoder).clip.embed_text(*args, **kwargs)
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def embed_image(self, *args, **kwargs):
return self.accelerator.unwrap_model(self.decoder).clip.embed_image(*args, **kwargs)
@cast_torch_tensor
def forward(
self,
*args,
unet_number = None,
max_batch_size = None,
return_lowres_cond_image=False,
**kwargs
):
unet_number = self.validate_and_return_unet_number(unet_number)
total_loss = 0.
cond_images = []
for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):
with self.accelerator.autocast():
loss_obj = self.decoder(*chunked_args, unet_number = unet_number, return_lowres_cond_image=return_lowres_cond_image, **chunked_kwargs)
# loss_obj may be a tuple with loss and cond_image
if return_lowres_cond_image:
loss, cond_image = loss_obj
else:
loss = loss_obj
cond_image = None
loss = loss * chunk_size_frac
if cond_image is not None:
cond_images.append(cond_image)
total_loss += loss.item()
if self.training:
self.accelerator.backward(loss)
if return_lowres_cond_image:
return total_loss, torch.stack(cond_images)
else:
return total_loss
| DALLE2-pytorch-main | dalle2_pytorch/trainer.py |
import os
import webdataset as wds
import torch
from torch.utils.data import DataLoader
import numpy as np
import fsspec
import shutil
def get_shard(filename):
"""
Filenames with shards in them have a consistent structure that we can take advantage of
Standard structure: path/to/file/prefix_string_00001.ext
"""
try:
return filename.split("_")[-1].split(".")[0]
except ValueError:
raise RuntimeError(f"Could not find shard for filename {filename}")
def get_example_file(fs, path, file_format):
"""
Given a file system and a file extension, return the example file
"""
return fs.glob(os.path.join(path, f"*.{file_format}"))[0]
def embedding_inserter(samples, embeddings_url, index_width, sample_key='npy', handler=wds.handlers.reraise_exception):
"""Given a datum of {"__key__": str, "__url__": str, ...} adds the cooresponding embedding and yields"""
previous_tar_url = None
current_embeddings = None
# Get a reference to an abstract file system where the embeddings are stored
embeddings_fs, embeddings_path = fsspec.core.url_to_fs(embeddings_url)
example_embedding_file = get_example_file(embeddings_fs, embeddings_path, "npy")
example_embedding_shard = get_shard(example_embedding_file)
emb_shard_width = len(example_embedding_shard)
# Easier to get the basename without the shard once than search through for the correct file every time
embedding_file_basename = '_'.join(example_embedding_file.split("_")[:-1]) + "_"
def load_corresponding_embeds(tar_url):
"""Finds and reads the npy files that contains embeddings for the given webdataset tar"""
shard = int(tar_url.split("/")[-1].split(".")[0])
embedding_url = embedding_file_basename + str(shard).zfill(emb_shard_width) + '.npy'
with embeddings_fs.open(embedding_url) as f:
data = np.load(f)
return torch.from_numpy(data)
for sample in samples:
try:
tar_url = sample["__url__"]
key = sample["__key__"]
if tar_url != previous_tar_url:
# If the tar changed, we need to download new embeddings
# This means if we shuffle before inserting it will load many more files than we expect and be very inefficient.
previous_tar_url = tar_url
current_embeddings = load_corresponding_embeds(tar_url)
embedding_index = int(key[-index_width:])
embedding = current_embeddings[embedding_index]
            # An all-zero embedding is the placeholder for a missing sample, so it is not valid; raise and let the handler decide whether to skip or stop
if torch.count_nonzero(embedding) == 0:
raise RuntimeError(f"Webdataset had a sample, but no embedding was found. ImgShard: {key[:-index_width]} - Index: {key[-index_width:]}")
sample[sample_key] = embedding
yield sample
except Exception as exn: # From wds implementation
if handler(exn):
continue
else:
break
insert_embedding = wds.filters.pipelinefilter(embedding_inserter)
def unassociated_shard_skipper(tarfiles, embeddings_url, handler=wds.handlers.reraise_exception):
"""Finds if the is a corresponding embedding for the tarfile at { url: [URL] }"""
embeddings_fs, embeddings_path = fsspec.core.url_to_fs(embeddings_url)
embedding_files = embeddings_fs.ls(embeddings_path)
get_embedding_shard = lambda embedding_file: int(embedding_file.split("_")[-1].split(".")[0])
embedding_shards = set([get_embedding_shard(filename) for filename in embedding_files]) # Sets have O(1) check for member
get_tar_shard = lambda tar_file: int(tar_file.split("/")[-1].split(".")[0])
for tarfile in tarfiles:
try:
webdataset_shard = get_tar_shard(tarfile["url"])
# If this shard has an associated embeddings file, we pass it through. Otherwise we iterate until we do have one
if webdataset_shard in embedding_shards:
yield tarfile
except Exception as exn: # From wds implementation
if handler(exn):
continue
else:
break
skip_unassociated_shards = wds.filters.pipelinefilter(unassociated_shard_skipper)
def join_embeddings(samples, handler=wds.handlers.reraise_exception):
"""
Takes the img_emb and text_emb keys and turns them into one key "emb": { "text": text_emb, "img": img_emb }
either or both of text_emb and img_emb may not be in the sample so we only add the ones that exist
"""
for sample in samples:
try:
sample['emb'] = {}
if 'text_emb' in sample:
sample['emb']['text'] = sample['text_emb']
if 'img_emb' in sample:
sample['emb']['img'] = sample['img_emb']
yield sample
except Exception as exn: # From wds implementation
if handler(exn):
continue
else:
break
def verify_keys(samples, required_keys, handler=wds.handlers.reraise_exception):
"""
Requires that both the image and embedding are present in the sample
This is important to do as a user may forget they do not have embeddings in their webdataset and neglect to add them using the embedding_folder_url parameter.
"""
for sample in samples:
try:
for key in required_keys:
assert key in sample, f"Sample {sample['__key__']} missing {key}. Has keys {sample.keys()}"
yield sample
except Exception as exn: # From wds implementation
if handler(exn):
continue
else:
break
key_verifier = wds.filters.pipelinefilter(verify_keys)
class ImageEmbeddingDataset(wds.DataPipeline, wds.compat.FluidInterface):
"""
    A fluid interface wrapper for DataPipeline that returns image embedding pairs
Reads embeddings as npy files from the webdataset if they exist. If embedding_folder_url is set, they will be inserted in from the alternate source.
"""
def __init__(
self,
urls,
img_embedding_folder_url=None,
text_embedding_folder_url=None,
index_width=None,
img_preproc=None,
extra_keys=[],
handler=wds.handlers.reraise_exception,
resample=False,
shuffle_shards=True
):
"""
Modeled directly off of the WebDataset constructor
:param urls: A url pointing to the tar files of the webdataset formatted as /path/to/webdataset/{0000..9999}.tar
        :param img_embedding_folder_url / text_embedding_folder_url: Required if the webdataset does not contain embeddings. A url pointing to the npy files of the embeddings. Should have the same number of shards as the webdataset.
Webdataset image keys should align with the index of the embedding. This means missing image indices must have a corresponding embedding of all zeros.
:param index_width: The number of digits in the index. This is used to align the embedding index with the image index.
For example, if a file in the webdataset shard 3 is named 0003039.jpg, we know the shard is 4 digits and the last 3 digits are the index_width.
:param img_preproc: This function is run on the img before it is batched and returned. Useful for data augmentation or converting to torch tensor.
:param handler: A webdataset handler.
:param resample: If true, resample webdataset shards with replacement. You need to set your own epoch size if this is true since it will resample infinitely.
:param shuffle_shards: If true, shuffle the shards before resampling. This cannot be true if resample is true.
"""
super().__init__()
keys = ["jpg", "emb"] + extra_keys
# if img_embedding_folder_url is not None:
# keys.append("img_emb")
# if text_embedding_folder_url is not None:
# keys.append("text_emb")
# keys.extend(extra_keys)
self.key_map = {key: i for i, key in enumerate(keys)}
self.resampling = resample
self.img_preproc = img_preproc
# If s3, check if s3fs is installed and s3cmd is installed and check if the data is piped instead of straight up
if (isinstance(urls, str) and "s3:" in urls) or (isinstance(urls, list) and any(["s3:" in url for url in urls])):
# Then this has an s3 link for the webdataset and we need extra packages
if shutil.which("s3cmd") is None:
raise RuntimeError("s3cmd is required for s3 webdataset")
if (img_embedding_folder_url is not None and "s3:" in img_embedding_folder_url) or (text_embedding_folder_url is not None and "s3:" in text_embedding_folder_url):
# Then the embeddings are being loaded from s3 and fsspec requires s3fs
try:
import s3fs
except ImportError:
raise RuntimeError("s3fs is required to load embeddings from s3")
# Add the shardList and randomize or resample if requested
if resample:
assert not shuffle_shards, "Cannot both resample and shuffle"
self.append(wds.ResampledShards(urls))
else:
self.append(wds.SimpleShardList(urls))
if shuffle_shards:
self.append(wds.filters.shuffle(1000))
if img_embedding_folder_url is not None:
# There may be webdataset shards that do not have a embedding shard associated with it. If we do not skip these, they would cause issues.
self.append(skip_unassociated_shards(embeddings_url=img_embedding_folder_url, handler=handler))
if text_embedding_folder_url is not None:
self.append(skip_unassociated_shards(embeddings_url=text_embedding_folder_url, handler=handler))
self.append(wds.tarfile_to_samples(handler=handler))
self.append(wds.decode("pilrgb", handler=handler))
if img_embedding_folder_url is not None:
            # Then we are loading image embeddings from a remote source
assert index_width is not None, "Reading embeddings separately requires index width length to be given"
self.append(insert_embedding(embeddings_url=img_embedding_folder_url, index_width=index_width, sample_key='img_emb', handler=handler))
if text_embedding_folder_url is not None:
            # Then we are loading text embeddings from a remote source
assert index_width is not None, "Reading embeddings separately requires index width length to be given"
self.append(insert_embedding(embeddings_url=text_embedding_folder_url, index_width=index_width, sample_key='text_emb', handler=handler))
self.append(join_embeddings)
self.append(key_verifier(required_keys=keys, handler=handler))
# Apply preprocessing
self.append(wds.map(self.preproc))
self.append(wds.to_tuple(*keys))
def preproc(self, sample):
"""Applies the preprocessing for images"""
if self.img_preproc is not None:
sample["jpg"] = self.img_preproc(sample["jpg"])
return sample
def create_image_embedding_dataloader(
tar_url,
num_workers,
batch_size,
img_embeddings_url=None,
text_embeddings_url=None,
index_width=None,
shuffle_num = None,
shuffle_shards = True,
resample_shards = False,
img_preproc=None,
extra_keys=[],
    handler=wds.handlers.reraise_exception  # or wds.handlers.warn_and_continue
):
"""
    Convenience function to create an image embedding dataset and dataloader in one line
:param tar_url: A url pointing to the tar files of the webdataset formatted as /path/to/webdataset/{0000..9999}.tar
:param num_workers: The number of workers to use for the dataloader
:param batch_size: The batch size to use for the dataloader
    :param img_embeddings_url / text_embeddings_url: Required if the webdataset does not contain embeddings. A url pointing to the npy files of the embeddings. Should have the same number of shards as the webdataset.
Webdataset image keys should align with the index of the embedding. This means missing image indices must have a corresponding embedding of all zeros.
:param index_width: The number of digits in the index. This is used to align the embedding index with the image index.
For example, if a file in the webdataset shard 3 is named 0003039.jpg, we know the shard is 4 digits and the last 3 digits are the index_width.
:param shuffle_num: If not None, shuffle the dataset with this size buffer after sampling.
:param shuffle_shards: If true, shuffle the shards before sampling. This cannot be true if resample is true.
:param resample_shards: If true, resample webdataset shards with replacement. You need to set your own epoch size if this is true since it will resample infinitely.
:param handler: A webdataset handler.
"""
ds = ImageEmbeddingDataset(
tar_url,
img_embedding_folder_url=img_embeddings_url,
text_embedding_folder_url=text_embeddings_url,
index_width=index_width,
shuffle_shards=shuffle_shards,
resample=resample_shards,
extra_keys=extra_keys,
img_preproc=img_preproc,
handler=handler
)
if shuffle_num is not None and shuffle_num > 0:
        ds.shuffle(shuffle_num)
return DataLoader(
ds,
num_workers=num_workers,
batch_size=batch_size,
prefetch_factor=2, # This might be good to have high so the next npy file is prefetched
pin_memory=True,
shuffle=False
)
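# Minimal usage sketch (the tar and embedding urls, index width and embedding size
# below are hypothetical placeholders; any layout matching the docstrings above works).
if __name__ == '__main__':
    from torchvision import transforms
    dataloader = create_image_embedding_dataloader(
        tar_url = "/data/webdataset/{0000..0099}.tar",
        num_workers = 4,
        batch_size = 32,
        img_embeddings_url = "/data/img_embeddings",
        index_width = 4,
        img_preproc = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(256),
            transforms.ToTensor()
        ])
    )
    for img, emb in dataloader:
        print(img.shape, emb['img'].shape)  # e.g. (32, 3, 256, 256) and (32, 768)
        break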
| DALLE2-pytorch-main | dalle2_pytorch/dataloaders/decoder_loader.py |
from math import ceil
from clip import tokenize
from embedding_reader import EmbeddingReader
from torch import from_numpy
from torch.utils.data import IterableDataset, DataLoader
class PriorEmbeddingDataset(IterableDataset):
"""
PriorEmbeddingDataset is a wrapper of EmbeddingReader.
It enables one to simplify the logic necessary to yield samples from
the different EmbeddingReader configurations available.
"""
def __init__(
self,
text_conditioned: bool,
batch_size: int,
start: int,
stop: int,
image_reader,
text_reader: EmbeddingReader = None,
) -> None:
        super().__init__()
self.text_conditioned = text_conditioned
if not self.text_conditioned:
self.text_reader = text_reader
self.image_reader = image_reader
self.start = start
self.stop = stop
self.batch_size = batch_size
def __len__(self):
return self.stop - self.start
def __iter__(self):
# D.R.Y loader args
loader_args = dict(
batch_size=self.batch_size,
start=self.start,
end=self.stop,
show_progress=False,
)
# if the data requested is text conditioned, only load images
if self.text_conditioned:
self.loader = self.image_reader(**loader_args)
# otherwise, include text embeddings and bypass metadata
else:
self.loader = zip(
self.image_reader(**loader_args), self.text_reader(**loader_args)
)
# return the data loader in its formatted state
return self
def __next__(self):
try:
return self.get_sample()
except StopIteration:
raise StopIteration
def __str__(self):
return f"<PriorEmbeddingDataset: start: {self.start}, stop: {self.stop}, len: {self.__len__()}>"
def set_start(self, start):
"""
Adjust the starting point within the reader, useful for resuming an epoch
"""
self.start = start
def get_start(self):
return self.start
def get_sample(self):
"""
        pre-process data from either reader into a common format
"""
if self.text_conditioned:
image_embedding, caption = next(self.loader)
image_embedding = from_numpy(image_embedding)
tokenized_caption = tokenize(caption["caption"].to_list(), truncate=True)
return image_embedding, tokenized_caption
else:
(image_embedding, _), (text_embedding, _) = next(self.loader)
image_embedding = from_numpy(image_embedding)
text_embedding = from_numpy(text_embedding)
return image_embedding, text_embedding
# helper functions
def distribute_to_rank(start, stop, rank, world_size):
"""
Distribute data to each rank given the world size.
Return:
- New start and stop points for this rank.
"""
num_samples = int(stop - start)
per_rank = int(ceil((num_samples) / float(world_size)))
assert (
per_rank > 0
), f"Number of samples per rank must be larger than 0, (found: {per_rank})"
rank_start = start + rank * per_rank
rank_stop = min(rank_start + per_rank, stop)
new_length = rank_stop - rank_start
assert (
new_length > 0
), "Calculated start and stop points result in a length of zero for this rank."
return rank_start, rank_stop
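# Worked example (hypothetical numbers): 100 samples over a world size of 4 gives
# ceil(100 / 4) = 25 samples per rank, so rank 2 is handed the half-open range [50, 75).
#   distribute_to_rank(0, 100, rank = 2, world_size = 4)  ->  (50, 75)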
def get_reader(
text_conditioned: bool, img_url: str, meta_url: str = None, txt_url: str = None
):
"""
Create an EmbeddingReader object from the specified URLs
get_reader() will always expect a url to image embeddings.
If text-conditioned, it will also expect a meta_url for the captions.
Otherwise, it will need txt_url for the matching text embeddings.
Returns an image_reader object if text-conditioned.
Otherwise it returns both an image_reader and a text_reader
"""
    assert img_url is not None, "Must supply an image url"
if text_conditioned:
assert meta_url is not None, "Must supply meta url if text-conditioned"
image_reader = EmbeddingReader(
embeddings_folder=img_url,
file_format="parquet_npy",
# will assume the caption column exists and is the only one requested
meta_columns=["caption"],
metadata_folder=meta_url,
)
return image_reader
# otherwise we will require text embeddings as well and return two readers
assert (
txt_url is not None
), "Must supply text embedding url if not text-conditioning"
image_reader = EmbeddingReader(img_url, file_format="npy")
text_reader = EmbeddingReader(txt_url, file_format="npy")
return image_reader, text_reader
def make_splits(
text_conditioned: bool,
batch_size: int,
num_data_points: int,
train_split: float,
eval_split: float,
image_reader: EmbeddingReader,
text_reader: EmbeddingReader = None,
start=0,
rank=0,
world_size=1,
):
"""
Split an embedding reader object as needed.
NOTE: make_splits() will infer the test set size from your train and eval.
Input:
- text_conditioned: whether to prepare text-conditioned training data
- batch_size: the batch size for a single gpu
- num_data_points: the total number of data points you wish to train on
- train_split: the percentage of data you wish to train on
- eval_split: the percentage of data you wish to validate on
- image_reader: the image_reader you wish to split
- text_reader: the text_reader you want to split (if !text_conditioned)
- start: the starting point within your dataset
- rank: the rank of your worker
- world_size: the total world size of your distributed training run
Returns:
- PyTorch Dataloaders that yield tuples of (img, txt) data.
"""
assert start < image_reader.count, "start position cannot exceed reader count."
# verify that the num_data_points does not exceed the max points
if num_data_points > (image_reader.count - start):
print(
"Specified count is larger than what's available...defaulting to reader's count."
)
num_data_points = image_reader.count
# compute split points
train_set_size = int(train_split * num_data_points)
eval_set_size = int(eval_split * num_data_points)
eval_start = train_set_size
eval_stop = int(eval_start + eval_set_size)
assert (
train_split + eval_split
) < 1.0, "Specified train and eval split is too large to infer a test split."
# distribute to rank
rank_train_start, rank_train_stop = distribute_to_rank(
start, train_set_size, rank, world_size
)
rank_eval_start, rank_eval_stop = distribute_to_rank(
train_set_size, eval_stop, rank, world_size
)
rank_test_start, rank_test_stop = distribute_to_rank(
eval_stop, num_data_points, rank, world_size
)
# wrap up splits into a dict
train_split_args = dict(
start=rank_train_start, stop=rank_train_stop, batch_size=batch_size
)
eval_split_args = dict(
start=rank_eval_start, stop=rank_eval_stop, batch_size=batch_size
)
test_split_args = dict(
start=rank_test_start, stop=rank_test_stop, batch_size=batch_size
)
if text_conditioned:
# add the text-conditioned args to a unified dict
reader_args = dict(
text_conditioned=text_conditioned,
image_reader=image_reader,
)
train_split_args = dict(**reader_args, **train_split_args)
eval_split_args = dict(**reader_args, **eval_split_args)
test_split_args = dict(**reader_args, **test_split_args)
train = PriorEmbeddingDataset(**train_split_args)
val = PriorEmbeddingDataset(**eval_split_args)
test = PriorEmbeddingDataset(**test_split_args)
else:
# add the non-conditioned args to a unified dict
reader_args = dict(
text_conditioned=text_conditioned,
image_reader=image_reader,
text_reader=text_reader,
)
train_split_args = dict(**reader_args, **train_split_args)
eval_split_args = dict(**reader_args, **eval_split_args)
test_split_args = dict(**reader_args, **test_split_args)
train = PriorEmbeddingDataset(**train_split_args)
val = PriorEmbeddingDataset(**eval_split_args)
test = PriorEmbeddingDataset(**test_split_args)
    # true batch size is specified in the PriorEmbeddingDataset
train_loader = DataLoader(train, batch_size=None)
eval_loader = DataLoader(val, batch_size=None)
test_loader = DataLoader(test, batch_size=None)
return train_loader, eval_loader, test_loader
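# Minimal usage sketch (the embedding and metadata folders are hypothetical
# placeholders; assumes text-conditioned training, so only an image EmbeddingReader
# with caption metadata is required).
if __name__ == '__main__':
    image_reader = get_reader(
        text_conditioned = True,
        img_url = "/data/image_embeddings",
        meta_url = "/data/caption_metadata"
    )
    train_loader, eval_loader, test_loader = make_splits(
        text_conditioned = True,
        batch_size = 64,
        num_data_points = 1_000_000,
        train_split = 0.8,
        eval_split = 0.1,
        image_reader = image_reader
    )
    image_embed, tokenized_caption = next(iter(train_loader))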
| DALLE2-pytorch-main | dalle2_pytorch/dataloaders/prior_loader.py |
from dalle2_pytorch.dataloaders.decoder_loader import ImageEmbeddingDataset, create_image_embedding_dataloader
from dalle2_pytorch.dataloaders.prior_loader import make_splits, get_reader, PriorEmbeddingDataset
| DALLE2-pytorch-main | dalle2_pytorch/dataloaders/__init__.py |
from pathlib import Path
import torch
from torch.utils import data
from torchvision import transforms, utils
from PIL import Image
# helpers functions
def cycle(dl):
while True:
for data in dl:
yield data
# dataset and dataloader
class Dataset(data.Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(image_size),
transforms.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
def get_images_dataloader(
folder,
*,
batch_size,
image_size,
shuffle = True,
cycle_dl = True,
pin_memory = True
):
ds = Dataset(folder, image_size)
dl = data.DataLoader(ds, batch_size = batch_size, shuffle = shuffle, pin_memory = pin_memory)
if cycle_dl:
dl = cycle(dl)
return dl
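# Minimal usage sketch (the folder path is a placeholder and assumes same-mode RGB
# images; with the default cycle_dl = True the loader is an infinite iterator, so
# draw batches with next() rather than a for loop).
if __name__ == '__main__':
    dataloader = get_images_dataloader('./path/to/images', batch_size = 16, image_size = 256)
    batch = next(dataloader)  # (16, 3, 256, 256) float tensor in [0, 1]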
| DALLE2-pytorch-main | dalle2_pytorch/dataloaders/simple_image_only_dataloader.py |
from setuptools import setup, find_packages
setup(
name = 'chroma-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Chroma - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/chroma-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion',
'protein design'
],
install_requires=[
'einops>=0.6',
'invariant-point-attention',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| chroma-pytorch-main | setup.py |
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM, logging
from tf_bind_transformer.cache_utils import cache_fn, run_once
logging.set_verbosity_error()
def exists(val):
return val is not None
def map_values(fn, dictionary):
return {k: fn(v) for k, v in dictionary.items()}
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
print('calculating context embed only on cpu')
MODELS = dict(
pubmed = dict(
dim = 768,
path = 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
)
)
GLOBAL_VARIABLES = dict(model = None, tokenizer = None)
def get_contextual_dim(model_name):
assert model_name in MODELS
return MODELS[model_name]['dim']
@run_once('init_transformer')
def init_transformer(model_name):
path = MODELS[model_name]['path']
GLOBAL_VARIABLES['tokenizer'] = AutoTokenizer.from_pretrained(path)
model = AutoModelForMaskedLM.from_pretrained(path)
if not CONTEXT_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = model
@torch.no_grad()
def tokenize_text(
text,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True
):
init_transformer(model_name)
model = GLOBAL_VARIABLES['model']
tokenizer = GLOBAL_VARIABLES['tokenizer']
encoding = tokenizer.batch_encode_plus(
[text],
add_special_tokens = True,
padding = True,
truncation = True,
max_length = max_length,
return_attention_mask = True,
return_tensors = 'pt'
)
if not CONTEXT_EMBED_USE_CPU:
encoding = map_values(lambda t: t.cuda(), encoding)
model.eval()
with torch.no_grad():
outputs = model(**encoding, output_hidden_states = True)
hidden_state = outputs.hidden_states[hidden_state_index][0]
if return_cls_token:
return hidden_state[0]
return hidden_state.mean(dim = 0)
def get_text_repr(
texts,
*,
device,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True,
):
assert model_name in MODELS, f'{model_name} not found in available text transformers to use'
if isinstance(texts, str):
texts = [texts]
get_context_repr_fn = cache_fn(tokenize_text, path = f'contexts/{model_name}')
representations = [get_context_repr_fn(text, max_length = max_length, model_name = model_name, hidden_state_index = hidden_state_index, return_cls_token = return_cls_token) for text in texts]
return torch.stack(representations).to(device)
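# Minimal usage sketch (assumes the PubMedBERT weights are downloadable and the
# tf-bind-transformer cache utilities imported above are installed; the query text
# is an arbitrary example).
if __name__ == '__main__':
    reprs = get_text_repr(
        ['regulation of T cell differentiation'],
        device = torch.device('cpu')
    )
    print(reprs.shape)  # (1, 768) when return_cls_token = True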
| chroma-pytorch-main | chroma_pytorch/semantic_conditioner.py |
from chroma_pytorch.chroma_pytorch import Chroma
| chroma-pytorch-main | chroma_pytorch/__init__.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
import math
from pathlib import Path
from random import random
from functools import partial
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
LayerNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
v = v / (h * w)
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
resnet_block_groups = 8,
learned_sinusoidal_dim = 16
):
super().__init__()
# determine dimensions
self.channels = channels
input_channels = channels * 2
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
fourier_dim = learned_sinusoidal_dim + 1
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, channels, 1)
def forward(self, x, time, x_self_cond = None):
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# chroma class
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
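# Sanity-check sketch: since sigmoid(x) + sigmoid(-x) = 1, the derived alpha and
# sigma satisfy alpha ** 2 + sigma ** 2 == 1 for any log-SNR, i.e. the schedule is
# variance preserving.
#   alpha, sigma = log_snr_to_alpha_sigma(torch.tensor(0.5))
#   assert torch.allclose(alpha ** 2 + sigma ** 2, torch.tensor(1.))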
class Chroma(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
use_ddim = False,
noise_schedule = 'cosine',
time_difference = 0.
):
super().__init__()
self.model = model
self.channels = self.model.channels
self.image_size = image_size
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.timesteps = timesteps
self.use_ddim = use_ddim
# proposed in the paper, summed to time_next
# as a way to fix a deficiency in self-conditioning and lower FID when the number of sampling timesteps is < 400
self.time_difference = time_difference
@property
def device(self):
return next(self.model.parameters()).device
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
@torch.no_grad()
def ddpm_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device=device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
# add the time delay
time_next = (time_next - self.time_difference).clamp(min = 0.)
noise_cond = self.log_snr(time)
# get predicted x0
x_start = self.model(img, noise_cond, x_start)
# clip x0
x_start.clamp_(-1., 1.)
# get log(snr)
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
# get alpha sigma of time and next time
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# derive posterior mean and variance
c = -expm1(log_snr - log_snr_next)
mean = alpha_next * (img * (1 - c) / alpha + c * x_start)
variance = (sigma_next ** 2) * c
log_variance = log(variance)
# get noise
noise = torch.where(
rearrange(time_next > 0, 'b -> b 1 1 1'),
torch.randn_like(img),
torch.zeros_like(img)
)
img = mean + (0.5 * log_variance).exp() * noise
return img
@torch.no_grad()
def ddim_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device = device)
x_start = None
for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
# get times and noise levels
log_snr = self.log_snr(times)
log_snr_next = self.log_snr(times_next)
padded_log_snr, padded_log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(padded_log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(padded_log_snr_next)
# add the time delay
times_next = (times_next - time_difference).clamp(min = 0.)
# predict x0
x_start = self.model(img, log_snr, x_start)
# clip x0
x_start.clamp_(-1., 1.)
# get predicted noise
pred_noise = (img - alpha * x_start) / sigma.clamp(min = 1e-8)
# calculate x next
img = x_start * alpha_next + pred_noise * sigma_next
return img
@torch.no_grad()
def sample(self, batch_size = 16):
image_size, channels = self.image_size, self.channels
sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size))
def forward(self, img, *args, **kwargs):
batch, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
# sample random times
times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
# noise sample
noise = torch.randn_like(img)
noise_level = self.log_snr(times)
padded_noise_level = right_pad_dims_to(img, noise_level)
alpha, sigma = log_snr_to_alpha_sigma(padded_noise_level)
noised_img = alpha * img + sigma * noise
# if doing self-conditioning, 50% of the time, predict x_start from current set of times
# and condition with unet with that
# this technique will slow down training by 25%, but seems to lower FID significantly
self_cond = None
if random() < 0.5:
with torch.no_grad():
self_cond = self.model(noised_img, noise_level).detach_()
# predict and take gradient step
pred = self.model(noised_img, noise_level, self_cond)
return F.mse_loss(pred, img)
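# Minimal usage sketch (hypothetical hyperparameters; images stand in for whatever
# protein representation this diffusion template is eventually pointed at):
#   unet = Unet(dim = 64)
#   chroma = Chroma(unet, image_size = 128, timesteps = 1000)
#   loss = chroma(torch.randn(2, 3, 128, 128))   # training step, then loss.backward()
#   samples = chroma.sample(batch_size = 4)      # (4, 3, 128, 128)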
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
fp16 = False,
split_batches = True,
convert_image_to = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = 'fp16' if fp16 else 'no'
)
self.accelerator.native_amp = amp
self.model = diffusion_model
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
# dataset and dataloader
self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
self.ema.load_state_dict(data['ema'])
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
self.ema.to(device)
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
self.save(milestone)
self.step += 1
pbar.update(1)
accelerator.print('training complete')
| chroma-pytorch-main | chroma_pytorch/chroma_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'nwt-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'NWT - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/NWT-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'pytorch',
'audio to video synthesis'
],
install_requires=[
'einops>=0.4',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| NWT-pytorch-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import EinMix as Mix
class Memcodes(nn.Module):
def __init__(
self,
*,
dim,
num_codes,
heads = 8,
temperature = 1.,
):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
self.heads = heads
self.dim = dim
self.scale = (dim // heads) ** -0.5
self.temperature = temperature
self.num_codes = num_codes
num_codebooks = heads
codebook_dim = dim // heads
self.codes = nn.Parameter(torch.randn(num_codebooks, num_codes, codebook_dim))
self.to_k = Mix('h n d -> h n c', weight_shape = 'h d c', h = heads, d = codebook_dim, c = codebook_dim)
self.to_v = Mix('h n d -> h n c', weight_shape = 'h d c', h = heads, d = codebook_dim, c = codebook_dim)
def get_codes_from_indices(self, codebook_indices, *, merge_output_heads = True):
batch = codebook_indices.shape[0]
values = self.to_v(self.codes)
values = repeat(values, 'h n d -> b h n d', b = batch)
codebook_indices = repeat(codebook_indices, '... -> ... d', d = values.shape[-1])
out = values.gather(2, codebook_indices)
if not merge_output_heads:
return out
return rearrange(out, 'b h n d -> b n (h d)')
def forward(self, x, *, merge_output_heads = True):
assert x.shape[-1] == self.dim
# split out heads
q = rearrange(x, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
# get key / values of codes
k, v = self.to_k(self.codes), self.to_v(self.codes)
# straight through gumbel softmax
logits = einsum('b h i d, h j d -> b h i j', q, k)
if self.training:
attn = F.gumbel_softmax(logits, tau = self.temperature, dim = -1, hard = True)
codebook_indices = attn.argmax(dim = -1)
else:
codebook_indices = logits.argmax(dim = -1)
attn = F.one_hot(codebook_indices, num_classes = self.num_codes).float()
out = einsum('b h i j, h j d -> b h i d', attn, v)
if not merge_output_heads:
return out, codebook_indices
# merge heads if specified
out = rearrange(out, 'b h n d -> b n (h d)')
return out, codebook_indices
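# Minimal usage sketch (hypothetical sizes): quantize transformer hidden states into
# one of 1024 codes per head via straight-through Gumbel softmax, then reconstruct
# the quantized output from the discrete indices alone.
if __name__ == '__main__':
    codebook = Memcodes(dim = 512, num_codes = 1024, heads = 8)
    x = torch.randn(1, 1024, 512)
    quantized, indices = codebook(x)                      # (1, 1024, 512), (1, 8, 1024)
    reconstructed = codebook.get_codes_from_indices(indices)
    assert torch.allclose(quantized, reconstructed, atol = 1e-6)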
| NWT-pytorch-main | nwt_pytorch/nwt_pytorch.py |
from nwt_pytorch.nwt_pytorch import Memcodes
| NWT-pytorch-main | nwt_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'n-grammer-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.14',
license='MIT',
description = 'N-Grammer - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/n-grammer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'n-grams',
'memory'
],
install_requires=[
'einops>=0.3',
'sympy',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| n-grammer-pytorch-main | setup.py |
from n_grammer_pytorch.n_grammer_pytorch import VQNgrammer, Ngrammer, get_ngrammer_parameters, get_ngrammer_param_groups
| n-grammer-pytorch-main | n_grammer_pytorch/__init__.py |
# based off the jax code
# https://github.com/tensorflow/lingvo/blob/master/lingvo/jax/layers/ngrammer.py
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
import sympy
# helper functions
def exists(val):
return val is not None
def sum_squares(t, dim = -1):
return (t ** 2).sum(dim = dim)
# bigram related functions
def multi_way_hash_ids(x, a, b, prime, buckets):
return ((x * a + b) % prime) % buckets
def get_bigram_ids(ids, vocab_size, segment_pos = None):
# ids are in shape (batch, seq, heads)
ids = ids.long()
ids_0 = F.pad(ids, (0, 0, 0, 1))
ids_1 = F.pad(ids, (0, 0, 1, 0))
if exists(segment_pos):
segment_pos = rearrange(segment_pos, 'b n -> b n 1')
mask = (segment_pos == 0).long()
mask = 1 - mask
mask = F.pad(mask, (0, 0, 0, 1))
ids_1 *= mask
ngram_ids = ids_0 + ids_1 * vocab_size
ngram_ids = ngram_ids[:, :-1]
return ngram_ids
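# Illustrative sketch (made-up shapes): for ids of shape (batch, seq, heads) the
# output has the same shape, and position i encodes the pair as
# ids[i] + ids[i - 1] * vocab_size, with the previous id zeroed wherever
# segment_pos marks a segment start.
#   ids = torch.randint(0, 10, (1, 4, 2))     # vocab_size = 10
#   get_bigram_ids(ids, 10).shape             # -> torch.Size([1, 4, 2])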
# optimizer related functions
def get_ngrammer_parameters(module):
params = set()
for m in module.modules():
if isinstance(m, Ngrammer):
params.update(m.parameters())
rest = set(module.parameters()) - params
return list(params), list(rest)
def get_ngrammer_param_groups(module, ngrammer_learning_rate = 1e-2):
ngrammer_params, rest = get_ngrammer_parameters(module)
return [{'params': rest}, {'params': ngrammer_params, 'lr': ngrammer_learning_rate}]
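# Usage sketch (hypothetical model and learning rates): give the Ngrammer parameters
# the larger learning rate recommended for the n-gram embeddings while the rest of
# the network keeps the optimizer default.
#   param_groups = get_ngrammer_param_groups(model, ngrammer_learning_rate = 1e-2)
#   optimizer = torch.optim.Adam(param_groups, lr = 3e-4)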
# layernorm
class MultiheadLayerNorm(nn.Module):
def __init__(self, dim, heads = 1, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(heads, dim))
self.b = nn.Parameter(torch.zeros(heads, dim))
def forward(self, x):
std = torch.var(x, dim = -1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
# classes
class VectorQuantization(nn.Module):
def __init__(
self,
*,
num_clusters,
num_heads,
dim_per_head,
decay = 0.999,
epsilon = 1e-6
):
super().__init__()
self.decay = decay
self.epsilon = epsilon
self.num_heads = num_heads
self.dim_per_head = dim_per_head
self.num_clusters = num_clusters
self.register_buffer('means', torch.randn(num_heads, num_clusters, dim_per_head))
def forward(
self,
x,
mask = None
):
h, dim_head, num_clusters, eps, decay, means = self.num_heads, self.dim_per_head, self.num_clusters, self.epsilon, self.decay, self.means
assert x.shape[-1] == (h * dim_head), f'input embedding feature dimension must be {h * dim_head}'
# split heads from input
x = rearrange(x, 'b n (h d) -> b n h d', h = h)
# get distance of input embeddings from means
dists = (
rearrange(sum_squares(x), 'b n h -> b n h 1')
- 2 * einsum('b n h d, h k d -> b n h k', x, means)
+ rearrange(sum_squares(means), 'h k -> 1 1 h k')
)
# get cluster ids
cluster_ids = dists.argmin(dim = -1)
if self.training:
# get one hot, for calculating number of matches per mean
nearest_one_hot = F.one_hot(cluster_ids, num_classes = num_clusters)
per_cluster_count = nearest_one_hot.sum(dim = (0, 1))
# sum of the input per each closest centroid.
sum_x = einsum('b n h k, b n h d -> h k d', nearest_one_hot.float(), x)
# calculate new means
new_means = sum_x / (eps + rearrange(per_cluster_count, '... -> ... 1'))
# exponential moving average
updated_means = (1. - decay) * new_means + decay * means
self.means.data.copy_(updated_means)
return cluster_ids
class Ngrammer(nn.Module):
def __init__(
self,
*,
unigram_vocab_size,
dim_per_head,
num_heads = 1,
ngram_emb_dim = 8,
ngram_vocab_size = 768 * 256,
concat_ngrams = True
):
super().__init__()
assert not (concat_ngrams and dim_per_head <= ngram_emb_dim), 'unigram head dimension cannot be smaller than ngram embedding dimension when concatting'
assert not (not concat_ngrams and dim_per_head != ngram_emb_dim), 'unigram head dimension must be equal to ngram embedding dimension if not concatting'
self.num_heads = num_heads
self.ngram_vocab_size = ngram_vocab_size
self.unigram_vocab_size = unigram_vocab_size
self.concat_ngrams = concat_ngrams
self.embeddings = nn.ModuleList([])
self.ngram_layernorm = MultiheadLayerNorm(ngram_emb_dim, heads = num_heads)
self.embeds_layernorm = MultiheadLayerNorm(dim_per_head, heads = num_heads)
self.ngram_embeds = nn.Embedding(ngram_vocab_size * num_heads, ngram_emb_dim)
primes = list(sympy.primerange(ngram_vocab_size + 1, 2 * ngram_vocab_size))[:num_heads]
self.register_buffer('primes', torch.tensor(primes), persistent = False)
def forward(
self,
embeds,
cluster_ids,
mask = None,
segment_pos = None
):
num_heads, vocab_size, unigram_vocab_size, device = self.num_heads, self.ngram_vocab_size, self.unigram_vocab_size, embeds.device
if cluster_ids.ndim == 2:
cluster_ids = repeat(cluster_ids, '... -> ... h', h = num_heads)
ngram_cluster_ids = get_bigram_ids(cluster_ids, unigram_vocab_size, segment_pos)
# prepare arange of heads for parallel computation of multi-way hash ids
head_range = torch.arange(num_heads, device = device)
head_range = rearrange(head_range, 'h -> 1 1 h')
primes = rearrange(self.primes, 'h -> 1 1 h')
# multi-way hash ids, using https://arxiv.org/abs/1504.06804
ngram_ids = multi_way_hash_ids(ngram_cluster_ids, head_range + 1, head_range + 1, primes, vocab_size)
# shift vocab range for each head appropriately by the head number
ngram_ids = ngram_ids + (vocab_size * head_range)
# get all n-gram embeddings in one go, and multi-head layernorm
ngram_embeds = self.ngram_embeds(ngram_ids)
normed_ngram_embeds = self.ngram_layernorm(ngram_embeds)
# multi-head layernorm inputs
embeds = rearrange(embeds, 'b n (h d) -> b n h d', h = num_heads)
normed_embeds = self.embeds_layernorm(embeds)
# concat original unigram embeds with bigram
if self.concat_ngrams:
input_sliced_dim = normed_embeds.shape[-1] - normed_ngram_embeds.shape[-1]
out = torch.cat((
normed_embeds[..., :input_sliced_dim],
normed_ngram_embeds
), dim = -1)
else:
out = normed_embeds + normed_ngram_embeds
# flatten
out = rearrange(out, 'b n ... -> b n (...)')
# mask if needed
if exists(mask):
out = out * rearrange(mask, 'b n -> b n 1').float()
return out
# main class
class VQNgrammer(nn.Module):
def __init__(
self,
*,
num_clusters,
num_heads,
dim_per_head,
ngram_vocab_size = 768 * 256,
ngram_emb_dim = 8,
concat_ngrams = True,
decay = 0.999,
epsilon = 1e-6
):
super().__init__()
assert ngram_vocab_size < (num_clusters ** 2), 'the ngram vocab size should be less than the number of clusters squared'
self.vq = VectorQuantization(
num_clusters = num_clusters,
num_heads = num_heads,
dim_per_head = dim_per_head,
decay = decay,
epsilon = epsilon
)
self.ngram = Ngrammer(
unigram_vocab_size = num_clusters,
ngram_vocab_size = ngram_vocab_size,
ngram_emb_dim = ngram_emb_dim,
concat_ngrams = concat_ngrams,
num_heads = num_heads,
dim_per_head = dim_per_head
)
def forward(
self,
x,
mask = None,
segment_pos = None
):
cluster_ids = self.vq(x, mask = mask)
out = self.ngram(
x,
cluster_ids = cluster_ids,
mask = mask,
segment_pos = segment_pos
)
return out
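# Minimal usage sketch (hypothetical sizes mirroring the defaults above): cluster the
# token embeddings into 1024 ids per head, then augment each head with hashed
# bigram embeddings.
if __name__ == '__main__':
    vq_ngrammer = VQNgrammer(
        num_clusters = 1024,
        num_heads = 16,
        dim_per_head = 32,
        ngram_vocab_size = 768 * 256,
        ngram_emb_dim = 8
    )
    x = torch.randn(1, 1024, 16 * 32)  # (batch, seq, num_heads * dim_per_head)
    out = vq_ngrammer(x)               # same shape as the input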
| n-grammer-pytorch-main | n_grammer_pytorch/n_grammer_pytorch.py |
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = 'revtorch',
packages = ['revtorch'],
version = '0.2.3',
license='bsd-3-clause',
description = 'Framework for creating (partially) reversible neural networks with PyTorch',
long_description=long_description,
long_description_content_type="text/markdown",
author = 'Robin Brügger',
author_email = '[email protected]',
url = 'https://github.com/RobinBruegger/RevTorch',
download_url = 'https://github.com/RobinBruegger/RevTorch/archive/v0.2.3.tar.gz',
    keywords = ['reversible neural network'],
install_requires=[],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
) | RevTorch-master | setup.py |
from revtorch.revtorch import ReversibleBlock, ReversibleSequence | RevTorch-master | revtorch/__init__.py |
import torch
import torch.nn as nn
#import torch.autograd.function as func
import sys
import random
class ReversibleBlock(nn.Module):
'''
Elementary building block for building (partially) reversible architectures
Implementation of the Reversible block described in the RevNet paper
(https://arxiv.org/abs/1707.04585). Must be used inside a :class:`revtorch.ReversibleSequence`
for autograd support.
Arguments:
        f_block (nn.Module): arbitrary subnetwork whose output shape is equal to its input shape
        g_block (nn.Module): arbitrary subnetwork whose output shape is equal to its input shape
        split_along_dim (integer): dimension along which the tensor is split into the two parts required for the reversible block
fix_random_seed (boolean): Use the same random seed for the forward and backward pass if set to true
'''
def __init__(self, f_block, g_block, split_along_dim=1, fix_random_seed = False):
super(ReversibleBlock, self).__init__()
self.f_block = f_block
self.g_block = g_block
self.split_along_dim = split_along_dim
self.fix_random_seed = fix_random_seed
self.random_seeds = {}
def _init_seed(self, namespace):
if self.fix_random_seed:
self.random_seeds[namespace] = random.randint(0, sys.maxsize)
self._set_seed(namespace)
def _set_seed(self, namespace):
if self.fix_random_seed:
torch.manual_seed(self.random_seeds[namespace])
def forward(self, x):
"""
Performs the forward pass of the reversible block. Does not record any gradients.
:param x: Input tensor. Must be splittable along dimension 1.
:return: Output tensor of the same shape as the input tensor
"""
x1, x2 = torch.chunk(x, 2, dim=self.split_along_dim)
y1, y2 = None, None
with torch.no_grad():
self._init_seed('f')
y1 = x1 + self.f_block(x2)
self._init_seed('g')
y2 = x2 + self.g_block(y1)
return torch.cat([y1, y2], dim=self.split_along_dim)
def backward_pass(self, y, dy, retain_graph):
"""
Performs the backward pass of the reversible block.
Calculates the derivatives of the block's parameters in f_block and g_block, as well as the inputs of the
forward pass and its gradients.
:param y: Outputs of the reversible block
:param dy: Derivatives of the outputs
:param retain_graph: Whether to retain the graph on intercepted backwards
        :return: A tuple of (block input, block input derivatives). The block inputs are the same shape as the block outputs.
"""
# Split the arguments channel-wise
y1, y2 = torch.chunk(y, 2, dim=self.split_along_dim)
del y
assert (not y1.requires_grad), "y1 must already be detached"
assert (not y2.requires_grad), "y2 must already be detached"
dy1, dy2 = torch.chunk(dy, 2, dim=self.split_along_dim)
del dy
assert (not dy1.requires_grad), "dy1 must not require grad"
assert (not dy2.requires_grad), "dy2 must not require grad"
# Enable autograd for y1 and y2. This ensures that PyTorch
# keeps track of ops. that use y1 and y2 as inputs in a DAG
y1.requires_grad = True
y2.requires_grad = True
# Ensures that PyTorch tracks the operations in a DAG
with torch.enable_grad():
self._set_seed('g')
gy1 = self.g_block(y1)
# Use autograd framework to differentiate the calculation. The
# derivatives of the parameters of G are set as a side effect
gy1.backward(dy2, retain_graph = retain_graph)
with torch.no_grad():
x2 = y2 - gy1 # Restore first input of forward()
del y2, gy1
# The gradient of x1 is the sum of the gradient of the output
# y1 as well as the gradient that flows back through G
# (The gradient that flows back through G is stored in y1.grad)
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
self._set_seed('f')
fx2 = self.f_block(x2)
# Use autograd framework to differentiate the calculation. The
            # derivatives of the parameters of F are set as a side effect
fx2.backward(dx1, retain_graph = retain_graph)
with torch.no_grad():
x1 = y1 - fx2 # Restore second input of forward()
del y1, fx2
# The gradient of x2 is the sum of the gradient of the output
# y2 as well as the gradient that flows back through F
# (The gradient that flows back through F is stored in x2.grad)
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
# Undo the channelwise split
x = torch.cat([x1, x2.detach()], dim=self.split_along_dim)
dx = torch.cat([dx1, dx2], dim=self.split_along_dim)
return x, dx
class _ReversibleModuleFunction(torch.autograd.function.Function):
'''
Integrates the reversible sequence into the autograd framework
'''
@staticmethod
def forward(ctx, x, reversible_blocks, eagerly_discard_variables):
'''
Performs the forward pass of a reversible sequence within the autograd framework
:param ctx: autograd context
:param x: input tensor
:param reversible_blocks: nn.ModuleList of reversible blocks
:param eagerly_discard_variables: whether to eagerly free intermediate variables during the backward pass
:return: output tensor
'''
assert (isinstance(reversible_blocks, nn.ModuleList))
for block in reversible_blocks:
assert (isinstance(block, ReversibleBlock))
x = block(x)
ctx.y = x.detach() # not using ctx.save_for_backward(x) saves memory by being able to free ctx.y earlier in the backward pass
ctx.reversible_blocks = reversible_blocks
ctx.eagerly_discard_variables = eagerly_discard_variables
return x
@staticmethod
def backward(ctx, dy):
'''
Performs the backward pass of a reversible sequence within the autograd framework
:param ctx: autograd context
:param dy: derivatives of the outputs
:return: derivatives of the inputs
'''
y = ctx.y
if ctx.eagerly_discard_variables:
del ctx.y
for i in range(len(ctx.reversible_blocks) - 1, -1, -1):
y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables) # retain the graph only if variables are not eagerly discarded
if ctx.eagerly_discard_variables:
del ctx.reversible_blocks
return dy, None, None
class ReversibleSequence(nn.Module):
'''
Basic building element for (partially) reversible networks
A reversible sequence is a sequence of arbitrarily many reversible blocks. The entire sequence is reversible.
The activations are only saved at the end of the sequence. Backpropagation leverages the reversible nature of
the reversible sequence to save memory.
Arguments:
reversible_blocks (nn.ModuleList): A ModuleList that exclusively contains instances of ReversibleBlock
which are to be used in the reversible sequence.
eagerly_discard_variables (bool): Should the module eagerly discard the output and not retain the graph for the individual backwards called on the reversible blocks, for further memory savings
'''
def __init__(self, reversible_blocks, eagerly_discard_variables = True):
super(ReversibleSequence, self).__init__()
assert (isinstance(reversible_blocks, nn.ModuleList))
for block in reversible_blocks:
assert(isinstance(block, ReversibleBlock))
self.reversible_blocks = reversible_blocks
self.eagerly_discard_variables = eagerly_discard_variables
def forward(self, x):
'''
Forward pass of a reversible sequence
:param x: Input tensor
:return: Output tensor
'''
x = _ReversibleModuleFunction.apply(x, self.reversible_blocks, self.eagerly_discard_variables)
return x
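# A minimal usage sketch (an illustrative assumption, not part of the library: it presumes
# ReversibleBlock takes (f_block, g_block) as its first two arguments and splits the input
# in half along dimension 1, as the code above suggests; adjust if the constructor differs).
make_half = lambda: nn.Sequential(nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(inplace=True))
blocks = nn.ModuleList([ReversibleBlock(make_half(), make_half()) for _ in range(4)])
sequence = ReversibleSequence(blocks)
x = torch.randn(2, 128, 8, 8, requires_grad=True) # 128 channels = two halves of 64
y = sequence(x) # per-block activations are not stored
y.sum().backward() # inputs and gradients are recomputed block by block
print(x.grad.shape) # torch.Size([2, 128, 8, 8])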
| RevTorch-master | revtorch/revtorch.py |
import os
import pkg_resources
from setuptools import setup, find_packages
from pathlib import Path
if __name__ == "__main__":
# Read description from README
with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
long_description = file.read()
setup(
name="clip-anytorch",
long_description=long_description,
long_description_content_type="text/markdown",
description=long_description.split("\n")[0],
url="https://github.com/rom1504/CLIP",
py_modules=["clip"],
version="2.5.2",
author="OpenAI",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
include_package_data=True,
extras_require={'dev': ['pytest']},
)
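# Hedged usage note: with this setup file the package is typically installed with
# `pip install .` from the repository root (or, assuming the same name is published on PyPI,
# `pip install clip-anytorch`); runtime dependencies are read from requirements.txt above.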
| CLIP-main | setup.py |
from .clip import *
| CLIP-main | clip/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
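# Instantiation sketch (hedged): the layer counts below follow a standard ResNet-50-style
# configuration and are an illustrative assumption, not values defined in this file.
#   visual = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
#                           input_resolution=224, width=64)
#   feats = visual(torch.randn(1, 3, 224, 224)) # -> shape [1, 1024]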
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
attn_mask = None
if self.attn_mask is not None:
n_ctx = x.shape[0]
attn_mask = self.attn_mask[..., -n_ctx:, -n_ctx:].to(dtype=x.dtype, device=x.device)
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
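# Instantiation sketch (hedged): a ViT-B/32-style configuration, given only as an
# illustrative assumption consistent with the constructor above.
#   vit = VisionTransformer(input_resolution=224, patch_size=32, width=768,
#                           layers=12, heads=12, output_dim=512)
#   feats = vit(torch.randn(1, 3, 224, 224)) # -> shape [1, 512]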
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisionTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create the causal attention mask for the text transformer
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # keep -inf strictly above the diagonal; on and below the diagonal stays 0
return mask
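# For example, with context_length = 3 the additive mask built here is
#   [[0, -inf, -inf],
#    [0,    0, -inf],
#    [0,    0,    0]]
# i.e. each position may attend to itself and to earlier positions only.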
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
n_ctx = text.shape[-1]
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding[:n_ctx].type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
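# Usage sketch (hedged): given preprocessed `images` and tokenized `texts`, the logits above
# turn into per-image probabilities over the candidate texts via a softmax:
#   logits_per_image, logits_per_text = model(images, texts)
#   probs = logits_per_image.softmax(dim=-1) # shape [n_images, n_texts]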
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
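# Usage sketch (hedged): build_model expects the state dict of a pretrained CLIP checkpoint;
# the filename below is a placeholder assumption, not a file shipped with this module.
#   state_dict = torch.jit.load("ViT-B-32.pt", map_location="cpu").state_dict()
#   model = build_model(state_dict) # returns the model in eval mode, with fp16 weights where applicable
#   image_features = model.encode_image(images)
#   text_features = model.encode_text(texts)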
| CLIP-main | clip/model.py |