# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: [email protected] (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
import os
import sys

lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
exclusions = set(['relicense.py'])
def update(text):
if text.find(lgpl3_header) == -1:
return text, False
return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
if basename in exclusions:
print 'SKIPPED', basename
continue
filename = os.path.join(root, basename)
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
|
# Intentionally empty
|
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic, as the python extension to gdb is still pretty basic.
# It cannot handle complex Eigen types and it doesn't support any of the other Eigen types,
# such as Quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re

class EigenMatrixPrinter:
"Print Eigen Matrix or Array of some kind"
def __init__(self, variety, val):
"Extract all the necessary information"
# Save the variety (presumably "Matrix" or "Array") for later usage
self.variety = variety
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
tag = self.type.tag
regex = re.compile('<.*>')
m = regex.findall(tag)[0][1:-1]
template_params = m.split(',')
template_params = [x.replace(" ", "") for x in template_params]
if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
self.rows = val['m_storage']['m_rows']
else:
self.rows = int(template_params[1])
if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1':
self.cols = val['m_storage']['m_cols']
else:
self.cols = int(template_params[2])
self.options = 0 # default value
if len(template_params) > 3:
self.options = template_params[3];
self.rowMajor = (int(self.options) & 0x1)
self.innerType = self.type.template_argument(0)
self.val = val
# Fixed size matrices have a struct as their storage, so we need to walk through this
self.data = self.val['m_storage']['m_data']
if self.data.type.code == gdb.TYPE_CODE_STRUCT:
self.data = self.data['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
def __init__ (self, rows, cols, dataPtr, rowMajor):
self.rows = rows
self.cols = cols
self.dataPtr = dataPtr
self.currentRow = 0
self.currentCol = 0
self.rowMajor = rowMajor
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
row = self.currentRow
col = self.currentCol
if self.rowMajor == 0:
if self.currentCol >= self.cols:
raise StopIteration
self.currentRow = self.currentRow + 1
if self.currentRow >= self.rows:
self.currentRow = 0
self.currentCol = self.currentCol + 1
else:
if self.currentRow >= self.rows:
raise StopIteration
self.currentCol = self.currentCol + 1
if self.currentCol >= self.cols:
self.currentCol = 0
self.currentRow = self.currentRow + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
if (self.cols == 1): #if it's a column vector
return ('[%d]' % (row,), item)
elif (self.rows == 1): #if it's a row vector
return ('[%d]' % (col,), item)
return ('[%d,%d]' % (row, col), item)
def children(self):
return self._iterator(self.rows, self.cols, self.data, self.rowMajor)
def to_string(self):
return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenQuaternionPrinter:
"Print an Eigen Quaternion"
def __init__(self, val):
"Extract all the necessary information"
# The gdb extension does not support value template arguments - need to extract them by hand
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
self.type = type.unqualified().strip_typedefs()
self.innerType = self.type.template_argument(0)
self.val = val
# Quaternions have a struct as their storage, so we need to walk through this
self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
self.data = self.data.cast(self.innerType.pointer())
class _iterator:
def __init__ (self, dataPtr):
self.dataPtr = dataPtr
self.currentElement = 0
self.elementNames = ['x', 'y', 'z', 'w']
def __iter__ (self):
return self
def next(self):
return self.__next__() # Python 2.x compatibility
def __next__(self):
element = self.currentElement
if self.currentElement >= 4: # there are 4 elements in a quaternion
raise StopIteration
self.currentElement = self.currentElement + 1
item = self.dataPtr.dereference()
self.dataPtr = self.dataPtr + 1
return ('[%s]' % (self.elementNames[element],), item)
def children(self):
return self._iterator(self.data)
def to_string(self):
return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val)
pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val)
pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = lambda val: EigenMatrixPrinter("Array", val)
def register_eigen_printers(obj):
"Register eigen pretty-printers with objfile Obj"
if obj == None:
obj = gdb
obj.pretty_printers.append(lookup_function)
def lookup_function(val):
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
for function in pretty_printers_dict:
if function.search(typename):
return pretty_printers_dict[function](val)
return None
pretty_printers_dict = {}
build_eigen_dictionary ()
|
from attention_tensorflow_mesh.attention_tensorflow_mesh import transformer_lm, transformer, attention
|
import math

import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf  # the code below uses the TF1-style variable_scope / initializer API

# helpers
def default(val, d):
return val if val is not None else d
# simple linear layer
def linear(x, dim_out, scope = 'linear', bias = True):
with tf.variable_scope(scope):
*_, dim_in = x.shape
w_init_stdev = 1 / math.sqrt(dim_in.size)
return mtf.layers.dense(x, new_dims=[dim_out], reduced_dims=[dim_in], name=scope, use_bias=bias,
kernel_initializer=tf.random_normal_initializer(stddev=w_init_stdev, dtype=tf.float32))
# norm
def norm(x, axis = None, epsilon=1e-5):
axis = default(axis, x.shape[-1])
u = mtf.reduce_mean(x, reduced_dim=axis)
s = mtf.reduce_mean(mtf.square(x - u), reduced_dim=axis)
u = mtf.broadcast(u, x.shape)
s = mtf.broadcast(s, x.shape)
return (x - u) * mtf.rsqrt(s + epsilon)
def scale_norm(x, scope, *, axis=None, epsilon=1e-5, params=None):
if axis is None:
axis = x.shape[-1]
with tf.variable_scope(scope):
n_state = x.shape[-1]
dt = tf.float32
g = mtf.get_variable(x.mesh, 'g', [], initializer=tf.constant_initializer(1, dtype=dt), dtype=dt)
x = norm(x, axis, epsilon)
x = x * g
return x
def prenorm(fn, scope):
def inner(x, *args, **kwargs):
return fn(scale_norm(x, scope), *args, **kwargs)
return inner
def residual(fn):
def inner(x, *args, **kwargs):
return fn(x, *args, **kwargs) + x
return inner
# full multi-head attention
def attention(x, dim_head, dim_features_head, scope = 'attn', causal = False):
with tf.variable_scope(scope):
mesh, batch, seq, dim = x.mesh, *x.shape
dim_heads = mtf.Dimension('dim_heads', dim_head.size * dim_features_head.size)
dim_intermediate = mtf.Dimension('qkv_dimension', dim_heads.size * 3)
qkv = linear(x, dim_intermediate, bias = False, scope='to_qkv')
q, k, v = mtf.split(qkv, dim_intermediate, 3)
q, k, v = map(lambda t: mtf.reshape(t, [batch, seq, dim_head, dim_features_head]), (q, k, v))
q, k, v = map(lambda t: mtf.transpose(t, [batch, dim_head, seq, dim_features_head]), (q, k, v))
k, v = map(lambda t: mtf.rename_dimension(t, seq.name, 'memory_length'), (k, v))
mem_len_dim = v.shape[-2]
dots = mtf.layers.us_einsum([q, k], [batch, dim_head, seq, mem_len_dim])
if causal:
i = mtf.range(mesh, seq, tf.int32)
j = mtf.range(mesh, mem_len_dim, tf.int32)
i, j = map(lambda t: mtf.broadcast(t, [seq, mem_len_dim]), (i, j))
mask = mtf.less(i + mem_len_dim.size - seq.size, j)
mask = mtf.cast(mask, tf.float32) * -1e10
dots += mask
attn = mtf.softmax(dots, mem_len_dim)
out = mtf.einsum([attn, v], [batch, dim_head, seq, dim_features_head])
out = mtf.transpose(out, [batch, seq, dim_head, dim_features_head])
out = mtf.reshape(out, [batch, seq, dim_heads])
combined_out = linear(out, dim, scope='combine_output')
return combined_out
# feed forward
def ff(x, mult = 4, scope = 'ff'):
*_, dim = x.shape
with tf.variable_scope(scope):
dim_intermediate = mtf.Dimension('ff_intermediate', dim.size * mult)
h = linear(x, dim_intermediate, scope='w1')
h = mtf.gelu(h)
h = linear(h, dim, scope='w2')
return h
# block
def transformer(x, *, depth, dim_head, dim_features_head, causal = False):
attn_fn = residual(prenorm(attention, 'norm1'))
ff_fn = residual(prenorm(ff, 'norm2'))
for i in range(depth):
with tf.variable_scope(f'layer_{i}'):
x = attn_fn(x, dim_head, dim_features_head, causal = causal)
x = ff_fn(x)
return x
# language model
def transformer_lm(x, *, dim, num_tokens, depth, max_seq_len, dim_head, dim_features_head, causal = False):
mesh, batch, seq_dim = x.mesh, *x.shape
dim = mtf.Dimension('dim', dim)
dim_head = mtf.Dimension('dim_head', dim_head)
dim_features_head = mtf.Dimension('dim_features_head', dim_features_head)
dim_num_tokens = mtf.Dimension('vocab_size', num_tokens)
dim_max_seq_len = mtf.Dimension('max_seq_len', max_seq_len)
wte = mtf.get_variable(mesh, name='wte', shape=mtf.Shape([dim_num_tokens, dim]), dtype=tf.float32)
wpe = mtf.get_variable(mesh, name='wpe', shape=mtf.Shape([seq_dim, dim]), dtype=tf.float32)
x = mtf.gather(wte, x, dim_num_tokens)
p = mtf.gather(wpe, mtf.range(mesh, seq_dim, dtype=tf.int32), dim_max_seq_len)
x = x + p
x = transformer(x, depth = depth, dim_head = dim_head, dim_features_head = dim_features_head, causal = causal)
logits = linear(x, dim_num_tokens, scope='to_logits')
return logits
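# Example usage -- a minimal sketch (not part of the original module) showing how
# `transformer_lm` could be wired into a mesh-tensorflow graph. The single-device
# mesh, dimension names and sizes below are illustrative assumptions only.
def _example_transformer_lm():
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, 'example_mesh')
    batch = mtf.Dimension('batch', 1)
    seq = mtf.Dimension('sequence', 1024)
    # token ids imported as an mtf tensor of shape [batch, sequence]
    tokens = mtf.import_tf_tensor(
        mesh,
        tf.zeros([batch.size, seq.size], dtype=tf.int32),
        shape=mtf.Shape([batch, seq]))
    logits = transformer_lm(
        tokens,
        dim = 512,
        num_tokens = 20000,
        depth = 2,
        max_seq_len = 1024,
        dim_head = 8,
        dim_features_head = 64,
        causal = True)
    return logits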
|
from setuptools import setup, find_packages
setup(
name = 'charformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Charformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/charformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'learned tokenization'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from charformer_pytorch.charformer_pytorch import GBST
|
import math
from math import gcd
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def lcm(*numbers):
return int(functools.reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def next_divisible_length(seqlen, multiple):
return math.ceil(seqlen / multiple) * multiple
def pad_to_multiple(tensor, multiple, *, seq_dim, dim = -1, value = 0.):
seqlen = tensor.shape[seq_dim]
length = next_divisible_length(seqlen, multiple)
if length == seqlen:
return tensor
remainder = length - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
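# A small illustrative check (not part of the original module): pad_to_multiple
# pads only on the right of `seq_dim`, rounding its length up to `multiple`.
def _example_pad_to_multiple():
    t = torch.randn(1, 10, 8)
    padded = pad_to_multiple(t, 4, seq_dim = 1, dim = -2)
    assert padded.shape == (1, 12, 8)
    return padded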
# helper classes
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size):
super().__init__()
self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, groups = dim_in)
self.proj_out = nn.Conv1d(dim_out, dim_out, 1)
def forward(self, x):
x = self.conv(x)
return self.proj_out(x)
# main class
class GBST(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_block_size = None,
blocks = None,
downsample_factor = 4,
score_consensus_attn = True
):
super().__init__()
assert exists(max_block_size) ^ exists(blocks), 'either max_block_size or blocks are given on initialization'
self.token_emb = nn.Embedding(num_tokens, dim)
if exists(blocks):
assert isinstance(blocks, tuple), 'blocks must be a tuple of block sizes'
self.blocks = tuple(map(lambda el: el if isinstance(el, tuple) else (el, 0), blocks))
assert all([(offset < block_size) for block_size, offset in self.blocks]), 'offset must be always smaller than the block size'
max_block_size = max(list(map(lambda t: t[0], self.blocks)))
else:
self.blocks = tuple(map(lambda el: (el, 0), range(1, max_block_size + 1)))
self.pos_conv = nn.Sequential(
Pad((0, 0, 0, max_block_size - 1)),
Rearrange('b n d -> b d n'),
DepthwiseConv1d(dim, dim, kernel_size = max_block_size),
Rearrange('b d n -> b n d')
)
self.score_fn = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... () -> ...')
)
self.score_consensus_attn = score_consensus_attn
assert downsample_factor <= max_block_size, 'final downsample factor should not exceed the maximum block size'
self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])
self.downsample_factor = downsample_factor
def forward(self, x, mask = None):
b, n, block_mult, ds_factor, device = *x.shape, self.block_pad_multiple, self.downsample_factor, x.device
m = next_divisible_length(n, ds_factor)
# get character token embeddings
x = self.token_emb(x)
# do a conv to generate the positions for the tokens
x = self.pos_conv(x)
# pad both sequence and mask to a length divisible by all block sizes from 0 to max block size
x = pad_to_multiple(x, block_mult, seq_dim = 1, dim = -2)
if exists(mask):
mask = pad_to_multiple(mask, block_mult, seq_dim = 1, dim = -1, value = False)
# compute representations for all blocks by mean pooling
block_masks = []
block_reprs = []
for block_size, offset in self.blocks:
# clone the input sequence as well as the mask, in order to pad for offsets
block_x = x.clone()
if exists(mask):
block_mask = mask.clone()
# pad for offsets, if needed
need_padding = offset > 0
if need_padding:
left_offset, right_offset = (block_size - offset), offset
block_x = F.pad(block_x, (0, 0, left_offset, right_offset), value = 0.)
if exists(mask):
block_mask = F.pad(block_mask, (left_offset, right_offset), value = False)
# group input sequence into blocks
blocks = rearrange(block_x, 'b (n m) d -> b n m d', m = block_size)
# either mean pool the blocks, or do a masked mean
if exists(mask):
mask_blocks = rearrange(block_mask, 'b (n m) -> b n m', m = block_size)
block_repr = masked_mean(blocks, mask_blocks, dim = -2)
else:
block_repr = blocks.mean(dim = -2)
# append the block representations, as well as the pooled block masks
block_repr = repeat(block_repr, 'b n d -> b (n m) d', m = block_size)
if need_padding:
block_repr = block_repr[:, left_offset:-right_offset]
block_reprs.append(block_repr)
if exists(mask):
mask_blocks = torch.any(mask_blocks, dim = -1)
mask_blocks = repeat(mask_blocks, 'b n -> b (n m)', m = block_size)
if need_padding:
mask_blocks = mask_blocks[:, left_offset:-right_offset]
block_masks.append(mask_blocks)
# stack all the block representations
block_reprs = torch.stack(block_reprs, dim = 2)
# calculate scores and softmax across the block size dimension
scores = self.score_fn(block_reprs)
if exists(mask):
block_masks = torch.stack(block_masks, dim = 2)
max_neg_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~block_masks, max_neg_value)
scores = scores.softmax(dim = 2)
# do the cheap consensus attention, eq (5) in paper
if self.score_consensus_attn:
score_sim = einsum('b i d, b j d -> b i j', scores, scores)
if exists(mask):
cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
max_neg_value = -torch.finfo(score_sim.dtype).max
score_sim = score_sim.masked_fill(~cross_mask, max_neg_value)
score_attn = score_sim.softmax(dim = -1)
scores = einsum('b i j, b j m -> b i m', score_attn, scores)
# multiply the block representations by the position-wise scores
scores = rearrange(scores, 'b n m -> b n m ()')
x = (block_reprs * scores).sum(dim = 2)
# truncate to length divisible by downsample factor
x = x[:, :m]
if exists(mask):
mask = mask[:, :m]
# final mean pooling downsample
x = rearrange(x, 'b (n m) d -> b n m d', m = ds_factor)
if exists(mask):
mask = rearrange(mask, 'b (n m) -> b n m', m = ds_factor)
x = masked_mean(x, mask, dim = 2)
mask = torch.any(mask, dim = -1)
else:
x = x.mean(dim = -2)
return x, mask
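# Example usage -- a minimal sketch with made-up sizes: 257 byte-level tokens,
# blocks of size 1..4 and a 4x downsample are illustrative choices only.
def _example_gbst():
    tokenizer = GBST(
        num_tokens = 257,
        dim = 512,
        max_block_size = 4,
        downsample_factor = 4,
        score_consensus_attn = True
    )
    tokens = torch.randint(0, 257, (1, 1024))
    mask = torch.ones(1, 1024).bool()
    x, mask = tokenizer(tokens, mask = mask)  # x: (1, 256, 512), mask: (1, 256)
    return x, mask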
|
import os
import re
from subprocess import check_call
from setuptools import setup, find_packages
from setuptools.command.install import install
__pkg_name__ = 'bonito'
verstrline = open(os.path.join(__pkg_name__, '__init__.py'), 'r').read()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
__version__ = mo.group(1)
else:
raise RuntimeError('Unable to find version string in "{}/__init__.py".'.format(__pkg_name__))
USE_CUDA111 = False
if USE_CUDA111:
print("Building with CUDA 11.1")
require_file = 'requirements-cuda111.txt'
package_name = "ont-%s-cuda111" % __pkg_name__
else:
print("Building with CUDA 10.2")
require_file = 'requirements.txt'
package_name = "ont-%s" % __pkg_name__
with open(require_file) as f:
requirements = f.read().splitlines()
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
class download_latest_model(install):
def run(self):
install.run(self)
check_call("bonito download --models --latest -f".split())
setup(
name=package_name,
version=__version__,
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
long_description=long_description,
long_description_content_type='text/markdown',
author='Oxford Nanopore Technologies, Ltd',
author_email='[email protected]',
url='https://github.com/nanoporetech/bonito',
cmdclass={
'install': download_latest_model,
},
entry_points = {
'console_scripts': [
'{0} = {0}:main'.format(__pkg_name__)
]
},
dependency_links=[
'https://download.pytorch.org/whl/torch_stable.html',
]
)
|
"""
Bonito Aligner
"""
from threading import Thread
from functools import partial
from mappy import Aligner, ThreadBuffer
from bonito.multiprocessing import ThreadMap, ProcessMap
def align_map(aligner, sequences, n_thread=4):
"""
Align `sequences` with minimap using `n_thread` threads.
"""
return ThreadMap(partial(MappyWorker, aligner), sequences, n_thread)
class MappyWorker(Thread):
"""
Thread that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, aligner, input_queue=None, output_queue=None):
super().__init__()
self.aligner = aligner
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
thrbuf = ThreadBuffer()
while True:
item = self.input_queue.get()
if item is StopIteration:
self.output_queue.put(item)
break
k, v = item
mapping = next(self.aligner.map(v['sequence'], buf=thrbuf, MD=True), None)
self.output_queue.put((k, {**v, 'mapping': mapping}))
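# Example usage -- a minimal sketch; 'reference.mmi' and the basecalls iterator are
# placeholders, and `Aligner` is mappy's minimap2 wrapper imported above.
def _example_align_map():
    aligner = Aligner('reference.mmi', preset='map-ont', best_n=1)
    basecalls = iter([('read_1', {'sequence': 'ACGTACGTACGT'})])
    for read_id, result in align_map(aligner, basecalls, n_thread=2):
        print(read_id, result['mapping'])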
|
"""
Bonito Fast5 Utils
"""
import sys
from glob import glob
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from itertools import chain, starmap
import torch
import numpy as np
from scipy.signal import find_peaks
from ont_fast5_api.fast5_interface import get_fast5_file
class Read:
def __init__(self, read, filename):
self.read_id = read.read_id
self.filename = filename.name
self.run_id = read.get_run_id()
if type(self.run_id) in (bytes, np.bytes_):
self.run_id = self.run_id.decode()
read_attrs = read.handle[read.raw_dataset_group_name].attrs
channel_info = read.handle[read.global_key + 'channel_id'].attrs
self.offset = int(channel_info['offset'])
self.sampling_rate = channel_info['sampling_rate']
self.scaling = channel_info['range'] / channel_info['digitisation']
self.mux = read_attrs['start_mux']
self.channel = channel_info['channel_number']
if type(self.channel) in (bytes, np.bytes_):
self.channel = self.channel.decode()
self.start = read_attrs['start_time'] / self.sampling_rate
self.duration = read_attrs['duration'] / self.sampling_rate
raw = read.handle[read.raw_dataset_name][:]
scaled = np.array(self.scaling * (raw + self.offset), dtype=np.float32)
trim_start, _ = trim(scaled[:8000])
scaled = scaled[trim_start:]
self.template_start = self.start + (1 / self.sampling_rate) * trim_start
self.template_duration = self.duration - (1 / self.sampling_rate) * trim_start
if len(scaled) > 8000:
med, mad = med_mad(scaled)
self.signal = (scaled - med) / mad
else:
self.signal = norm_by_noisiest_section(scaled)
def __repr__(self):
return "Read('%s')" % self.read_id
class ReadChunk:
def __init__(self, read, chunk, i, n):
self.read_id = "%s:%i:%i" % (read.read_id, i, n)
self.run_id = read.run_id
self.filename = read.filename
self.mux = read.mux
self.channel = read.channel
self.start = read.start
self.duration = read.duration
self.template_start = self.start
self.template_duration = self.duration
self.signal = chunk
def __repr__(self):
return "ReadChunk('%s')" % self.read_id
def trim(signal, window_size=40, threshold_factor=2.4, min_elements=3):
min_trim = 10
signal = signal[min_trim:]
med, mad = med_mad(signal[-(window_size*100):])
threshold = med + mad * threshold_factor
num_windows = len(signal) // window_size
seen_peak = False
for pos in range(num_windows):
start = pos * window_size
end = start + window_size
window = signal[start:end]
if len(window[window > threshold]) > min_elements or seen_peak:
seen_peak = True
if window[-1] > threshold:
continue
return min(end + min_trim, len(signal)), len(signal)
return min_trim, len(signal)
def med_mad(x, factor=1.4826):
"""
Calculate signal median and median absolute deviation
"""
med = np.median(x)
mad = np.median(np.absolute(x - med)) * factor
return med, mad
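# A small illustrative example (not from the original module): the 1.4826 factor
# makes the MAD a consistent estimator of the standard deviation for Gaussian data,
# and the estimate is robust to outliers.
def _example_med_mad():
    x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    med, mad = med_mad(x)
    # med == 3.0; absolute deviations are [2, 1, 0, 1, 97], so mad == 1 * 1.4826
    return med, mad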
def norm_by_noisiest_section(signal, samples=100, threshold=6.0):
"""
Normalise using the medmad from the longest continuous region where the
noise is above some threshold relative to the std of the full signal.
"""
threshold = signal.std() / threshold
noise = np.ones(signal.shape)
for idx in np.arange(signal.shape[0] // samples):
window = slice(idx * samples, (idx + 1) * samples)
noise[window] = np.where(signal[window].std() > threshold, 1, 0)
# start and end low for peak finding
noise[0] = 0; noise[-1] = 0
peaks, info = find_peaks(noise, width=(None, None))
if len(peaks):
widest = np.argmax(info['widths'])
med, mad = med_mad(signal[info['left_bases'][widest]: info['right_bases'][widest]])
else:
med, mad = med_mad(signal)
return (signal - med) / mad
def read_chunks(read, chunksize=4000, overlap=400):
"""
Split a Read into fixed-sized ReadChunks
"""
if len(read.signal) < chunksize:
return
_, offset = divmod(len(read.signal) - chunksize, chunksize - overlap)
signal = torch.from_numpy(read.signal[offset:])
blocks = signal.unfold(0, chunksize, chunksize - overlap)
for i, block in enumerate(blocks):
yield ReadChunk(read, block.numpy(), i+1, blocks.shape[0])
def get_raw_data(filename, read_ids=None, skip=False):
"""
Get the raw signal and read id from the fast5 files
"""
with get_fast5_file(filename, 'r') as f5_fh:
for read_id in f5_fh.get_read_ids():
if read_ids is None or (read_id in read_ids) ^ skip:
yield Read(f5_fh.get_read(read_id), filename)
def get_read_ids(filename, read_ids=None, skip=False):
"""
Get all the read_ids from the file `filename`.
"""
with get_fast5_file(filename, 'r') as f5_fh:
ids = [(filename, rid) for rid in f5_fh.get_read_ids()]
if read_ids is None:
return ids
return [rid for rid in ids if (rid[1] in read_ids) ^ skip]
def get_raw_data_for_read(info):
"""
Get the raw signal from the fast5 file for a given filename, read_id pair
"""
filename, read_id = info
with get_fast5_file(filename, 'r') as f5_fh:
return Read(f5_fh.get_read(read_id), filename)
def get_reads(directory, read_ids=None, skip=False, max_read_size=0, n_proc=1, recursive=False, cancel=None):
"""
Get all reads in a given `directory`.
"""
pattern = "**/*.fast5" if recursive else "*.fast5"
get_filtered_reads = partial(get_read_ids, read_ids=read_ids, skip=skip)
with Pool(n_proc) as pool:
for job in chain(pool.imap(get_filtered_reads, (Path(x) for x in glob(directory + "/" + pattern, recursive=True)))):
for read in pool.imap(get_raw_data_for_read, job):
if max_read_size > 0 and len(read.signal) > max_read_size:
sys.stderr.write(
"> skipping long read %s (%s samples)\n" % (read.read_id, len(read.signal))
)
continue
yield read
if cancel is not None and cancel.is_set():
return
|
"""
Bonito utils
"""
import os
import re
import sys
import random
from glob import glob
from itertools import groupby
from operator import itemgetter
from importlib import import_module
from collections import deque, defaultdict, OrderedDict
import toml
import torch
import parasail
import numpy as np
from torch.cuda import get_device_capability
try:
from claragenomics.bindings import cuda
from claragenomics.bindings.cudapoa import CudaPoaBatch
except ImportError:
pass
__dir__ = os.path.dirname(os.path.realpath(__file__))
__data__ = os.path.join(__dir__, "data")
__models__ = os.path.join(__dir__, "models")
__configs__ = os.path.join(__dir__, "models/configs")
split_cigar = re.compile(r"(?P<len>\d+)(?P<op>\D+)")
default_data = os.path.join(__data__, "dna_r9.4.1")
default_config = os.path.join(__configs__, "[email protected]")
def init(seed, device):
"""
Initialise random libs and setup cudnn
https://pytorch.org/docs/stable/notes/randomness.html
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device == "cpu": return
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(torch.cuda.is_available())
def permute(x, input_layout, output_layout):
"""
Permute `x` from `input_layout` to `output_layout`
>>> permute(x, 'TNC', 'NTC')
"""
if input_layout == output_layout: return x
return x.permute(*[input_layout.index(x) for x in output_layout])
def concat(xs, dim=0):
"""
Type agnostic concat.
"""
if isinstance(xs[0], torch.Tensor):
return torch.cat(xs, dim=dim)
elif isinstance(xs[0], np.ndarray):
return np.concatenate(xs, axis=dim)
elif isinstance(xs[0], list):
return [x for l in xs for x in l]
elif isinstance(xs[0], str):
return ''.join(xs)
elif isinstance(xs[0], dict):
return {k: concat([x[k] for x in xs], dim) for k in xs[0].keys()}
else:
raise TypeError
def select_range(x, start, end, dim=0):
"""
Type agnostic range select.
"""
if isinstance(x, dict):
return {k: select_range(v, start, end, dim) for (k, v) in x.items()}
if dim == 0 or isinstance(x, list): return x[start:end]
return x[(*(slice(None),)*dim, slice(start, end))]
def size(x, dim=0):
"""
Type agnostic size.
"""
if hasattr(x, 'shape'):
return x.shape[dim]
elif dim == 0:
return len(x)
raise TypeError
def half_supported():
"""
Returns whether FP16 is supported on the GPU
"""
try:
return get_device_capability()[0] >= 7
except:
return False
def phred(prob, scale=1.0, bias=0.0):
"""
Converts `prob` into an ascii encoded phred quality score between 0 and 40.
"""
p = max(1 - prob, 1e-4)
q = -10 * np.log10(p) * scale + bias
return chr(int(np.round(q) + 33))
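# A small worked example (illustrative): a base called with probability 0.99 has
# error probability 0.01, i.e. Q20, which is encoded as chr(20 + 33) == '5'.
def _example_phred():
    assert phred(0.99) == '5'
    return phred(0.99)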
def mean_qscore_from_qstring(qstring):
"""
Convert qstring into a mean qscore
"""
if len(qstring) == 0: return 0.0
err_probs = [10**((ord(c) - 33) / -10) for c in qstring]
mean_err = np.mean(err_probs)
return -10 * np.log10(max(mean_err, 1e-4))
def decode_ref(encoded, labels):
"""
Convert an integer encoded reference into a string and remove blanks
"""
return ''.join(labels[e] for e in encoded if e)
def column_to_set(filename, idx=0, skip_header=False):
"""
Pull a column from a file and return a set of the values.
"""
if filename and os.path.isfile(filename):
with open(filename, 'r') as tsv:
if skip_header:
next(tsv)
return {line.strip().split()[idx] for line in tsv.readlines()}
def chunk(signal, chunksize, overlap):
"""
Convert a read into overlapping chunks before calling
"""
T = signal.shape[0]
if chunksize == 0:
chunks = signal[None, :]
elif T < chunksize:
chunks = torch.nn.functional.pad(signal, (chunksize - T, 0))[None, :]
else:
stub = (T - overlap) % (chunksize - overlap)
chunks = signal[stub:].unfold(0, chunksize, chunksize - overlap)
if stub > 0:
chunks = torch.cat([signal[None, :chunksize], chunks], dim=0)
return chunks.unsqueeze(1)
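# An illustrative shape check (numbers are made up): a 10,000-sample signal with
# chunksize 4000 and overlap 400 gives three chunks of shape (1, 4000); the first
# chunk is taken from the start of the signal to cover the leading stub.
def _example_chunk():
    signal = torch.zeros(10000)
    chunks = chunk(signal, chunksize=4000, overlap=400)
    assert chunks.shape == (3, 1, 4000)
    return chunks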
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
"""
Stitch chunks together with a given overlap
"""
if chunks.shape[0] == 1: return chunks.squeeze(0)
semi_overlap = overlap // 2
start, end = semi_overlap // stride, (chunksize - semi_overlap) // stride
stub = (length - overlap) % (chunksize - overlap)
first_chunk_end = (stub + semi_overlap) // stride if (stub > 0) else end
if reverse:
chunks = list(chunks)
return concat([
chunks[-1][:-start], *(x[-end:-start] for x in reversed(chunks[1:-1])), chunks[0][-first_chunk_end:]
])
else:
return concat([
chunks[0, :first_chunk_end], *chunks[1:-1, start:end], chunks[-1, start:]
])
def batchify(items, batchsize, dim=0):
"""
Batch up items into batches of at most `batchsize`.
"""
stack, pos = [], 0
for k, v in items:
breaks = range(batchsize - pos, size(v, dim), batchsize)
for start, end in zip([0, *breaks], [*breaks, size(v, dim)]):
sub_batch = select_range(v, start, end, dim)
stack.append(((k, (pos, pos + end - start)), sub_batch))
if pos + end - start == batchsize:
ks, vs = zip(*stack)
yield ks, concat(vs, dim)
stack, pos = [], 0
else:
pos += end - start
if len(stack):
ks, vs = zip(*stack)
yield ks, concat(vs, dim)
def unbatchify(batches, dim=0):
"""
Reconstruct the original items from batches.
"""
batches = (
(k, select_range(v, start, end, dim))
for sub_batches, v in batches
for k, (start, end) in sub_batches
)
return (
(k, concat([v for (k, v) in group], dim))
for k, group in groupby(batches, itemgetter(0))
)
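# An illustrative round trip (keys and array sizes are made up): batchify splits
# keyed arrays into batches of at most `batchsize` rows and unbatchify reassembles
# the original arrays per key.
def _example_batchify_roundtrip():
    items = [('a', np.arange(5)), ('b', np.arange(7))]
    batches = list(batchify(items, batchsize=4))
    restored = {k: v for k, v in unbatchify(iter(batches))}
    assert np.array_equal(restored['a'], np.arange(5))
    assert np.array_equal(restored['b'], np.arange(7))
    return restored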
def load_data(limit=None, directory=None):
"""
Load the training data
"""
if directory is None:
directory = default_data
chunks = np.load(os.path.join(directory, "chunks.npy"), mmap_mode='r')
targets = np.load(os.path.join(directory, "references.npy"), mmap_mode='r')
lengths = np.load(os.path.join(directory, "reference_lengths.npy"), mmap_mode='r')
indices = os.path.join(directory, "indices.npy")
if os.path.exists(indices):
idx = np.load(indices, mmap_mode='r')
idx = idx[idx < lengths.shape[0]]
if limit:
idx = idx[:limit]
return chunks[idx, :], targets[idx, :], lengths[idx]
if limit:
chunks = chunks[:limit]
targets = targets[:limit]
lengths = lengths[:limit]
return np.array(chunks), np.array(targets), np.array(lengths)
def load_symbol(config, symbol):
"""
Dynamically load a symbol from the module specified in the model config.
"""
if not isinstance(config, dict):
if not os.path.isdir(config) and os.path.isdir(os.path.join(__models__, config)):
dirname = os.path.join(__models__, config)
else:
dirname = config
config = toml.load(os.path.join(dirname, 'config.toml'))
imported = import_module(config['model']['package'])
return getattr(imported, symbol)
def match_names(state_dict, model):
keys_and_shapes = lambda state_dict: zip(*[
(k, s) for s, i, k in sorted([(v.shape, i, k)
for i, (k, v) in enumerate(state_dict.items())])
])
k1, s1 = keys_and_shapes(state_dict)
k2, s2 = keys_and_shapes(model.state_dict())
assert s1 == s2
remap = dict(zip(k1, k2))
return OrderedDict([(k, remap[k]) for k in state_dict.keys()])
def load_model(dirname, device, weights=None, half=None, chunksize=0):
"""
Load a model from disk
"""
if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
dirname = os.path.join(__models__, dirname)
if not weights: # take the latest checkpoint
weight_files = glob(os.path.join(dirname, "weights_*.tar"))
if not weight_files:
raise FileNotFoundError("no model weights found in '%s'" % dirname)
weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
device = torch.device(device)
config = toml.load(os.path.join(dirname, 'config.toml'))
weights = os.path.join(dirname, 'weights_%s.tar' % weights)
Model = load_symbol(config, "Model")
model = Model(config)
state_dict = torch.load(weights, map_location=device)
state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.', '')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if half is None:
half = half_supported()
if half: model = model.half()
model.eval()
model.to(device)
return model
def parasail_to_sam(result, seq):
"""
Extract reference start and sam compatible cigar string.
:param result: parasail alignment result.
:param seq: query sequence.
:returns: reference start coordinate, cigar string.
"""
cigstr = result.cigar.decode.decode()
first = re.search(split_cigar, cigstr)
first_count, first_op = first.groups()
prefix = first.group()
rstart = result.cigar.beg_ref
cliplen = result.cigar.beg_query
clip = '' if cliplen == 0 else '{}S'.format(cliplen)
if first_op == 'I':
pre = '{}S'.format(int(first_count) + cliplen)
elif first_op == 'D':
pre = clip
rstart = int(first_count)
else:
pre = '{}{}'.format(clip, prefix)
mid = cigstr[len(prefix):]
end_clip = len(seq) - result.end_query - 1
suf = '{}S'.format(end_clip) if end_clip > 0 else ''
new_cigstr = ''.join((pre, mid, suf))
return rstart, new_cigstr
def accuracy(ref, seq, balanced=False, min_coverage=0.0):
"""
Calculate the accuracy between `ref` and `seq`
"""
alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
counts = defaultdict(int)
q_coverage = len(alignment.traceback.query) / len(seq)
r_coverage = len(alignment.traceback.ref) / len(ref)
if r_coverage < min_coverage:
return 0.0
_, cigar = parasail_to_sam(alignment, seq)
for count, op in re.findall(split_cigar, cigar):
counts[op] += int(count)
if balanced:
accuracy = (counts['='] - counts['I']) / (counts['='] + counts['X'] + counts['D'])
else:
accuracy = counts['='] / (counts['='] + counts['I'] + counts['X'] + counts['D'])
return accuracy * 100
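# A small illustrative example (sequences are made up): one substitution in a
# 12-base alignment gives roughly 11/12, i.e. ~91.7% accuracy.
def _example_accuracy():
    ref = "ACGTACGTACGT"
    seq = "ACGTACGAACGT"
    return accuracy(ref, seq)  # ~91.7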
def print_alignment(ref, seq):
"""
Print the alignment between `ref` and `seq`
"""
alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
print(alignment.traceback.ref)
print(alignment.traceback.comp)
print(alignment.traceback.query)
print(" Score=%s" % alignment.score)
return alignment.score
def poa(groups, max_poa_sequences=100, gpu_mem_per_batch=0.9):
"""
Generate consensus for POA groups.
Args:
groups : A list of lists of sequences for which consensus is to be generated.
"""
free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
gpu_mem_per_batch *= free
batch = CudaPoaBatch(max_poa_sequences, gpu_mem_per_batch, stream=None, output_type="consensus")
results = []
for i, group in enumerate(groups, start=1):
group_status, seq_status = batch.add_poa_group(group)
# Once batch is full, run POA processing
if group_status == 1 or i == len(groups):
batch.generate_poa()
consensus, coverage, status = batch.get_consensus()
results.extend(consensus)
batch.reset()
group_status, seq_status = batch.add_poa_group(group)
return results
|
"""
Bonito nn modules.
"""
import torch
from torch import nn
from torch.nn import Module
from torch.nn.init import orthogonal_
layers = {}
def register(layer):
layer.name = layer.__name__.lower()
layers[layer.name] = layer
return layer
register(torch.nn.ReLU)
register(torch.nn.Tanh)
@register
class Swish(torch.nn.SiLU):
pass
@register
class Serial(torch.nn.Sequential):
def __init__(self, sublayers):
super().__init__(*sublayers)
def to_dict(self, include_weights=False):
return {
'sublayers': [to_dict(layer, include_weights) for layer in self._modules.values()]
}
@register
class Reverse(Module):
def __init__(self, sublayers):
super().__init__()
self.layer = Serial(sublayers) if isinstance(sublayers, list) else sublayers
def forward(self, x):
return self.layer(x.flip(0)).flip(0)
def to_dict(self, include_weights=False):
if isinstance(self.layer, Serial):
return self.layer.to_dict(include_weights)
else:
return {'sublayers': to_dict(self.layer, include_weights)}
@register
class Convolution(Module):
def __init__(self, insize, size, winlen, stride=1, padding=0, bias=True, activation=None):
super().__init__()
self.conv = torch.nn.Conv1d(insize, size, winlen, stride=stride, padding=padding, bias=bias)
self.activation = layers.get(activation, lambda: activation)()
def forward(self, x):
if self.activation is not None:
return self.activation(self.conv(x))
return self.conv(x)
def to_dict(self, include_weights=False):
res = {
"insize": self.conv.in_channels,
"size": self.conv.out_channels,
"bias": self.conv.bias is not None,
"winlen": self.conv.kernel_size[0],
"stride": self.conv.stride[0],
"padding": self.conv.padding[0],
"activation": self.activation.name if self.activation else None,
}
if include_weights:
res['params'] = {
'W': self.conv.weight, 'b': self.conv.bias if self.conv.bias is not None else []
}
return res
@register
class LinearCRFEncoder(Module):
def __init__(self, insize, n_base, state_len, bias=True, scale=None, activation=None, blank_score=None):
super().__init__()
self.n_base = n_base
self.state_len = state_len
self.blank_score = blank_score
size = (n_base + 1) * n_base**state_len if blank_score is None else n_base**(state_len + 1)
self.linear = torch.nn.Linear(insize, size, bias=bias)
self.activation = layers.get(activation, lambda: activation)()
self.scale = scale
def forward(self, x):
scores = self.linear(x)
if self.activation is not None:
scores = self.activation(scores)
if self.scale is not None:
scores = scores * self.scale
if self.blank_score is not None:
T, N, C = scores.shape
s = torch.tensor(self.blank_score, device=scores.device, dtype=scores.dtype)
scores = torch.cat([s.expand(T, N, C//self.n_base, 1), scores.reshape(T, N, C//self.n_base, self.n_base)], axis=-1).reshape(T, N, -1)
return scores
def to_dict(self, include_weights=False):
res = {
'insize': self.linear.in_features,
'n_base': self.n_base,
'state_len': self.state_len,
'bias': self.linear.bias is not None,
'scale': self.scale,
'activation': self.activation.name if self.activation else None,
'blank_score': self.blank_score,
}
if include_weights:
res['params'] = {
'W': self.linear.weight, 'b': self.linear.bias
if self.linear.bias is not None else []
}
return res
@register
class SHA(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** -0.5
self.to_q = nn.Sequential(nn.Linear(dim, dim), nn.LayerNorm(dim))
def forward(self, x, kv):
x = x.transpose(0, 1)
kv = kv.transpose(0, 1)
q = self.to_q(x)
sim = torch.matmul(q, kv.transpose(-1, -2)) * self.scale
attn = sim.softmax(dim=-1)
out = torch.matmul(attn, kv)
return out.transpose(0, 1)
@register
class SHABlock(Module):
""" https://arxiv.org/abs/1911.11423 """
def __init__(self, dim, ff_mult=4):
super().__init__()
self.attn_query_norm = nn.LayerNorm(dim)
self.attn_kv_norm = nn.LayerNorm(dim)
self.attn = SHA(dim=dim)
self.ff_residual_norm = nn.LayerNorm(dim)
self.ff = Serial([
nn.LayerNorm(dim),
nn.Linear(dim, dim * ff_mult),
nn.GELU(),
nn.Linear(dim * ff_mult, dim),
])
def forward(self, x):
kv = self.attn_kv_norm(x)
x = self.attn_query_norm(x)
x = self.attn(x, kv) + x
x = self.ff(x) + self.ff_residual_norm(x)
return x
@register
class Permute(Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, x):
return x.permute(*self.dims)
def to_dict(self, include_weights=False):
return {'dims': self.dims}
def truncated_normal(size, dtype=torch.float32, device=None, num_resample=5):
x = torch.empty(size + (num_resample,), dtype=torch.float32, device=device).normal_()
i = ((x < 2) & (x > -2)).max(-1, keepdim=True)[1]
return torch.clamp_(x.gather(-1, i).squeeze(-1), -2, 2)
class RNNWrapper(Module):
def __init__(
self, rnn_type, *args, reverse=False, orthogonal_weight_init=True, disable_state_bias=True, bidirectional=False, **kwargs
):
super().__init__()
if reverse and bidirectional:
raise Exception("'reverse' and 'bidirectional' should not both be set to True")
self.reverse = reverse
self.rnn = rnn_type(*args, bidirectional=bidirectional, **kwargs)
self.init_orthogonal(orthogonal_weight_init)
self.init_biases()
if disable_state_bias: self.disable_state_bias()
def forward(self, x):
if self.reverse: x = x.flip(0)
y, h = self.rnn(x)
if self.reverse: y = y.flip(0)
return y
def init_biases(self, types=('bias_ih',)):
for name, param in self.rnn.named_parameters():
if any(k in name for k in types):
with torch.no_grad():
param.set_(0.5*truncated_normal(param.shape, dtype=param.dtype, device=param.device))
def init_orthogonal(self, types=True):
if not types: return
if types == True: types = ('weight_ih', 'weight_hh')
for name, x in self.rnn.named_parameters():
if any(k in name for k in types):
for i in range(0, x.size(0), self.rnn.hidden_size):
orthogonal_(x[i:i+self.rnn.hidden_size])
def disable_state_bias(self):
for name, x in self.rnn.named_parameters():
if 'bias_hh' in name:
x.requires_grad = False
x.zero_()
@register
class LSTM(RNNWrapper):
def __init__(self, size, insize, bias=True, reverse=False):
super().__init__(torch.nn.LSTM, size, insize, bias=bias, reverse=reverse)
def to_dict(self, include_weights=False):
res = {
'size': self.rnn.hidden_size,
'insize': self.rnn.input_size,
'bias': self.rnn.bias,
'reverse': self.reverse,
}
if include_weights:
res['params'] = {
'iW': self.rnn.weight_ih_l0.reshape(4, self.rnn.hidden_size, self.rnn.input_size),
'sW': self.rnn.weight_hh_l0.reshape(4, self.rnn.hidden_size, self.rnn.hidden_size),
'b': self.rnn.bias_ih_l0.reshape(4, self.rnn.hidden_size)
}
return res
def to_dict(layer, include_weights=False):
if hasattr(layer, 'to_dict'):
return {'type': layer.name, **layer.to_dict(include_weights)}
return {'type': layer.name}
def from_dict(model_dict, layer_types=None):
model_dict = model_dict.copy()
if layer_types is None:
layer_types = layers
type_name = model_dict.pop('type')
typ = layer_types[type_name]
if 'sublayers' in model_dict:
sublayers = model_dict['sublayers']
model_dict['sublayers'] = [
from_dict(x, layer_types) for x in sublayers
] if isinstance(sublayers, list) else from_dict(sublayers, layer_types)
try:
layer = typ(**model_dict)
except Exception as e:
raise Exception(f'Failed to build layer of type {typ} with args {model_dict}') from e
return layer
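# Example usage -- a minimal sketch of building a made-up model fragment from a
# config-style dict via the registry above.
def _example_from_dict():
    model = from_dict({
        'type': 'serial',
        'sublayers': [
            {'type': 'convolution', 'insize': 1, 'size': 16, 'winlen': 5,
             'stride': 1, 'padding': 2, 'activation': 'swish'},
            {'type': 'permute', 'dims': [2, 0, 1]},
        ]
    })
    x = torch.randn(8, 1, 1000)   # (N, C, T)
    scores = model(x)             # permuted to (T, N, C) after the convolution
    assert scores.shape == (1000, 8, 16)
    return model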
|
"""
Bonito Input/Output
"""
import os
import sys
import csv
import pandas as pd
from warnings import warn
from threading import Thread
from logging import getLogger
from contextlib import contextmanager
from os.path import realpath, splitext, dirname
import numpy as np
from mappy import revcomp
import bonito
from bonito.cli.convert import typical_indices
logger = getLogger('bonito')
class CSVLogger:
def __init__(self, filename, sep=','):
self.filename = str(filename)
if os.path.exists(self.filename):
with open(self.filename) as f:
self.columns = csv.DictReader(f).fieldnames
else:
self.columns = None
self.fh = open(self.filename, 'a', newline='')
self.csvwriter = csv.writer(self.fh, delimiter=sep)
self.count = 0
def set_columns(self, columns):
if self.columns:
raise Exception('Columns already set')
self.columns = list(columns)
self.csvwriter.writerow(self.columns)
def append(self, row):
if self.columns is None:
self.set_columns(row.keys())
self.csvwriter.writerow([row.get(k, '-') for k in self.columns])
self.count += 1
if self.count > 100:
self.count = 0
self.fh.flush()
def close(self):
self.fh.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
@contextmanager
def devnull(*args, **kwds):
"""
A context manager that redirects stdout & stderr to devnull.
"""
save_fds = [os.dup(1), os.dup(2)]
null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
try:
yield
finally:
os.dup2(save_fds[0], 1)
os.dup2(save_fds[1], 2)
for fd in null_fds + save_fds: os.close(fd)
def write_fasta(header, sequence, fd=sys.stdout):
"""
Write a fasta record to a file descriptor.
"""
fd.write(">%s\n" % header)
fd.write("%s\n" % sequence)
fd.flush()
def write_fastq(header, sequence, qstring, fd=sys.stdout):
"""
Write a fastq record to a file descriptor.
"""
fd.write("@%s\n" % header)
fd.write("%s\n" % sequence)
fd.write("+\n")
fd.write("%s\n" % qstring)
fd.flush()
def write_sam_header(aligner, fd=sys.stdout, sep='\t'):
"""
Write the SQ & PG sam headers to a file descriptor.
"""
fd.write('%s\n' % os.linesep.join([
sep.join([
'@SQ', 'SN:%s' % name, 'LN:%s' % len(aligner.seq(name))
]) for name in aligner.seq_names
]))
fd.write('%s\n' % sep.join([
'@PG',
'ID:bonito',
'PN:bonito',
'VN:%s' % bonito.__version__,
'CL:%s' % ' '.join(sys.argv),
]))
fd.flush()
def write_sam(read_id, sequence, qstring, mapping, fd=sys.stdout, unaligned=False, sep='\t'):
"""
Write a sam record to a file descriptor.
"""
if unaligned:
fd.write("%s\n" % sep.join(map(str, [
read_id, 4, '*', 0, 0, '*', '*', 0, 0, sequence, qstring, 'NM:i:0'
])))
else:
softclip = [
'%sS' % mapping.q_st if mapping.q_st else '',
mapping.cigar_str,
'%sS' % (len(sequence) - mapping.q_en) if len(sequence) - mapping.q_en else ''
]
fd.write("%s\n" % sep.join(map(str, [
read_id,
0 if mapping.strand == +1 else 16,
mapping.ctg,
mapping.r_st + 1,
mapping.mapq,
''.join(softclip if mapping.strand == +1 else softclip[::-1]),
'*', 0, 0,
sequence if mapping.strand == +1 else revcomp(sequence),
qstring,
'NM:i:%s' % mapping.NM,
'MD:Z:%s' % mapping.MD,
])))
fd.flush()
def summary_file():
"""
Return the filename to use for the summary tsv.
"""
stdout = realpath('/dev/fd/1')
if sys.stdout.isatty() or stdout.startswith('/proc'):
return 'summary.tsv'
return '%s_summary.tsv' % splitext(stdout)[0]
summary_field_names = [
'filename',
'read_id',
'run_id',
'channel',
'mux',
'start_time',
'duration',
'template_start',
'template_duration',
'sequence_length_template',
'mean_qscore_template',
#if alignment
'alignment_genome',
'alignment_genome_start',
'alignment_genome_end',
'alignment_strand_start',
'alignment_strand_end',
'alignment_direction',
'alignment_length',
'alignment_num_aligned',
'alignment_num_correct',
'alignment_num_insertions',
'alignment_num_deletions',
'alignment_num_substitutions',
'alignment_mapq',
'alignment_strand_coverage',
'alignment_identity',
'alignment_accuracy',
]
def summary_row(read, seqlen, qscore, alignment=False):
"""
Summary tsv row.
"""
fields = [
read.filename,
read.read_id,
read.run_id,
read.channel,
read.mux,
read.start,
read.duration,
read.template_start,
read.template_duration,
seqlen,
qscore,
]
if alignment:
ins = sum(count for count, op in alignment.cigar if op == 1)
dels = sum(count for count, op in alignment.cigar if op == 2)
subs = alignment.NM - ins - dels
length = alignment.blen
matches = length - ins - dels
correct = alignment.mlen
fields.extend([
alignment.ctg,
alignment.r_st,
alignment.r_en,
alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en,
alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st,
'+' if alignment.strand == +1 else '-',
length, matches, correct,
ins, dels, subs,
alignment.mapq,
(alignment.q_en - alignment.q_st) / seqlen,
correct / matches,
correct / length,
])
elif alignment is None:
fields.extend(
['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
)
return dict(zip(summary_field_names, fields))
duplex_summary_field_names = [
'filename_template',
'read_id_template',
'filename_complement',
'read_id_complement',
'run_id',
'channel_template',
'mux_template',
'channel_complement',
'mux_complement',
'sequence_length_duplex',
'mean_qscore_duplex',
#if alignment
'alignment_genome',
'alignment_genome_start',
'alignment_genome_end',
'alignment_strand_start',
'alignment_strand_end',
'alignment_direction',
'alignment_length',
'alignment_num_aligned',
'alignment_num_correct',
'alignment_num_insertions',
'alignment_num_deletions',
'alignment_num_substitutions',
'alignment_mapq',
'alignment_strand_coverage',
'alignment_identity',
'alignment_accuracy',
]
def duplex_summary_row(read_temp, comp_read, seqlen, qscore, alignment=False):
"""
Duplex summary tsv row.
"""
fields = [
read_temp.filename,
read_temp.read_id,
comp_read.filename,
comp_read.read_id,
read_temp.run_id,
read_temp.channel,
read_temp.mux,
comp_read.channel,
comp_read.mux,
seqlen,
qscore,
]
if alignment:
ins = sum(count for count, op in alignment.cigar if op == 1)
dels = sum(count for count, op in alignment.cigar if op == 2)
subs = alignment.NM - ins - dels
length = alignment.blen
matches = length - ins - dels
correct = alignment.mlen
fields.extend([
alignment.ctg,
alignment.r_st,
alignment.r_en,
alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en,
alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st,
'+' if alignment.strand == +1 else '-',
length, matches, correct,
ins, dels, subs,
alignment.mapq,
(alignment.q_en - alignment.q_st) / seqlen,
correct / matches,
correct / length,
])
elif alignment is None:
fields.extend(
['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
)
return dict(zip(duplex_summary_field_names, fields))
class Writer(Thread):
def __init__(self, iterator, aligner, fd=sys.stdout, fastq=False, duplex=False):
super().__init__()
self.fd = fd
self.log = []
self.fastq = fastq
self.duplex = duplex
self.aligner = aligner
self.iterator = iterator
self.write_headers()
def write_headers(self):
if self.aligner:
write_sam_header(self.aligner, fd=self.fd)
def run(self):
with CSVLogger(summary_file(), sep='\t') as summary:
for read, res in self.iterator:
seq = res['sequence']
qstring = res.get('qstring', '*')
mean_qscore = res.get('mean_qscore', 0.0)
mapping = res.get('mapping', False)
if self.duplex:
samples = len(read[0].signal) + len(read[1].signal)
read_id = '%s;%s' % (read[0].read_id, read[1].read_id)
else:
samples = len(read.signal)
read_id = read.read_id
if len(seq):
if self.aligner:
write_sam(read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
else:
if self.fastq:
write_fastq(read_id, seq, qstring, fd=self.fd)
else:
write_fasta(read_id, seq, fd=self.fd)
if self.duplex:
summary.append(duplex_summary_row(read[0], read[1], len(seq), mean_qscore, alignment=mapping))
else:
summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
self.log.append((read_id, samples))
else:
logger.warn("> skipping empty sequence %s", read_id)
class CTCWriter(Thread):
"""
CTC writer thread that writes numpy training data.
"""
def __init__(self, iterator, aligner, min_coverage, min_accuracy, fd=sys.stdout):
super().__init__()
self.fd = fd
self.log = []
self.aligner = aligner
self.iterator = iterator
self.min_coverage = min_coverage
self.min_accuracy = min_accuracy
self.write_headers()
def write_headers(self):
if self.aligner:
write_sam_header(self.aligner, fd=self.fd)
def run(self):
chunks = []
targets = []
lengths = []
with CSVLogger(summary_file(), sep='\t') as summary:
for read, ctc_data in self.iterator:
seq = ctc_data['sequence']
qstring = ctc_data['qstring']
mean_qscore = ctc_data['mean_qscore']
mapping = ctc_data.get('mapping', False)
self.log.append((read.read_id, len(read.signal)))
if len(seq) == 0 or mapping is None:
continue
cov = (mapping.q_en - mapping.q_st) / len(seq)
acc = mapping.mlen / mapping.blen
refseq = self.aligner.seq(mapping.ctg, mapping.r_st, mapping.r_en)
if acc < self.min_accuracy or cov < self.min_coverage or 'N' in refseq:
continue
write_sam(read.read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
if mapping.strand == -1:
refseq = revcomp(refseq)
target = [int(x) for x in refseq.translate({65: '1', 67: '2', 71: '3', 84: '4'})]
targets.append(target)
chunks.append(read.signal)
lengths.append(len(target))
if len(chunks) == 0:
sys.stderr.write("> no suitable ctc data to write\n")
return
chunks = np.array(chunks, dtype=np.float16)
targets_ = np.zeros((chunks.shape[0], max(lengths)), dtype=np.uint8)
for idx, target in enumerate(targets): targets_[idx, :len(target)] = target
lengths = np.array(lengths, dtype=np.uint16)
indices = np.random.permutation(typical_indices(lengths))
chunks = chunks[indices]
targets_ = targets_[indices]
lengths = lengths[indices]
summary = pd.read_csv(summary_file(), sep='\t')
summary.iloc[indices].to_csv(summary_file(), sep='\t', index=False)
output_directory = '.' if sys.stdout.isatty() else dirname(realpath('/dev/fd/1'))
np.save(os.path.join(output_directory, "chunks.npy"), chunks)
np.save(os.path.join(output_directory, "references.npy"), targets_)
np.save(os.path.join(output_directory, "reference_lengths.npy"), lengths)
sys.stderr.write("> written ctc training data\n")
sys.stderr.write(" - chunks.npy with shape (%s)\n" % ','.join(map(str, chunks.shape)))
sys.stderr.write(" - references.npy with shape (%s)\n" % ','.join(map(str, targets_.shape)))
sys.stderr.write(" - reference_lengths.npy shape (%s)\n" % ','.join(map(str, lengths.shape)))
def stop(self):
self.join()
|
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from bonito.cli import basecaller, train, evaluate, view, convert, download, export, duplex
modules = [
'basecaller', 'train', 'evaluate', 'view', 'convert', 'download', 'export', 'duplex',
]
__version__ = '0.4.0'
def main():
parser = ArgumentParser(
'bonito',
formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-v', '--version', action='version',
version='%(prog)s {}'.format(__version__)
)
subparsers = parser.add_subparsers(
title='subcommands', description='valid commands',
help='additional help', dest='command'
)
subparsers.required = True
for module in modules:
mod = globals()[module]
p = subparsers.add_parser(module, parents=[mod.argparser()])
p.set_defaults(func=mod.main)
args = parser.parse_args()
args.func(args)
|
"""
Bonito Multiprocessing
"""
import queue
from itertools import count
from threading import Thread
from functools import partial
from collections import deque
from signal import signal, SIGINT
from multiprocessing import Process, Queue, Event, Lock, cpu_count
def process_iter(iterator, maxsize=1):
"""
Take an iterator and run it on another process.
"""
return iter(ProcessIterator(iterator, maxsize=maxsize))
def thread_iter(iterator, maxsize=1):
"""
Take an iterator and run it on another thread.
"""
return iter(ThreadIterator(iterator, maxsize=maxsize))
def process_cancel():
"""
    Register a cancel event on SIGINT.
"""
event = Event()
signal(SIGINT, lambda *a: event.set())
return event
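# Illustrative sketch: the returned Event is typically handed to long-running
# producers so that Ctrl-C stops them cleanly, e.g. (mirroring the basecaller CLI):
#
#   cancel = process_cancel()
#   reads = get_reads(reads_directory, cancel=cancel)  # producer checks cancel.is_set()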
def process_map(func, iterator, n_proc=4, maxsize=0):
"""
Take an `iterator` of key, value pairs and apply `func` to all values using `n_proc` processes.
"""
if n_proc == 0: return ((k, func(v)) for k, v in iterator)
return iter(ProcessMap(func, iterator, n_proc, output_queue=Queue(maxsize)))
def thread_map(func, iterator, n_thread=4, maxsize=2):
"""
Take an `iterator` of key, value pairs and apply `func` to all values using `n_thread` threads.
"""
if n_thread == 0: return ((k, func(v)) for k, v in iterator)
return iter(ThreadMap(partial(MapWorkerThread, func), iterator, n_thread, maxsize=maxsize))
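# Minimal usage sketch: both map helpers consume (key, value) pairs and yield
# (key, func(value)) pairs, e.g.
#
#   pairs = ((i, i) for i in range(8))
#   for key, doubled in thread_map(lambda v: v * 2, pairs, n_thread=2):
#       print(key, doubled)
#
# Passing n_thread=0 / n_proc=0 falls back to a lazy generator on the caller.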
class BackgroundIterator:
"""
Runs an iterator in the background.
"""
def __init__(self, iterator, maxsize=10):
super().__init__()
self.iterator = iterator
self.queue = self.QueueClass(maxsize)
def __iter__(self):
self.start()
while True:
item = self.queue.get()
if item is StopIteration:
break
yield item
def run(self):
for item in self.iterator:
self.queue.put(item)
self.queue.put(StopIteration)
def stop(self):
self.join()
class ThreadIterator(BackgroundIterator, Thread):
"""
    Runs an iterator in a separate thread.
"""
QueueClass = queue.Queue
class ProcessIterator(BackgroundIterator, Process):
"""
Runs an iterator in a separate process.
"""
QueueClass = Queue
class MapWorker(Process):
"""
Process that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, func, input_queue, output_queue):
super().__init__()
self.func = func
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
item = self.input_queue.get()
if item is StopIteration:
break
k, v = item
self.output_queue.put((k, self.func(v)))
class ProcessMap(Thread):
def __init__(self, func, iterator, n_proc, output_queue=None):
super().__init__()
self.key_map = {}
self.iterator = iterator
self.work_queue = Queue(n_proc * 2)
self.output_queue = output_queue or Queue()
self.processes = [MapWorker(func, self.work_queue, self.output_queue) for _ in range(n_proc)]
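    # Design note: only id(k) is sent to the worker processes; the original key
    # objects stay in key_map on this side, so keys are never pickled and are
    # restored when results come back (in completion order, not input order).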
def start(self):
for process in self.processes:
process.start()
super().start()
def run(self):
for (k, v) in self.iterator:
self.work_queue.put((id(k), v))
self.key_map[id(k)] = k
for _ in self.processes:
self.work_queue.put(StopIteration)
for process in self.processes:
process.join()
self.output_queue.put(StopIteration)
def __iter__(self):
self.start()
while True:
item = self.output_queue.get()
if item is StopIteration:
break
k, v = item
yield self.key_map.pop(k), v
class MapWorkerThread(Thread):
"""
    Thread that reads items from an input_queue, applies a func to them and puts them on an output_queue
"""
def __init__(self, func, input_queue=None, output_queue=None):
super().__init__()
self.func = func
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
item = self.input_queue.get()
if item is StopIteration:
self.output_queue.put(item)
break
k, v = item
self.output_queue.put((k, self.func(v)))
class ThreadMap(Thread):
def __init__(self, worker_type, iterator, n_thread, maxsize=2):
super().__init__()
self.iterator = iterator
self.n_thread = n_thread
self.work_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
self.output_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
self.workers = [worker_type(input_queue=in_q, output_queue=out_q) for (in_q, out_q) in zip(self.work_queues, self.output_queues)]
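    # Design note: work is dealt to per-worker queues round-robin and results are
    # read back in the same round-robin order, so output order matches input order.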
def start(self):
for worker in self.workers:
worker.start()
super().start()
def __iter__(self):
self.start()
for i in count():
item = self.output_queues[i % self.n_thread].get()
if item is StopIteration:
                # drain the remaining output queues so workers blocked on a full queue can finish and be joined
for j in range(i + 1, i + self.n_thread):
self.output_queues[j % self.n_thread].get()
break
yield item
def run(self):
for i, (k, v) in enumerate(self.iterator):
self.work_queues[i % self.n_thread].put((k, v))
for q in self.work_queues:
q.put(StopIteration)
for worker in self.workers:
worker.join()
|
"""
Bonito train
"""
import os
import re
from glob import glob
from functools import partial
from time import perf_counter
from collections import OrderedDict
from datetime import datetime
from bonito.util import accuracy, decode_ref, permute, concat, match_names
import bonito
import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from torch.optim.lr_scheduler import LambdaLR
import torch.cuda.amp as amp
class ChunkDataSet:
def __init__(self, chunks, targets, lengths):
self.chunks = np.expand_dims(chunks, axis=1)
self.targets = targets
self.lengths = lengths
def __getitem__(self, i):
return (
self.chunks[i].astype(np.float32),
self.targets[i].astype(np.int64),
self.lengths[i].astype(np.int64),
)
def __len__(self):
return len(self.lengths)
def const_schedule(y):
"""
Constant Scheduler
"""
return lambda t: y
def linear_schedule(y0, y1):
"""
Linear Scheduler
"""
return lambda t: y0 + (y1 - y0) * t
def cosine_decay_schedule(y0, y1):
"""
Cosine Decay Scheduler
"""
return lambda t: y1 + 0.5 * (y0 - y1) * (np.cos(t * np.pi) + 1.0)
def piecewise_schedule(knots, funcs):
"""
Piecewise Scheduler
"""
def f(t):
i = np.searchsorted(knots, t)
t0 = 0.0 if i == 0 else knots[i - 1]
t1 = 1.0 if i == len(knots) else knots[i]
return funcs[i]((t - t0) / (t1 - t0))
return f
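# Worked example (sketch): warm up linearly over the first 10% of training,
# then cosine-decay from 1.0 to 0.1 over the rest:
#
#   sched = piecewise_schedule(
#       [0.1], [linear_schedule(0.0, 1.0), cosine_decay_schedule(1.0, 0.1)]
#   )
#   sched(0.0)  # 0.0 (start of warmup)
#   sched(0.1)  # 1.0 (end of warmup)
#   sched(1.0)  # 0.1 (end of training)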
def func_scheduler(optimizer, func, total_steps, warmup_steps=None, warmup_ratio=0.1, start_step=0):
"""
Learning Rate Scheduler
"""
if warmup_steps:
y0 = func(0.0)
func = piecewise_schedule(
[warmup_steps / total_steps],
[linear_schedule(warmup_ratio * y0, y0), func]
)
return LambdaLR(optimizer, (lambda step: func((step + start_step) / total_steps)))
def load_state(dirname, device, model):
"""
Load a model state dict from disk
"""
model.to(device)
weight_no = None
weight_files = glob(os.path.join(dirname, "weights_*.tar"))
if weight_files:
weight_no = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
if weight_no:
print("[picking up from epoch %s]" % weight_no)
state_dict = torch.load(
os.path.join(dirname, 'weights_%s.tar' % weight_no), map_location=device
)
state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.', '')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
epoch = weight_no
else:
epoch = 0
return epoch
class Trainer:
def __init__(self, model, device, train_loader, valid_loader, criterion=None, use_amp=True):
self.model = model.to(device)
self.device = device
self.train_loader = train_loader
self.valid_loader = valid_loader
self.criterion = criterion or (model.seqdist.ctc_loss if hasattr(model, 'seqdist') else model.ctc_label_smoothing_loss)
self.use_amp = use_amp
self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
self.optimizer = None
def train_one_step(self, batch):
data, targets, lengths = batch
self.optimizer.zero_grad()
with amp.autocast(enabled=self.use_amp):
scores = self.model(data.to(self.device))
losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
if not isinstance(losses, dict):
losses = {'loss': losses}
self.scaler.scale(losses['loss']).backward()
self.scaler.unscale_(self.optimizer)
grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=2.0).item()
self.scaler.step(self.optimizer)
self.scaler.update()
return losses, grad_norm
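    # train_one_step above follows the standard torch.cuda.amp GradScaler recipe:
    # scale the loss, backward, unscale before gradient clipping, then step and update.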
def train_one_epoch(self, loss_log, lr_scheduler):
t0 = perf_counter()
chunks = 0
self.model.train()
progress_bar = tqdm(
total=len(self.train_loader), desc='[0/{}]'.format(len(self.train_loader.dataset)),
ascii=True, leave=True, ncols=100, bar_format='{l_bar}{bar}| [{elapsed}{postfix}]'
)
smoothed_loss = None
with progress_bar:
for batch in self.train_loader:
chunks += batch[0].shape[0]
losses, grad_norm = self.train_one_step(batch)
losses = {k: v.item() for k,v in losses.items()}
if lr_scheduler is not None: lr_scheduler.step()
smoothed_loss = losses['loss'] if smoothed_loss is None else (0.01 * losses['loss'] + 0.99 * smoothed_loss)
progress_bar.set_postfix(loss='%.4f' % smoothed_loss)
progress_bar.set_description("[{}/{}]".format(chunks, len(self.train_loader.dataset)))
progress_bar.update()
if loss_log is not None:
loss_log.append({'chunks': chunks, 'time': perf_counter() - t0, 'grad_norm': grad_norm, **losses})
return smoothed_loss, perf_counter() - t0
def validate_one_step(self, batch):
data, targets, lengths = batch
scores = self.model(data.to(self.device))
losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
losses = {k: v.item() for k, v in losses.items()} if isinstance(losses, dict) else losses.item()
if hasattr(self.model, 'decode_batch'):
seqs = self.model.decode_batch(scores)
else:
seqs = [self.model.decode(x) for x in permute(scores, 'TNC', 'NTC')]
refs = [decode_ref(target, self.model.alphabet) for target in targets]
accs = [
accuracy(ref, seq, min_coverage=0.5) if len(seq) else 0. for ref, seq in zip(refs, seqs)
]
return seqs, refs, accs, losses
def validate_one_epoch(self):
self.model.eval()
with torch.no_grad():
seqs, refs, accs, losses = zip(*(self.validate_one_step(batch) for batch in self.valid_loader))
seqs, refs, accs = (sum(x, []) for x in (seqs, refs, accs))
loss = np.mean([(x['ctc_loss'] if isinstance(x, dict) else x) for x in losses])
return loss, np.mean(accs), np.median(accs)
def init_optimizer(self, lr, **kwargs):
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, **kwargs)
def get_lr_scheduler(self, epochs, last_epoch=0):
return func_scheduler(
self.optimizer, cosine_decay_schedule(1.0, 0.1), epochs * len(self.train_loader),
warmup_steps=500,
start_step=last_epoch*len(self.train_loader)
)
def fit(self, workdir, epochs=1, lr=2e-3, last_epoch=0):
if self.optimizer is None:
self.init_optimizer(lr)
lr_scheduler = self.get_lr_scheduler(epochs, last_epoch=last_epoch)
for epoch in range(1 + last_epoch, epochs + 1 + last_epoch):
try:
with bonito.io.CSVLogger(os.path.join(workdir, 'losses_{}.csv'.format(epoch))) as loss_log:
train_loss, duration = self.train_one_epoch(loss_log, lr_scheduler)
model_state = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict()
torch.save(model_state, os.path.join(workdir, "weights_%s.tar" % epoch))
val_loss, val_mean, val_median = self.validate_one_epoch()
except KeyboardInterrupt:
break
print("[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%".format(
epoch, workdir, val_loss, val_mean, val_median
))
with bonito.io.CSVLogger(os.path.join(workdir, 'training.csv')) as training_log:
training_log.append({
'time': datetime.today(),
'duration': int(duration),
'epoch': epoch,
'train_loss': train_loss,
'validation_loss': val_loss,
'validation_mean': val_mean,
'validation_median': val_median
})
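# Minimal usage sketch (assumes a model, device and DataLoaders are already
# built, as in bonito.cli.train):
#
#   trainer = Trainer(model, device, train_loader, valid_loader)
#   trainer.fit(workdir, epochs=5, lr=2e-3)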
|
"""
Bonito Download
"""
import os
import re
from shutil import rmtree
from zipfile import ZipFile
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.util import __data__, __models__
from bonito.cli.convert import main as convert
from bonito.cli.convert import argparser as cargparser
import requests
from tqdm import tqdm
class File:
"""
Small class for downloading models and training assets.
"""
__url__ = "https://nanoporetech.box.com/shared/static/"
def __init__(self, path, url_frag, force):
self.path = path
self.force = force
self.url = os.path.join(self.__url__, url_frag)
def location(self, filename):
return os.path.join(self.path, filename)
def exists(self, filename):
return os.path.exists(self.location(filename))
def download(self):
"""
Download the remote file
"""
        # create the request for the file
req = requests.get(self.url, stream=True)
total = int(req.headers.get('content-length', 0))
fname = re.findall('filename="([^"]+)', req.headers['content-disposition'])[0]
# skip download if local file is found
if self.exists(fname.strip('.zip')) and not self.force:
print("[skipping %s]" % fname)
return
if self.exists(fname.strip('.zip')) and self.force:
rmtree(self.location(fname.strip('.zip')))
# download the file
with tqdm(total=total, unit='iB', ascii=True, ncols=100, unit_scale=True, leave=False) as t:
with open(self.location(fname), 'wb') as f:
for data in req.iter_content(1024):
f.write(data)
t.update(len(data))
print("[downloaded %s]" % fname)
# unzip .zip files
if fname.endswith('.zip'):
with ZipFile(self.location(fname), 'r') as zfile:
zfile.extractall(self.path)
os.remove(self.location(fname))
# convert chunkify training files to bonito
if fname.endswith('.hdf5'):
print("[converting %s]" % fname)
args = cargparser().parse_args([
self.location(fname),
self.location(fname).strip('.hdf5')
])
convert(args)
r9_models = [
"n8c07gc9ro09zt0ivgcoeuz6krnwsnf6.zip", # dna_r9.4.1@v1
"nas0uhf46fd1lh2jndhx2a54a9vvhxp4.zip", # dna_r9.4.1@v2
"1wodp3ur4jhvqvu5leowfg6lrw54jxp2.zip", # dna_r9.4.1@v3
"uetgwsnb8yfqvuyoka8p09mxilgskqc7.zip", # [email protected]
"47t2y48zw4waly25lmzx6sagf4bbbqqz.zip", # [email protected]
"hrv649cvx8lvomu1u0tsd47e5u2bbabt.zip", # [email protected]
"arqi4qwcj9btsd6bbjsnlbai0s6dg8yd.zip",
]
r10_models = [
"e70s615lh3i24rkhz006i0e4u4m8y2xa.zip", # dna_r10.3_q20ea
"hnr5mwlm8vmdsfpvn5fsxn3mvhbucy5f.zip", # dna_r10.3@v3
"yesf11tisfrncmod5hj2xtx9kbdveuqt.zip", # [email protected]
"ci6xdu7d4wczmhorhw1sweyg4gczx97t.zip", # [email protected]
"4cunv5z7nwjag7v2bun0g7vk2lf8rqnc.zip",
]
training = [
"cmh91cxupa0are1kc3z9aok425m75vrb.hdf5",
]
def main(args):
"""
Download models and training sets
"""
if args.models or args.all:
print("[downloading models]")
for model in r9_models[-1 if args.latest else 0:]:
File(__models__, model, args.force).download()
for model in r10_models[-1 if args.latest else 0:]:
File(__models__, model, args.force).download()
if args.training or args.all:
print("[downloading training data]")
for train in training:
File(__data__, train, args.force).download()
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
group = parser.add_mutually_exclusive_group()
group.add_argument('--all', action='store_true')
group.add_argument('--models', action='store_true')
group.add_argument('--training', action='store_true')
parser.add_argument('-f', '--force', action='store_true')
parser.add_argument('--latest', action='store_true')
return parser
|
#!/usr/bin/env python
"""
Convert a Taiyaki chunkify training file to a set of Bonito CTC .npy files
"""
import os
import h5py
import random
import numpy as np
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import islice as take
from argparse import ArgumentDefaultsHelpFormatter
from tqdm import tqdm
from bonito.training import ChunkDataSet
def align(samples, pointers, reference):
""" align to the start of the mapping """
squiggle_duration = len(samples)
mapped_off_the_start = len(pointers[pointers < 0])
mapped_off_the_end = len(pointers[pointers >= squiggle_duration])
pointers = pointers[mapped_off_the_start:len(pointers) - mapped_off_the_end]
reference = reference[mapped_off_the_start:len(reference) - mapped_off_the_end]
return samples[pointers[0]:pointers[-1]], pointers - pointers[0], reference
def scale(read, normalise=True):
""" scale and normalise a read """
samples = read['Dacs'][:]
scaling = read.attrs['range'] / read.attrs['digitisation']
scaled = (scaling * (samples + read.attrs['offset'])).astype(np.float32)
if normalise:
return (scaled - read.attrs['shift_frompA']) / read.attrs['scale_frompA']
return scaled
def pad_lengths(ragged_array, max_len=None):
lengths = np.array([len(x) for x in ragged_array], dtype=np.uint16)
padded = np.zeros((len(ragged_array), max_len or np.max(lengths)), dtype=ragged_array[0].dtype)
for x, y in zip(ragged_array, padded):
y[:len(x)] = x
return padded, lengths
def regular_break_points(n, chunk_len, overlap=0, align='mid'):
num_chunks, remainder = divmod(n - overlap, chunk_len - overlap)
start = {'left': 0, 'mid': remainder // 2, 'right': remainder}[align]
starts = np.arange(start, start + num_chunks*(chunk_len - overlap), (chunk_len - overlap))
return np.vstack([starts, starts + chunk_len]).T
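# Worked example (sketch): regular_break_points(10, 4) splits the remainder
# evenly around the chunks ('mid' alignment) and returns array([[1, 5], [5, 9]]).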
def get_chunks(read, break_points):
sample = scale(read)
pointers = read['Ref_to_signal'][:]
target = read['Reference'][:] + 1 # CTC convention
return (
(sample[i:j], target[ti:tj]) for (i, j), (ti, tj)
in zip(break_points, np.searchsorted(pointers, break_points))
)
def chunk_dataset(reads, chunk_len, num_chunks=None):
all_chunks = (
(chunk, target) for read in reads for chunk, target in
get_chunks(reads[read], regular_break_points(len(reads[read]['Dacs']), chunk_len))
)
chunks, targets = zip(*tqdm(take(all_chunks, num_chunks), total=num_chunks))
    targets, target_lens = pad_lengths(targets)  # convert refs from ragged array
return ChunkDataSet(chunks, targets, target_lens)
def validation_split(reads, num_valid=1000):
reads = np.random.permutation(sorted(reads.items()))
return OrderedDict(reads[:-num_valid]), OrderedDict(reads[-num_valid:])
def typical_indices(x, n=2.5):
mu, sd = np.mean(x), np.std(x)
idx, = np.where((mu - n*sd < x) & (x < mu + n*sd))
return idx
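# typical_indices keeps the indices whose value lies within n standard deviations
# of the mean; it is used to drop chunks with unusually short or long reference
# targets before writing the training set.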
def filter_chunks(ds, idx):
filtered = ChunkDataSet(ds.chunks.squeeze(1)[idx], ds.targets[idx], ds.lengths[idx])
filtered.targets = filtered.targets[:, :filtered.lengths.max()]
return filtered
def save_chunks(chunks, output_directory):
os.makedirs(output_directory, exist_ok=True)
np.save(os.path.join(output_directory, "chunks.npy"), chunks.chunks.squeeze(1))
np.save(os.path.join(output_directory, "references.npy"), chunks.targets)
np.save(os.path.join(output_directory, "reference_lengths.npy"), chunks.lengths)
print()
print("> data written to %s:" % output_directory)
print(" - chunks.npy with shape", chunks.chunks.squeeze(1).shape)
print(" - references.npy with shape", chunks.targets.shape)
print(" - reference_lengths.npy shape", chunks.lengths.shape)
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
reads = h5py.File(args.chunkify_file, 'r')['Reads']
training, validation = validation_split(reads, args.validation_reads)
print("> preparing training chunks\n")
training_chunks = chunk_dataset(training, args.chunksize)
training_indices = typical_indices(training_chunks.lengths)
training_chunks = filter_chunks(training_chunks, np.random.permutation(training_indices))
save_chunks(training_chunks, args.output_directory)
print("\n> preparing validation chunks\n")
validation_chunks = chunk_dataset(validation, args.chunksize)
validation_indices = typical_indices(validation_chunks.lengths)
validation_chunks = filter_chunks(validation_chunks, validation_indices)
save_chunks(validation_chunks, os.path.join(args.output_directory, "validation"))
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("chunkify_file")
parser.add_argument("output_directory")
parser.add_argument("--seed", default=25, type=int)
parser.add_argument("--chunksize", default=3600, type=int)
parser.add_argument("--validation-reads", default=1000, type=int)
return parser
|
"""
Bonito Export
"""
import os
import re
import sys
import json
import torch
import bonito
import hashlib
import numpy as np
from glob import glob
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, torch.nn.Parameter):
return obj.data
elif isinstance(obj, torch.Tensor):
return obj.detach().numpy()
else:
return super(JsonEncoder, self).default(obj)
def file_md5(filename, nblock=1024):
"""
Get md5 string from file.
"""
hasher = hashlib.md5()
block_size = nblock * hasher.block_size
with open(filename, "rb") as fh:
for blk in iter((lambda: fh.read(block_size)), b""):
hasher.update(blk)
return hasher.hexdigest()
def reformat_output_layer(layer_dict):
n_base, state_len, blank_score = [layer_dict.pop(k) for k in ['n_base', 'state_len', 'blank_score']]
layer_dict['size'] = (n_base + 1) * n_base**state_len
layer_dict['type'] = 'GlobalNormTransducer'
if blank_score is not None:
assert layer_dict['activation'] == 'tanh'
params = layer_dict['params']
params['W'] = torch.nn.functional.pad(
params['W'].reshape([n_base**state_len, n_base, -1]),
(0, 0, 1, 0),
value=0.
).reshape((n_base + 1) * n_base**state_len, -1)
params['b'] = torch.nn.functional.pad(
params['b'].reshape(n_base**state_len, n_base),
(1, 0),
value=np.arctanh(blank_score / layer_dict['scale'])
).reshape(-1)
return layer_dict
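# reformat_output_layer converts the implicit fixed blank score into explicit
# parameters: W gains a zero row and b gains arctanh(blank_score / scale) in the
# blank position of every state, so tanh(b) * scale reproduces blank_score.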
def to_guppy_dict(model, include_weights=True):
guppy_dict = bonito.nn.to_dict(model.encoder, include_weights=include_weights)
guppy_dict['sublayers'] = [x for x in guppy_dict['sublayers'] if x['type'] != 'permute']
guppy_dict['sublayers'] = [dict(x, type='LSTM', activation='tanh', gate='sigmoid') if x['type'] == 'lstm' else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'] = [dict(x, padding=(x['padding'], x['padding'])) if x['type'] == 'convolution' else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'] = [{'type': 'reverse', 'sublayers': x} if x.pop('reverse', False) else x for x in guppy_dict['sublayers']]
guppy_dict['sublayers'][-1] = reformat_output_layer(guppy_dict['sublayers'][-1])
return guppy_dict
def main(args):
if not os.path.isdir(args.model):
print("[error] file given - please provide a model directory to export.", file=sys.stderr)
return 1
model = bonito.util.load_model(args.model, device='cpu')
jsn = to_guppy_dict(model)
weight_files = glob(os.path.join(args.model, "weights_*.tar"))
weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files])
jsn["md5sum"] = file_md5(os.path.join(args.model, 'weights_%s.tar' % weights))
json.dump(jsn, sys.stdout, cls=JsonEncoder)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument('model')
return parser
|
"""
Bonito model viewer - display a model architecture for a given config.
"""
import toml
import argparse
from bonito.util import load_symbol
def main(args):
config = toml.load(args.config)
Model = load_symbol(config, "Model")
model = Model(config)
print(model)
print("Total parameters in model", sum(p.numel() for p in model.parameters()))
def argparser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("config")
return parser
|
"""
Bonito Basecaller
"""
import sys
import torch
import numpy as np
from tqdm import tqdm
from time import perf_counter
from datetime import timedelta
from itertools import islice as take
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.aligner import Aligner
from bonito.io import CTCWriter, Writer
from bonito.fast5 import get_reads, read_chunks
from bonito.multiprocessing import process_cancel
from bonito.util import column_to_set, load_symbol, load_model
def main(args):
if args.save_ctc and not args.reference:
sys.stderr.write("> a reference is needed to output ctc training data\n")
exit(1)
sys.stderr.write("> loading model\n")
model = load_model(args.model_directory, args.device, weights=int(args.weights))
if args.reference:
sys.stderr.write("> loading reference\n")
aligner = Aligner(args.reference, preset='ont-map', best_n=1)
if not aligner:
sys.stderr.write("> failed to load/build index\n")
exit(1)
else:
aligner = None
reads = get_reads(
args.reads_directory, n_proc=8, recursive=args.recursive,
read_ids=column_to_set(args.read_ids), skip=args.skip,
cancel=process_cancel()
)
if args.max_reads:
reads = take(reads, args.max_reads)
basecall = load_symbol(args.model_directory, "basecall")
if args.save_ctc:
reads = (
chunk for read in reads for chunk in read_chunks(read, chunksize=args.chunksize)
)
basecalls = basecall(
model, reads, batchsize=64, chunksize=args.chunksize,
aligner=aligner, qscores=args.fastq, reverse=args.revcomp,
)
writer = CTCWriter(
tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
aligner, args.ctc_min_coverage, args.ctc_min_accuracy
)
else:
basecalls = basecall(
model, reads, aligner=aligner, reverse=args.revcomp,
qscores=args.fastq, batchsize=args.batchsize, chunksize=args.chunksize,
)
writer = Writer(
tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
aligner, fastq=args.fastq
)
t0 = perf_counter()
writer.start()
writer.join()
duration = perf_counter() - t0
num_samples = sum(num_samples for read_id, num_samples in writer.log)
sys.stderr.write("> completed reads: %s\n" % len(writer.log))
sys.stderr.write("> duration: %s\n" % timedelta(seconds=np.round(duration)))
sys.stderr.write("> samples per second %.1E\n" % (num_samples / duration))
sys.stderr.write("> done\n")
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model_directory")
parser.add_argument("reads_directory")
parser.add_argument("--reference")
parser.add_argument("--read-ids")
parser.add_argument("--device", default="cuda")
parser.add_argument("--weights", default="0", type=str)
parser.add_argument("--skip", action="store_true", default=False)
parser.add_argument("--fastq", action="store_true", default=False)
parser.add_argument("--save-ctc", action="store_true", default=False)
parser.add_argument("--revcomp", action="store_true", default=False)
parser.add_argument("--recursive", action="store_true", default=False)
parser.add_argument("--ctc-min-coverage", default=0.9, type=float)
parser.add_argument("--ctc-min-accuracy", default=0.9, type=float)
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--chunksize", default=4000, type=int)
parser.add_argument("--max-reads", default=0, type=int)
return parser
|
"""
Bonito Duplex consensus decoding.
https://www.biorxiv.org/content/10.1101/2020.02.25.956771v1
"""
import os
import sys
import json
from glob import glob
from pathlib import Path
from os.path import basename
from functools import partial
from time import perf_counter
from datetime import timedelta
from multiprocessing import Pool
from itertools import islice, groupby
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process, Queue, Lock, cpu_count
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import spoa
import torch
import parasail
import numpy as np
import pandas as pd
from tqdm import tqdm
from fast_ctc_decode import crf_beam_search, crf_beam_search_duplex
from genomeworks import cuda
from genomeworks.cudapoa import CudaPoaBatch, status_to_str
import bonito
from bonito.io import Writer, devnull
from bonito.aligner import Aligner, align_map
from bonito.util import load_model, half_supported
from bonito.crf.basecall import transfer, split_read, stitch
from bonito.fast5 import get_raw_data_for_read, get_fast5_file
from bonito.util import unbatchify, batchify, chunk, concat, accuracy
from bonito.multiprocessing import thread_map, process_map, process_cancel
def poagen(groups, gpu_percent=0.8):
free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
gpu_mem_per_batch = gpu_percent * free
max_seq_sz = 0
max_sequences_per_poa = 0
for group in groups:
longest_seq = len(max(group, key=len))
max_seq_sz = longest_seq if longest_seq > max_seq_sz else max_seq_sz
seq_in_poa = len(group)
max_sequences_per_poa = seq_in_poa if seq_in_poa > max_sequences_per_poa else max_sequences_per_poa
batch = CudaPoaBatch(
max_sequences_per_poa,
max_seq_sz,
gpu_mem_per_batch,
output_type="consensus",
cuda_banded_alignment=True,
alignment_band_width=256,
)
poa_index = 0
initial_count = 0
while poa_index < len(groups):
group = groups[poa_index]
group_status, seq_status = batch.add_poa_group(group)
# If group was added and more space is left in batch, continue onto next group.
if group_status == 0:
for seq_index, status in enumerate(seq_status):
if status != 0:
print("Could not add sequence {} to POA {} - error {}".format(seq_index, poa_index, status_to_str(status)), file=sys.stderr)
poa_index += 1
# Once batch is full or no groups are left, run POA processing.
if ((group_status == 1) or ((group_status == 0) and (poa_index == len(groups)))):
batch.generate_poa()
consensus, coverage, con_status = batch.get_consensus()
for p, status in enumerate(con_status):
if status != 0:
print("Could not get consensus for POA group {} - {}".format(initial_count + p, status_to_str(status)), file=sys.stderr)
yield from consensus
initial_count = poa_index
batch.reset()
# In the case where POA group wasn't processed correctly.
elif group_status != 0:
print("Could not add POA group {} to batch - {}".format(poa_index, status_to_str(group_status)), file=sys.stderr)
poa_index += 1
def get_read(readdir, summary, idx):
"""
Get a single read from row `idx` in the `summary` dataframe.
"""
return get_raw_data_for_read(
(readdir / summary.iloc[idx].filename_fast5, summary.iloc[idx].read_id)
)
def read_gen(directory, summary, n_proc=1, cancel=None):
"""
Generate reads from the given `directory` listed in the `summary` dataframe.
"""
with Pool(n_proc) as pool:
for read in pool.imap(partial(get_read, Path(directory), summary), range(len(summary))):
yield read
if cancel is not None and cancel.is_set():
return
def get_read_ids(filename):
"""
Return a dictionary of read_id -> filename mappings.
"""
with get_fast5_file(filename, 'r') as f5:
return {
read.read_id: basename(filename) for read in f5.get_reads()
}
def build_index(files, n_proc=1):
"""
Build an index of read ids to filename mappings
"""
index = {}
with ProcessPoolExecutor(max_workers=n_proc) as pool:
for res in tqdm(pool.map(get_read_ids, files), leave=False):
index.update(res)
return index
def build_envelope(len1, seq1, path1, len2, seq2, path2, padding=15):
# needleman-wunsch alignment with constant gap penalty.
aln = parasail.nw_trace_striped_32(seq2, seq1, 2, 2, parasail.dnafull)
# pair up positions
alignment = np.column_stack([
np.cumsum([x != '-' for x in aln.traceback.ref]) - 1,
np.cumsum([x != '-' for x in aln.traceback.query]) - 1
])
path_range1 = np.column_stack([path1, path1[1:] + [len1]])
path_range2 = np.column_stack([path2, path2[1:] + [len2]])
envelope = np.full((len1, 2), -1, dtype=int)
for idx1, idx2 in alignment.clip(0):
st_1, en_1 = path_range1[idx1]
st_2, en_2 = path_range2[idx2]
for idx in range(st_1, en_1):
if st_2 < envelope[idx, 0] or envelope[idx, 0] < 0:
envelope[idx, 0] = st_2
if en_2 > envelope[idx, 1] or envelope[idx, 1] < 0:
envelope[idx, 1] = en_2
# add a little padding to ensure some overlap
envelope[:, 0] = envelope[:, 0] - padding
envelope[:, 1] = envelope[:, 1] + padding
envelope = np.clip(envelope, 0, len2)
prev_end = 0
for i in range(envelope.shape[0]):
if envelope[i, 0] > envelope[i, 1]:
envelope[i, 0] = 0
if envelope[i, 0] > prev_end:
envelope[i, 0] = prev_end
prev_end = envelope[i, 1]
return envelope.astype(np.uint64)
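# build_envelope returns, for every position in the first sequence, the
# [start, end) band of positions in the second sequence that the duplex beam
# search may visit, derived from the pairwise alignment plus `padding` and
# patched so the bands are contiguous.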
def find_follow_on(df, gap=5, distance=51, cov=0.85, min_len=100):
"""
Find follow on reads from a sequencing summary file.
"""
df = df[
df.alignment_coverage.astype('float32').gt(cov) &
df.sequence_length_template.astype('int32').gt(min_len)
]
df = df.sort_values(['run_id', 'channel', 'mux', 'start_time'])
genome_start = np.array(df.alignment_genome_start, dtype=np.int32)
genome_end = np.array(df.alignment_genome_end, dtype=np.int32)
direction = np.array(df.alignment_direction)
start_time = np.array(df.start_time, dtype=np.float32)
end_time = np.array(df.start_time + df.duration, dtype=np.float32)
channel = np.array(df.channel, dtype=np.int32)
mux = np.array(df.mux, dtype=np.int32)
filt = (
(channel[1:] == channel[:-1]) &
(mux[1:] == mux[:-1]) &
(np.abs(genome_start[1:] - genome_start[:-1]) < distance) &
(np.abs(genome_end[1:] - genome_end[:-1]) < distance) &
(direction[1:] != direction[:-1]) &
(start_time[1:] - end_time[:-1] < gap)
)
mask = np.full(len(filt) + 1, False)
mask[:-1] = mask[:-1] | filt
mask[1:] = mask[1:] | filt
return df[mask]
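# A follow-on pair here means two consecutive reads on the same channel and mux,
# mapping to nearby genome coordinates on opposite strands, with less than `gap`
# seconds between them; both reads of each pair are kept.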
def compute_scores(model, batch, reverse=False):
with torch.no_grad():
device = next(model.parameters()).device
dtype = torch.float16 if half_supported() else torch.float32
scores = model.encoder(batch.to(dtype).to(device))
if reverse: scores = model.seqdist.reverse_complement(scores)
betas = model.seqdist.backward_scores(scores.to(torch.float32))
trans, init = model.seqdist.compute_transition_probs(scores, betas)
return {
'trans': trans.to(dtype).transpose(0, 1),
'init': init.to(dtype).unsqueeze(1),
}
def basecall(model, reads, chunksize=4000, overlap=500, batchsize=32, reverse=False):
reads = (
read_chunk for read in reads
for read_chunk in split_read(read, chunksize * batchsize)[::-1 if reverse else 1]
)
chunks = (
((read, start, end),
chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
for (read, start, end) in reads
)
batches = (
(k, compute_scores(model, batch, reverse=reverse))
for k, batch in batchify(chunks, batchsize=batchsize)
)
stitched = (
(read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
for ((read, start, end), x) in unbatchify(batches)
)
transferred = thread_map(transfer, stitched, n_thread=1)
return (
(read, concat([part for k, part in parts]))
for read, parts in groupby(transferred, lambda x: x[0])
)
def beam_search_duplex(seq1, path1, t1, b1, seq2, path2, t2, b2, alphabet='NACGT', beamsize=5, pad=40, T=0.01):
env = build_envelope(t1.shape[0], seq1, path1, t2.shape[0], seq2, path2, padding=pad)
return crf_beam_search_duplex(
t1, b1, t2, b2,
alphabet=alphabet,
beam_size=beamsize,
beam_cut_threshold=T,
envelope=env,
)
def decode(res, beamsize_1=5, pad_1=40, cut_1=0.01, beamsize_2=5, pad_2=40, cut_2=0.01, match=80, alphabet="NACGT"):
temp_probs, init1 = res[0]['trans'].astype(np.float32), res[0]['init'][0].astype(np.float32)
comp_probs, init2 = res[1]['trans'].astype(np.float32), res[1]['init'][0].astype(np.float32)
simplex1, path1 = crf_beam_search(temp_probs, init1, alphabet, beam_size=5, beam_cut_threshold=0.01)
simplex2, path2 = crf_beam_search(comp_probs, init2, alphabet, beam_size=5, beam_cut_threshold=0.01)
if len(simplex1) < 10 or len(simplex2) < 10:
return [simplex1, simplex2]
if accuracy(simplex1, simplex2) < match:
return [simplex1, simplex2]
duplex1 = beam_search_duplex(
simplex1, path1, temp_probs, init1, simplex2, path2, comp_probs, init2, pad=pad_1, beamsize=5, T=cut_1
)
duplex2 = beam_search_duplex(
simplex2, path2, comp_probs, init2, simplex1, path1, temp_probs, init1, pad=pad_2, beamsize=5, T=cut_2
)
return [duplex1, duplex2, simplex1, simplex2]
def poa(seqs, allseq=False):
con, msa = spoa.poa(seqs, genmsa=False)
if allseq: return (con, *seqs)
return (con, )
def call(model, reads_directory, templates, complements, aligner=None, cudapoa=True):
temp_reads = read_gen(reads_directory, templates, n_proc=8, cancel=process_cancel())
comp_reads = read_gen(reads_directory, complements, n_proc=8, cancel=process_cancel())
temp_scores = basecall(model, temp_reads, reverse=False)
comp_scores = basecall(model, comp_reads, reverse=True)
scores = (((r1, r2), (s1, s2)) for (r1, s1), (r2, s2) in zip(temp_scores, comp_scores))
calls = thread_map(decode, scores, n_thread=12)
if cudapoa:
sequences = ((reads, [seqs, ]) for reads, seqs in calls if len(seqs) > 2)
consensus = (zip(reads, poagen(calls)) for reads, calls in batchify(sequences, 100))
res = ((reads[0], {'sequence': seq}) for seqs in consensus for reads, seq in seqs)
else:
sequences = ((reads, seqs) for reads, seqs in calls if len(seqs) > 2)
consensus = process_map(poa, sequences, n_proc=4)
res = ((reads, {'sequence': seq}) for reads, seqs in consensus for seq in seqs)
if aligner is None: return res
return align_map(aligner, res)
def main(args):
sys.stderr.write("> loading model\n")
model = load_model(args.model, args.device)
if args.reference:
sys.stderr.write("> loading reference\n")
aligner = Aligner(args.reference, preset='ont-map')
if not aligner:
sys.stderr.write("> failed to load/build index\n")
exit(1)
else:
aligner = None
if args.summary:
sys.stderr.write("> finding follow on strands\n")
        pairs = pd.read_csv(args.summary, sep='\t', low_memory=False)
pairs = pairs[pairs.sequence_length_template.gt(0)]
if 'filename' in pairs.columns:
pairs = pairs.rename(columns={'filename': 'filename_fast5'})
if 'alignment_strand_coverage' in pairs.columns:
pairs = pairs.rename(columns={'alignment_strand_coverage': 'alignment_coverage'})
valid_fast5s = [
f for f in pairs.filename_fast5.unique()
if ((args.reads_directory / Path(f)).exists())
]
pairs = pairs[pairs.filename_fast5.isin(valid_fast5s)]
pairs = find_follow_on(pairs)
sys.stderr.write("> found %s follow strands in summary\n" % (len(pairs) // 2))
if args.max_reads > 0: pairs = pairs.head(args.max_reads)
temp_reads = pairs.iloc[0::2]
comp_reads = pairs.iloc[1::2]
else:
if args.index is not None:
sys.stderr.write("> loading read index\n")
index = json.load(open(args.index, 'r'))
else:
sys.stderr.write("> building read index\n")
files = list(glob(os.path.join(args.reads_directory, '*.fast5')))
index = build_index(files, n_proc=8)
if args.save_index:
with open('bonito-read-id.idx', 'w') as f:
json.dump(index, f)
pairs = pd.read_csv(args.pairs, sep=args.sep, names=['read_1', 'read_2'])
if args.max_reads > 0: pairs = pairs.head(args.max_reads)
pairs['file_1'] = pairs['read_1'].apply(index.get)
pairs['file_2'] = pairs['read_2'].apply(index.get)
pairs = pairs.dropna().reset_index()
temp_reads = pairs[['read_1', 'file_1']].rename(
columns={'read_1': 'read_id', 'file_1': 'filename_fast5'}
)
comp_reads = pairs[['read_2', 'file_2']].rename(
columns={'read_2': 'read_id', 'file_2': 'filename_fast5'}
)
if len(pairs) == 0:
print("> no matched pairs found in given directory", file=sys.stderr)
exit(1)
# https://github.com/clara-parabricks/GenomeWorks/issues/648
with devnull(): CudaPoaBatch(1000, 1000, 3724032)
basecalls = call(model, args.reads_directory, temp_reads, comp_reads, aligner=aligner)
writer = Writer(tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, duplex=True)
t0 = perf_counter()
writer.start()
writer.join()
duration = perf_counter() - t0
num_samples = sum(num_samples for read_id, num_samples in writer.log)
print("> duration: %s" % timedelta(seconds=np.round(duration)), file=sys.stderr)
print("> samples per second %.1E" % (num_samples / duration), file=sys.stderr)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model")
parser.add_argument("reads_directory")
group = parser.add_mutually_exclusive_group()
group.add_argument("--summary", default=None)
group.add_argument("--pairs", default=None)
parser.add_argument("--sep", default=' ')
parser.add_argument("--index", default=None)
parser.add_argument("--save-index", action="store_true", default=False)
parser.add_argument("--reference")
parser.add_argument("--device", default="cuda")
parser.add_argument("--max-reads", default=0, type=int)
return parser
|
#!/usr/bin/env python3
"""
Bonito training.
"""
import os
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
from bonito.util import __models__, default_config, default_data
from bonito.util import load_data, load_model, load_symbol, init, half_supported
from bonito.training import ChunkDataSet, load_state, Trainer
import toml
import torch
import numpy as np
from torch.optim import AdamW
from torch.utils.data import DataLoader
def main(args):
workdir = os.path.expanduser(args.training_directory)
if os.path.exists(workdir) and not args.force:
print("[error] %s exists, use -f to force continue training." % workdir)
exit(1)
init(args.seed, args.device)
device = torch.device(args.device)
print("[loading data]")
train_data = load_data(limit=args.chunks, directory=args.directory)
if os.path.exists(os.path.join(args.directory, 'validation')):
valid_data = load_data(directory=os.path.join(args.directory, 'validation'))
else:
print("[validation set not found: splitting training set]")
split = np.floor(len(train_data[0]) * 0.97).astype(np.int32)
valid_data = [x[split:] for x in train_data]
train_data = [x[:split] for x in train_data]
train_loader = DataLoader(ChunkDataSet(*train_data), batch_size=args.batch, shuffle=True, num_workers=4, pin_memory=True)
valid_loader = DataLoader(ChunkDataSet(*valid_data), batch_size=args.batch, num_workers=4, pin_memory=True)
if args.pretrained:
dirname = args.pretrained
if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
dirname = os.path.join(__models__, dirname)
config_file = os.path.join(dirname, 'config.toml')
else:
config_file = args.config
config = toml.load(config_file)
argsdict = dict(training=vars(args))
os.makedirs(workdir, exist_ok=True)
toml.dump({**config, **argsdict}, open(os.path.join(workdir, 'config.toml'), 'w'))
print("[loading model]")
if args.pretrained:
print("[using pretrained model {}]".format(args.pretrained))
model = load_model(args.pretrained, device, half=False)
else:
model = load_symbol(config, 'Model')(config)
last_epoch = load_state(workdir, args.device, model)
if args.multi_gpu:
from torch.nn import DataParallel
model = DataParallel(model)
model.decode = model.module.decode
model.alphabet = model.module.alphabet
trainer = Trainer(model, device, train_loader, valid_loader, use_amp=half_supported() and not args.no_amp)
trainer.fit(workdir, args.epochs, args.lr, last_epoch=last_epoch)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("training_directory")
group = parser.add_mutually_exclusive_group()
group.add_argument('--config', default=default_config)
group.add_argument('--pretrained', default="")
parser.add_argument("--directory", default=default_data)
parser.add_argument("--device", default="cuda")
parser.add_argument("--lr", default=2e-3, type=float)
parser.add_argument("--seed", default=25, type=int)
parser.add_argument("--epochs", default=5, type=int)
parser.add_argument("--batch", default=64, type=int)
parser.add_argument("--chunks", default=0, type=int)
parser.add_argument("--no-amp", action="store_true", default=False)
parser.add_argument("--multi-gpu", action="store_true", default=False)
parser.add_argument("-f", "--force", action="store_true", default=False)
return parser
|
"""
Bonito model evaluator
"""
import os
import time
import torch
import numpy as np
from itertools import starmap
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bonito.training import ChunkDataSet
from bonito.util import accuracy, poa, decode_ref, half_supported
from bonito.util import init, load_data, load_model, concat, permute
from torch.utils.data import DataLoader
def main(args):
poas = []
init(args.seed, args.device)
print("* loading data")
directory = args.directory
if os.path.exists(os.path.join(directory, 'validation')):
directory = os.path.join(directory, 'validation')
testdata = ChunkDataSet(
*load_data(
limit=args.chunks, directory=directory
)
)
dataloader = DataLoader(testdata, batch_size=args.batchsize)
accuracy_with_cov = lambda ref, seq: accuracy(ref, seq, min_coverage=args.min_coverage)
for w in [int(i) for i in args.weights.split(',')]:
seqs = []
print("* loading model", w)
model = load_model(args.model_directory, args.device, weights=w)
print("* calling")
t0 = time.perf_counter()
with torch.no_grad():
for data, *_ in dataloader:
if half_supported():
data = data.type(torch.float16).to(args.device)
else:
data = data.to(args.device)
log_probs = model(data)
if hasattr(model, 'decode_batch'):
seqs.extend(model.decode_batch(log_probs))
else:
seqs.extend([model.decode(p) for p in permute(log_probs, 'TNC', 'NTC')])
duration = time.perf_counter() - t0
refs = [decode_ref(target, model.alphabet) for target in dataloader.dataset.targets]
accuracies = [accuracy_with_cov(ref, seq) if len(seq) else 0. for ref, seq in zip(refs, seqs)]
        if args.poa: poas.append(seqs)
print("* mean %.2f%%" % np.mean(accuracies))
print("* median %.2f%%" % np.median(accuracies))
print("* time %.2f" % duration)
print("* samples/s %.2E" % (args.chunks * data.shape[2] / duration))
if args.poa:
print("* doing poa")
t0 = time.perf_counter()
# group each sequence prediction per model together
poas = [list(seq) for seq in zip(*poas)]
consensuses = poa(poas)
duration = time.perf_counter() - t0
        accuracies = list(starmap(accuracy_with_cov, zip(refs, consensuses)))
print("* mean %.2f%%" % np.mean(accuracies))
print("* median %.2f%%" % np.median(accuracies))
print("* time %.2f" % duration)
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("model_directory")
parser.add_argument("--directory", default=None)
parser.add_argument("--device", default="cuda")
parser.add_argument("--seed", default=9, type=int)
parser.add_argument("--weights", default="0", type=str)
parser.add_argument("--chunks", default=1000, type=int)
parser.add_argument("--batchsize", default=96, type=int)
parser.add_argument("--beamsize", default=5, type=int)
parser.add_argument("--poa", action="store_true", default=False)
parser.add_argument("--min-coverage", default=0.5, type=float)
return parser
|
from .model import Model
from .basecall import basecall
|
"""
Bonito CTC-CRF Model.
"""
import torch
import numpy as np
from bonito.nn import Module, Convolution, SHABlock, LinearCRFEncoder, Serial, Permute, layers, from_dict
import seqdist.sparse
from seqdist.ctc_simple import logZ_cupy, viterbi_alignments
from seqdist.core import SequenceDist, Max, Log, semiring
def get_stride(m):
if hasattr(m, 'stride'):
return m.stride if isinstance(m.stride, int) else m.stride[0]
if isinstance(m, Convolution):
return get_stride(m.conv)
if isinstance(m, Serial):
return int(np.prod([get_stride(x) for x in m]))
return 1
class CTC_CRF(SequenceDist):
def __init__(self, state_len, alphabet):
super().__init__()
self.alphabet = alphabet
self.state_len = state_len
self.n_base = len(alphabet[1:])
self.idx = torch.cat([
torch.arange(self.n_base**(self.state_len))[:, None],
torch.arange(
self.n_base**(self.state_len)
).repeat_interleave(self.n_base).reshape(self.n_base, -1).T
], dim=1).to(torch.int32)
def n_score(self):
return len(self.alphabet) * self.n_base**(self.state_len)
def logZ(self, scores, S:semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, len(self.alphabet))
alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.logZ(Ms, self.idx, alpha_0, beta_T, S)
def normalise(self, scores):
return (scores - self.logZ(scores)[:, None] / len(scores))
def forward_scores(self, scores, S: semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, self.n_base + 1)
alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.fwd_scores_cupy(Ms, self.idx, alpha_0, S, K=1)
def backward_scores(self, scores, S: semiring=Log):
T, N, _ = scores.shape
Ms = scores.reshape(T, N, -1, self.n_base + 1)
beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
return seqdist.sparse.bwd_scores_cupy(Ms, self.idx, beta_T, S, K=1)
def compute_transition_probs(self, scores, betas):
T, N, C = scores.shape
# add bwd scores to edge scores
log_trans_probs = (scores.reshape(T, N, -1, self.n_base + 1) + betas[1:, :, :, None])
# transpose from (new_state, dropped_base) to (old_state, emitted_base) layout
log_trans_probs = torch.cat([
log_trans_probs[:, :, :, [0]],
log_trans_probs[:, :, :, 1:].transpose(3, 2).reshape(T, N, -1, self.n_base)
], dim=-1)
# convert from log probs to probs by exponentiating and normalising
trans_probs = torch.softmax(log_trans_probs, dim=-1)
        # convert first bwd score to initial state probabilities
init_state_probs = torch.softmax(betas[0], dim=-1)
return trans_probs, init_state_probs
def reverse_complement(self, scores):
T, N, C = scores.shape
expand_dims = T, N, *(self.n_base for _ in range(self.state_len)), self.n_base + 1
scores = scores.reshape(*expand_dims)
blanks = torch.flip(scores[..., 0].permute(
0, 1, *range(self.state_len + 1, 1, -1)).reshape(T, N, -1, 1), [0, 2]
)
emissions = torch.flip(scores[..., 1:].permute(
0, 1, *range(self.state_len, 1, -1),
            self.state_len + 2,
self.state_len + 1).reshape(T, N, -1, self.n_base), [0, 2, 3]
)
return torch.cat([blanks, emissions], dim=-1).reshape(T, N, -1)
def viterbi(self, scores):
traceback = self.posteriors(scores, Max)
paths = traceback.argmax(2) % len(self.alphabet)
return paths
def path_to_str(self, path):
alphabet = np.frombuffer(''.join(self.alphabet).encode(), dtype='u1')
seq = alphabet[path[path != 0]]
return seq.tobytes().decode()
def prepare_ctc_scores(self, scores, targets):
# convert from CTC targets (with blank=0) to zero indexed
targets = torch.clamp(targets - 1, 0)
T, N, C = scores.shape
scores = scores.to(torch.float32)
n = targets.size(1) - (self.state_len - 1)
stay_indices = sum(
targets[:, i:n + i] * self.n_base ** (self.state_len - i - 1)
for i in range(self.state_len)
) * len(self.alphabet)
move_indices = stay_indices[:, 1:] + targets[:, :n - 1] + 1
stay_scores = scores.gather(2, stay_indices.expand(T, -1, -1))
move_scores = scores.gather(2, move_indices.expand(T, -1, -1))
return stay_scores, move_scores
def ctc_loss(self, scores, targets, target_lengths, loss_clip=None, reduction='mean', normalise_scores=True):
if normalise_scores:
scores = self.normalise(scores)
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
logz = logZ_cupy(stay_scores, move_scores, target_lengths + 1 - self.state_len)
loss = - (logz / target_lengths)
if loss_clip:
loss = torch.clamp(loss, 0.0, loss_clip)
if reduction == 'mean':
return loss.mean()
elif reduction in ('none', None):
return loss
else:
raise ValueError('Unknown reduction type {}'.format(reduction))
def ctc_viterbi_alignments(self, scores, targets, target_lengths):
stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
return viterbi_alignments(stay_scores, move_scores, target_lengths + 1 - self.state_len)
def conv(c_in, c_out, ks, stride=1, bias=False, activation=None):
return Convolution(c_in, c_out, ks, stride=stride, padding=ks//2, bias=bias, activation=activation)
def rnn_encoder(n_base, state_len, insize=1, stride=5, winlen=19, activation='swish', rnn_type='lstm', features=768, scale=5.0, blank_score=None, single_head_attn=False):
rnn = layers[rnn_type]
return Serial([
conv(insize, 4, ks=5, bias=True, activation=activation),
conv(4, 16, ks=5, bias=True, activation=activation),
conv(16, features, ks=winlen, stride=stride, bias=True, activation=activation),
Permute([2, 0, 1]),
rnn(features, features, reverse=True), rnn(features, features),
rnn(features, features, reverse=True), rnn(features, features),
*([SHABlock(features)] if single_head_attn else []),
rnn(features, features, reverse=True),
LinearCRFEncoder(features, n_base, state_len, bias=True, activation='tanh', scale=scale, blank_score=blank_score)
])
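# The encoder above is three convolutions (the last applies the stride), five
# LSTM layers of alternating direction (with an optional single-head attention
# block before the last one), and a LinearCRFEncoder emitting CRF transition scores.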
class SeqdistModel(Module):
def __init__(self, encoder, seqdist):
super().__init__()
self.seqdist = seqdist
self.encoder = encoder
self.stride = get_stride(encoder)
self.alphabet = seqdist.alphabet
def forward(self, x):
return self.encoder(x).to(torch.float32)
def decode_batch(self, x):
scores = self.seqdist.posteriors(x.to(torch.float32)) + 1e-8
tracebacks = self.seqdist.viterbi(scores.log()).to(torch.int16).T
return [self.seqdist.path_to_str(x) for x in tracebacks.cpu().numpy()]
def decode(self, x):
return self.decode_batch(x.unsqueeze(1))[0]
class Model(SeqdistModel):
def __init__(self, config):
seqdist = CTC_CRF(
state_len=config['global_norm']['state_len'],
alphabet=config['labels']['labels']
)
if 'type' in config['encoder']: #new-style config
encoder = from_dict(config['encoder'])
else: #old-style
encoder = rnn_encoder(seqdist.n_base, seqdist.state_len, insize=config['input']['features'], **config['encoder'])
super().__init__(encoder, seqdist)
self.config = config
|
"""
Bonito CRF basecall
"""
import torch
import numpy as np
from kbeam import beamsearch
from itertools import groupby
from functools import partial
from operator import itemgetter
import bonito
from bonito.io import Writer
from bonito.fast5 import get_reads
from bonito.aligner import align_map
from bonito.multiprocessing import thread_map, thread_iter
from bonito.util import concat, chunk, batchify, unbatchify, half_supported
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
"""
Stitch chunks together with a given overlap
"""
if isinstance(chunks, dict):
return {
k: stitch(v, chunksize, overlap, length, stride, reverse=reverse)
for k, v in chunks.items()
}
return bonito.util.stitch(chunks, chunksize, overlap, length, stride, reverse=reverse)
def compute_scores(model, batch, reverse=False):
"""
Compute scores for model.
"""
with torch.no_grad():
device = next(model.parameters()).device
dtype = torch.float16 if half_supported() else torch.float32
scores = model(batch.to(dtype).to(device))
if reverse: scores = model.seqdist.reverse_complement(scores)
betas = model.seqdist.backward_scores(scores.to(torch.float32))
betas -= (betas.max(2, keepdim=True)[0] - 5.0)
return {
'scores': scores.transpose(0, 1),
'betas': betas.transpose(0, 1),
}
def quantise_int8(x, scale=127/5):
"""
Quantise scores to int8.
"""
scores = x['scores']
scores *= scale
scores = torch.round(scores).to(torch.int8).detach()
betas = x['betas']
betas *= scale
betas = torch.round(torch.clamp(betas, -127., 128.)).to(torch.int8).detach()
return {'scores': scores, 'betas': betas}
def transfer(x):
"""
Device to host transfer using pinned memory.
"""
torch.cuda.synchronize()
with torch.cuda.stream(torch.cuda.Stream()):
return {
k: torch.empty(v.shape, pin_memory=True, dtype=v.dtype).copy_(v).numpy()
for k, v in x.items()
}
def decode_int8(scores, seqdist, scale=127/5, beamsize=40, beamcut=100.0):
"""
Beamsearch decode.
"""
path, _ = beamsearch(
scores['scores'], scale, seqdist.n_base, beamsize,
guide=scores['betas'], beam_cut=beamcut
)
try:
return seqdist.path_to_str(path % 4 + 1)
except IndexError:
return ""
def split_read(read, split_read_length=400000):
"""
Split large reads into manageable pieces.
"""
if len(read.signal) <= split_read_length:
return [(read, 0, len(read.signal))]
breaks = np.arange(0, len(read.signal) + split_read_length, split_read_length)
return [(read, start, min(end, len(read.signal))) for (start, end) in zip(breaks[:-1], breaks[1:])]
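# Worked example (sketch): with the default split length, a read of 1,000,000
# samples becomes [(read, 0, 400000), (read, 400000, 800000), (read, 800000, 1000000)].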
def basecall(model, reads, aligner=None, beamsize=40, chunksize=4000, overlap=500, batchsize=32, qscores=False, reverse=False):
"""
Basecalls a set of reads.
"""
_decode = partial(decode_int8, seqdist=model.seqdist, beamsize=beamsize)
reads = (read_chunk for read in reads for read_chunk in split_read(read)[::-1 if reverse else 1])
chunks = (
((read, start, end), chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
for (read, start, end) in reads
)
batches = (
(k, quantise_int8(compute_scores(model, batch, reverse=reverse)))
for k, batch in thread_iter(batchify(chunks, batchsize=batchsize))
)
stitched = (
(read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
for ((read, start, end), x) in unbatchify(batches)
)
transferred = thread_map(transfer, stitched, n_thread=1)
basecalls = thread_map(_decode, transferred, n_thread=8)
basecalls = (
(read, ''.join(seq for k, seq in parts))
for read, parts in groupby(basecalls, lambda x: (x[0].parent if hasattr(x[0], 'parent') else x[0]))
)
basecalls = (
(read, {'sequence': seq, 'qstring': '?' * len(seq) if qscores else '*', 'mean_qscore': 0.0})
for read, seq in basecalls
)
if aligner: return align_map(aligner, basecalls)
return basecalls
|
from .model import Model
from .basecall import basecall
|
"""
Bonito Model template
"""
import numpy as np
from bonito.nn import Permute, layers
import torch
from torch.nn.functional import log_softmax, ctc_loss
from torch.nn import Module, ModuleList, Sequential, Conv1d, BatchNorm1d, Dropout
from fast_ctc_decode import beam_search, viterbi_search
class Model(Module):
"""
Model template for QuartzNet style architectures
https://arxiv.org/pdf/1910.10261.pdf
"""
def __init__(self, config):
super(Model, self).__init__()
if 'qscore' not in config:
self.qbias = 0.0
self.qscale = 1.0
else:
self.qbias = config['qscore']['bias']
self.qscale = config['qscore']['scale']
self.config = config
self.stride = config['block'][0]['stride'][0]
self.alphabet = config['labels']['labels']
self.features = config['block'][-1]['filters']
self.encoder = Encoder(config)
self.decoder = Decoder(self.features, len(self.alphabet))
def forward(self, x):
encoded = self.encoder(x)
return self.decoder(encoded)
def decode(self, x, beamsize=5, threshold=1e-3, qscores=False, return_path=False):
x = x.exp().cpu().numpy().astype(np.float32)
if beamsize == 1 or qscores:
seq, path = viterbi_search(x, self.alphabet, qscores, self.qscale, self.qbias)
else:
seq, path = beam_search(x, self.alphabet, beamsize, threshold)
if return_path: return seq, path
return seq
def ctc_label_smoothing_loss(self, log_probs, targets, lengths, weights=None):
T, N, C = log_probs.shape
        weights = weights if weights is not None else torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)])
log_probs_lengths = torch.full(size=(N, ), fill_value=T, dtype=torch.int64)
loss = ctc_loss(log_probs.to(torch.float32), targets, log_probs_lengths, lengths, reduction='mean')
label_smoothing_loss = -((log_probs * weights.to(log_probs.device)).mean())
return {'loss': loss + label_smoothing_loss, 'ctc_loss': loss, 'label_smooth_loss': label_smoothing_loss}
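# Note on the smoothing term above: the first class carries weight 0.4 and the remaining
# C - 1 classes share a total weight of 0.1, so the added loss is a weighted mean of the
# negated log-probabilities (the first class is presumably the CTC blank).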
class Encoder(Module):
"""
Builds the model encoder
"""
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
features = self.config['input']['features']
activation = layers[self.config['encoder']['activation']]()
encoder_layers = []
for layer in self.config['block']:
encoder_layers.append(
Block(
features, layer['filters'], activation,
repeat=layer['repeat'], kernel_size=layer['kernel'],
stride=layer['stride'], dilation=layer['dilation'],
dropout=layer['dropout'], residual=layer['residual'],
separable=layer['separable'],
)
)
features = layer['filters']
self.encoder = Sequential(*encoder_layers)
def forward(self, x):
return self.encoder(x)
class TCSConv1d(Module):
"""
Time-Channel Separable 1D Convolution
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, separable=False):
super(TCSConv1d, self).__init__()
self.separable = separable
if separable:
self.depthwise = Conv1d(
in_channels, in_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=in_channels
)
self.pointwise = Conv1d(
in_channels, out_channels, kernel_size=1, stride=1,
dilation=dilation, bias=bias, padding=0
)
else:
self.conv = Conv1d(
in_channels, out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, bias=bias
)
def forward(self, x):
if self.separable:
x = self.depthwise(x)
x = self.pointwise(x)
else:
x = self.conv(x)
return x
class Block(Module):
"""
TCSConv, Batch Normalisation, Activation, Dropout
"""
def __init__(self, in_channels, out_channels, activation, repeat=5, kernel_size=1, stride=1, dilation=1, dropout=0.0, residual=False, separable=False):
super(Block, self).__init__()
self.use_res = residual
self.conv = ModuleList()
_in_channels = in_channels
padding = self.get_padding(kernel_size[0], stride[0], dilation[0])
# add the first n - 1 convolutions + activation
for _ in range(repeat - 1):
self.conv.extend(
self.get_tcs(
_in_channels, out_channels, kernel_size=kernel_size,
stride=stride, dilation=dilation,
padding=padding, separable=separable
)
)
self.conv.extend(self.get_activation(activation, dropout))
_in_channels = out_channels
# add the last conv and batch norm
self.conv.extend(
self.get_tcs(
_in_channels, out_channels,
kernel_size=kernel_size,
stride=stride, dilation=dilation,
padding=padding, separable=separable
)
)
# add the residual connection
if self.use_res:
self.residual = Sequential(*self.get_tcs(in_channels, out_channels))
# add the activation and dropout
self.activation = Sequential(*self.get_activation(activation, dropout))
def get_activation(self, activation, dropout):
return activation, Dropout(p=dropout)
def get_padding(self, kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Dilation and stride can not both be greater than 1")
return (kernel_size // 2) * dilation
def get_tcs(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, padding=0, bias=False, separable=False):
return [
TCSConv1d(
in_channels, out_channels, kernel_size,
stride=stride, dilation=dilation, padding=padding,
bias=bias, separable=separable
),
BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)
]
def forward(self, x):
_x = x
for layer in self.conv:
_x = layer(_x)
if self.use_res:
_x = _x + self.residual(x)
return self.activation(_x)
class Decoder(Module):
"""
Decoder
"""
def __init__(self, features, classes):
super(Decoder, self).__init__()
self.layers = Sequential(
Conv1d(features, classes, kernel_size=1, bias=True),
Permute([2, 0, 1])
)
def forward(self, x):
return log_softmax(self.layers(x), dim=-1)
|
"""
Bonito basecall
"""
import torch
import numpy as np
from functools import partial
from bonito.fast5 import ReadChunk
from bonito.aligner import align_map
from bonito.multiprocessing import process_map, thread_map
from bonito.util import mean_qscore_from_qstring, half_supported
from bonito.util import chunk, stitch, batchify, unbatchify, permute, concat
def basecall(model, reads, aligner=None, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None):
"""
Basecalls a set of reads.
"""
chunks = (
(read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads
)
scores = unbatchify(
(k, compute_scores(model, v)) for k, v in batchify(chunks, batchsize)
)
scores = (
(read, {'scores': stitch(v, chunksize, overlap, len(read.signal), model.stride)}) for read, v in scores
)
decoder = partial(decode, decode=model.decode, beamsize=beamsize, qscores=qscores)
basecalls = process_map(decoder, scores, n_proc=4)
if aligner: return align_map(aligner, basecalls)
return basecalls
def compute_scores(model, batch):
"""
Compute scores for model.
"""
with torch.no_grad():
device = next(model.parameters()).device
chunks = batch.to(torch.half).to(device)
probs = permute(model(chunks), 'TNC', 'NTC')
return probs.cpu().to(torch.float32)
def decode(scores, decode, beamsize=5, qscores=False):
"""
Convert the network scores into a sequence.
"""
# do a greedy decode to get a sensible qstring to compute the mean qscore from
seq, path = decode(scores['scores'], beamsize=1, qscores=True, return_path=True)
seq, qstring = seq[:len(path)], seq[len(path):]
mean_qscore = mean_qscore_from_qstring(qstring)
# beam search will produce a better sequence but doesn't produce a sensible qstring/path
if not (qscores or beamsize == 1):
try:
seq = decode(scores['scores'], beamsize=beamsize)
path = None
qstring = '*'
        except Exception:
pass
return {'sequence': seq, 'qstring': qstring, 'mean_qscore': mean_qscore, 'path': path}
|
from setuptools import setup, find_packages
setup(
name = 'BS-RoFormer',
packages = find_packages(exclude=[]),
version = '0.0.2',
license='MIT',
description = 'BS-RoFormer - Band-Split Rotary Transformer for SOTA Music Source Separation',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/BS-RoFormer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'music source separation'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'rotary-embedding-torch>=0.3.0',
'torch>=2.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from bs_roformer.bs_roformer import BSRoformer
|
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = FlashAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
return self.flash_attn(q, k, v)
# similarity
        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
        out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
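# Minimal smoke test, added for illustration (not part of the original module): it
# exercises the plain einsum attention path on random (batch, heads, seq, dim_head)
# tensors; all sizes are arbitrary.
if __name__ == '__main__':
    attend = Attend(dropout = 0., flash = False)
    q, k, v = (torch.randn(2, 8, 128, 64) for _ in range(3))
    out = attend(q, k, v)
    print(out.shape)  # torch.Size([2, 8, 128, 64])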
|
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from bs_roformer.attend import Attend
from beartype.typing import Tuple, Optional, List
from beartype import beartype
from rotary_embedding_torch import RotaryEmbedding
from einops import rearrange, pack, unpack
# helper functions
def exists(val):
return val is not None
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
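# RMSNorm here is unit L2-normalisation followed by a learned per-channel scale:
# out = x / ||x||_2 * sqrt(dim) * gamma, where sqrt(dim) restores the expected magnitude.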
# attention
class FeedForward(Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
dim_inner = int(dim * mult)
self.net = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_inner, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
rotary_embed = None,
flash = True
):
super().__init__()
self.heads = heads
self.scale = dim_head **-0.5
dim_inner = heads * dim_head
self.rotary_embed = rotary_embed
self.attend = Attend(flash = flash, dropout = dropout)
self.norm = RMSNorm(dim)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(dim_inner, dim, bias = False),
nn.Dropout(dropout)
)
def forward(self, x):
x = self.norm(x)
q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = self.heads)
if exists(self.rotary_embed):
q = self.rotary_embed.rotate_queries_or_keys(q)
k = self.rotary_embed.rotate_queries_or_keys(k)
out = self.attend(q, k, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
norm_output = True,
rotary_embed = None,
flash_attn = True
):
super().__init__()
self.layers = ModuleList([])
for _ in range(depth):
self.layers.append(ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_embed = rotary_embed, flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim) if norm_output else nn.Identity()
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
# bandsplit module
class BandSplit(Module):
@beartype
def __init__(
self,
dim,
dim_inputs: Tuple[int, ...]
):
super().__init__()
self.dim_inputs = dim_inputs
self.to_features = ModuleList([])
for dim_in in dim_inputs:
net = nn.Sequential(
RMSNorm(dim_in),
nn.Linear(dim_in, dim)
)
self.to_features.append(net)
def forward(self, x):
x = x.split(self.dim_inputs, dim = -1)
outs = []
for split_input, to_feature in zip(x, self.to_features):
split_output = to_feature(split_input)
outs.append(split_output)
return torch.stack(outs, dim = -2)
class LinearGLUWithTanH(Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x.tanh() * gate.sigmoid()
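# A GLU variant with tanh on the value branch: the projection is split in half and the
# output is tanh(a) * sigmoid(b), which keeps the estimated mask values inside (-1, 1).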
class MaskEstimator(Module):
@beartype
def __init__(
self,
dim,
dim_inputs: Tuple[int, ...],
depth
):
super().__init__()
self.dim_inputs = dim_inputs
self.to_freqs = ModuleList([])
for dim_in in dim_inputs:
net = []
for ind in range(depth):
is_last = ind == (depth - 1)
dim_out = dim if not is_last else dim_in
net.append(LinearGLUWithTanH(dim, dim_out))
self.to_freqs.append(nn.Sequential(*net))
def forward(self, x):
x = x.unbind(dim = -2)
outs = []
for band_features, to_freq in zip(x, self.to_freqs):
freq_out = to_freq(band_features)
outs.append(freq_out)
return torch.cat(outs, dim = -1)
# main class
class BSRoformer(Module):
@beartype
def __init__(
self,
dim,
*,
depth,
time_transformer_depth = 2,
freq_transformer_depth = 2,
freqs_per_bands: Tuple[int, ...] = (256, 257), # in the paper, they divide into ~60 bands, test with 1 for starters
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
flash_attn = True,
dim_freqs_in = 513,
stft_n_fft = 1024,
stft_hop_length = 256,
stft_win_length = 1024,
stft_normalized = False,
mask_estimator_depth = 1,
multi_stft_resolution_loss_weight = 1.,
multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
multi_stft_hop_size = 147,
multi_stft_normalized = False
):
super().__init__()
self.layers = ModuleList([])
transformer_kwargs = dict(
dim = dim,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
flash_attn = flash_attn
)
time_rotary_embed = RotaryEmbedding(dim = dim_head)
freq_rotary_embed = RotaryEmbedding(dim = dim_head)
for _ in range(depth):
self.layers.append(nn.ModuleList([
Transformer(depth = time_transformer_depth, rotary_embed = time_rotary_embed, **transformer_kwargs),
Transformer(depth = freq_transformer_depth, rotary_embed = freq_rotary_embed, **transformer_kwargs)
]))
self.stft_kwargs = dict(
n_fft = stft_n_fft,
hop_length = stft_hop_length,
win_length = stft_win_length,
normalized = stft_normalized
)
freqs = torch.stft(torch.randn(1, 1024), **self.stft_kwargs, return_complex = True).shape[1]
assert len(freqs_per_bands) > 1
assert sum(freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings'
freqs_per_bands_with_complex = tuple(2 * f for f in freqs_per_bands)
self.band_split = BandSplit(
dim = dim,
dim_inputs = freqs_per_bands_with_complex
)
self.mask_estimator = MaskEstimator(
dim = dim,
dim_inputs = freqs_per_bands_with_complex,
depth = mask_estimator_depth
)
# for the multi-resolution stft loss
self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
self.multi_stft_n_fft = stft_n_fft
self.multi_stft_kwargs = dict(
hop_length = multi_stft_hop_size,
normalized = multi_stft_normalized
)
def forward(
self,
raw_audio,
target = None,
return_loss_breakdown = False
):
"""
einops
b - batch
f - freq
t - time
c - complex (2)
d - feature dimension
"""
# to stft
stft_repr = torch.stft(raw_audio, **self.stft_kwargs, return_complex = True)
stft_repr = torch.view_as_real(stft_repr)
x = rearrange(stft_repr, 'b f t c -> b t (f c)')
x = self.band_split(x)
# axial / hierarchical attention
for time_transformer, freq_transformer in self.layers:
x = rearrange(x, 'b t f d -> b f t d')
x, ps = pack([x], 'b * d')
x = time_transformer(x)
x, = unpack(x, ps, 'b * d')
x = rearrange(x, 'b f t d -> b t f d')
x, ps = pack([x], 'b * d')
x = freq_transformer(x)
x, = unpack(x, ps, 'b * d')
mask = self.mask_estimator(x)
mask = rearrange(mask, 'b t (f c) -> b f t c', c = 2)
# modulate frequency representation
stft_repr = stft_repr * mask
# istft
stft_repr = torch.view_as_complex(stft_repr)
recon_audio = torch.istft(stft_repr, **self.stft_kwargs, return_complex = False)
# if a target is passed in, calculate loss for learning
if not exists(target):
return recon_audio
target = target[..., :recon_audio.shape[-1]] # protect against lost length on istft
loss = F.l1_loss(recon_audio, target)
multi_stft_resolution_loss = 0.
for window_size in self.multi_stft_resolutions_window_sizes:
res_stft_kwargs = dict(
n_fft = max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft
win_length = window_size,
return_complex = True,
**self.multi_stft_kwargs,
)
recon_Y = torch.stft(recon_audio, **res_stft_kwargs)
target_Y = torch.stft(target, **res_stft_kwargs)
multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)
weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight
total_loss = loss + weighted_multi_resolution_loss
if not return_loss_breakdown:
return total_loss
return total_loss, (loss, multi_stft_resolution_loss)
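# Minimal inference sketch, added for illustration (not part of the original module);
# the audio length and the small dims are arbitrary, and flash attention is disabled so
# the example also runs on CPU / older PyTorch.
if __name__ == '__main__':
    model = BSRoformer(dim = 64, depth = 1, time_transformer_depth = 1, freq_transformer_depth = 1, flash_attn = False)
    raw_audio = torch.randn(2, 88200)   # two mono clips, ~2s at 44.1kHz
    recovered = model(raw_audio)        # masked and reconstructed audio, roughly the same length
    print(recovered.shape)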
|
import random
import torch
import torch.linalg
import numpy as np
class BlackHole(object):
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, name):
return self
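# BlackHole silently swallows attribute assignment, attribute access and calls, so it can
# stand in for e.g. a logger or summary writer when output should be discarded:
#   writer = BlackHole()
#   writer.add_scalar('loss', 0.1, step)  # no-op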
def seed_all(seed):
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def recursive_to(obj, device):
if isinstance(obj, torch.Tensor):
try:
return obj.cuda(device=device, non_blocking=True)
except RuntimeError:
return obj.to(device)
elif isinstance(obj, list):
return [recursive_to(o, device=device) for o in obj]
elif isinstance(obj, tuple):
        return tuple(recursive_to(o, device=device) for o in obj)
elif isinstance(obj, dict):
return {k: recursive_to(v, device=device) for k, v in obj.items()}
else:
return obj
|
import warnings
import torch
from Bio import BiopythonWarning
from Bio.PDB import Selection
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import three_to_one, three_to_index, is_aa
NON_STANDARD_SUBSTITUTIONS = {
'2AS':'ASP', '3AH':'HIS', '5HP':'GLU', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM':'ALA', 'ALO':'THR', 'ALY':'LYS', 'ARM':'ARG',
'ASA':'ASP', 'ASB':'ASP', 'ASK':'ASP', 'ASL':'ASP', 'ASQ':'ASP', 'AYA':'ALA', 'BCS':'CYS', 'BHD':'ASP', 'BMT':'THR', 'BNN':'ALA',
'BUC':'CYS', 'BUG':'LEU', 'C5C':'CYS', 'C6C':'CYS', 'CAS':'CYS', 'CCS':'CYS', 'CEA':'CYS', 'CGU':'GLU', 'CHG':'ALA', 'CLE':'LEU', 'CME':'CYS',
'CSD':'ALA', 'CSO':'CYS', 'CSP':'CYS', 'CSS':'CYS', 'CSW':'CYS', 'CSX':'CYS', 'CXM':'MET', 'CY1':'CYS', 'CY3':'CYS', 'CYG':'CYS',
'CYM':'CYS', 'CYQ':'CYS', 'DAH':'PHE', 'DAL':'ALA', 'DAR':'ARG', 'DAS':'ASP', 'DCY':'CYS', 'DGL':'GLU', 'DGN':'GLN', 'DHA':'ALA',
'DHI':'HIS', 'DIL':'ILE', 'DIV':'VAL', 'DLE':'LEU', 'DLY':'LYS', 'DNP':'ALA', 'DPN':'PHE', 'DPR':'PRO', 'DSN':'SER', 'DSP':'ASP',
'DTH':'THR', 'DTR':'TRP', 'DTY':'TYR', 'DVA':'VAL', 'EFC':'CYS', 'FLA':'ALA', 'FME':'MET', 'GGL':'GLU', 'GL3':'GLY', 'GLZ':'GLY',
'GMA':'GLU', 'GSC':'GLY', 'HAC':'ALA', 'HAR':'ARG', 'HIC':'HIS', 'HIP':'HIS', 'HMR':'ARG', 'HPQ':'PHE', 'HTR':'TRP', 'HYP':'PRO',
'IAS':'ASP', 'IIL':'ILE', 'IYR':'TYR', 'KCX':'LYS', 'LLP':'LYS', 'LLY':'LYS', 'LTR':'TRP', 'LYM':'LYS', 'LYZ':'LYS', 'MAA':'ALA', 'MEN':'ASN',
'MHS':'HIS', 'MIS':'SER', 'MLE':'LEU', 'MPQ':'GLY', 'MSA':'GLY', 'MSE':'MET', 'MVA':'VAL', 'NEM':'HIS', 'NEP':'HIS', 'NLE':'LEU',
'NLN':'LEU', 'NLP':'LEU', 'NMC':'GLY', 'OAS':'SER', 'OCS':'CYS', 'OMT':'MET', 'PAQ':'TYR', 'PCA':'GLU', 'PEC':'CYS', 'PHI':'PHE',
'PHL':'PHE', 'PR3':'CYS', 'PRR':'ALA', 'PTR':'TYR', 'PYX':'CYS', 'SAC':'SER', 'SAR':'GLY', 'SCH':'CYS', 'SCS':'CYS', 'SCY':'CYS',
'SEL':'SER', 'SEP':'SER', 'SET':'SER', 'SHC':'CYS', 'SHR':'LYS', 'SMC':'CYS', 'SOC':'CYS', 'STY':'TYR', 'SVA':'SER', 'TIH':'ALA',
'TPL':'TRP', 'TPO':'THR', 'TPQ':'ALA', 'TRG':'LYS', 'TRO':'TRP', 'TYB':'TYR', 'TYI':'TYR', 'TYQ':'TYR', 'TYS':'TYR', 'TYY':'TYR'
}
RESIDUE_SIDECHAIN_POSTFIXES = {
'A': ['B'],
'R': ['B', 'G', 'D', 'E', 'Z', 'H1', 'H2'],
'N': ['B', 'G', 'D1', 'D2'],
'D': ['B', 'G', 'D1', 'D2'],
'C': ['B', 'G'],
'E': ['B', 'G', 'D', 'E1', 'E2'],
'Q': ['B', 'G', 'D', 'E1', 'E2'],
'G': [],
'H': ['B', 'G', 'D1', 'D2', 'E1', 'E2'],
'I': ['B', 'G1', 'G2', 'D1'],
'L': ['B', 'G', 'D1', 'D2'],
'K': ['B', 'G', 'D', 'E', 'Z'],
'M': ['B', 'G', 'D', 'E'],
'F': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z'],
'P': ['B', 'G', 'D'],
'S': ['B', 'G'],
'T': ['B', 'G1', 'G2'],
'W': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'E3', 'Z2', 'Z3', 'H2'],
'Y': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z', 'H'],
'V': ['B', 'G1', 'G2'],
}
GLY_INDEX = 5
ATOM_N, ATOM_CA, ATOM_C, ATOM_O, ATOM_CB = 0, 1, 2, 3, 4
def augmented_three_to_one(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return three_to_one(three)
def augmented_three_to_index(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return three_to_index(three)
def augmented_is_aa(three):
if three in NON_STANDARD_SUBSTITUTIONS:
three = NON_STANDARD_SUBSTITUTIONS[three]
return is_aa(three, standard=True)
def is_hetero_residue(res):
return len(res.id[0].strip()) > 0
def get_atom_name_postfix(atom):
name = atom.get_name()
if name in ('N', 'CA', 'C', 'O'):
return name
if name[-1].isnumeric():
return name[-2:]
else:
return name[-1:]
def get_residue_pos14(res):
pos14 = torch.full([14, 3], float('inf'))
suffix_to_atom = {get_atom_name_postfix(a):a for a in res.get_atoms()}
atom_order = ['N', 'CA', 'C', 'O'] + RESIDUE_SIDECHAIN_POSTFIXES[augmented_three_to_one(res.get_resname())]
for i, atom_suffix in enumerate(atom_order):
if atom_suffix not in suffix_to_atom: continue
pos14[i,0], pos14[i,1], pos14[i,2] = suffix_to_atom[atom_suffix].get_coord().tolist()
return pos14
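# pos14 layout: slots 0-3 hold the backbone atoms N, CA, C, O and the remaining slots hold
# side-chain atoms in the order given by RESIDUE_SIDECHAIN_POSTFIXES for that residue type;
# atoms missing from the structure stay at +inf and are masked out downstream via isfinite().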
def parse_pdb(path, model_id=0):
warnings.simplefilter('ignore', BiopythonWarning)
parser = PDBParser()
structure = parser.get_structure(None, path)
return parse_complex(structure, model_id)
def parse_complex(structure, model_id=None):
if model_id is not None:
structure = structure[model_id]
chains = Selection.unfold_entities(structure, 'C')
aa, resseq, icode, seq = [], [], [], []
pos14, pos14_mask = [], []
chain_id, chain_seq = [], []
for i, chain in enumerate(chains):
seq_this = 0
for res in chain:
resname = res.get_resname()
if not augmented_is_aa(resname): continue
if not (res.has_id('CA') and res.has_id('C') and res.has_id('N')): continue
# Chain
chain_id.append(chain.get_id())
chain_seq.append(i+1)
# Residue types
restype = augmented_three_to_index(resname)
aa.append(restype)
# Atom coordinates
pos14_this = get_residue_pos14(res)
pos14_mask_this = pos14_this.isfinite()
pos14.append(pos14_this.nan_to_num(posinf=99999))
pos14_mask.append(pos14_mask_this)
# Sequential number
resseq_this = int(res.get_id()[1])
icode_this = res.get_id()[2]
if seq_this == 0:
seq_this = 1
else:
d_resseq = resseq_this - resseq[-1]
if d_resseq == 0: seq_this += 1
else: seq_this += d_resseq
resseq.append(resseq_this)
icode.append(icode_this)
seq.append(seq_this)
if len(aa) == 0:
return None
return {
'name': structure.get_id(),
# Chain
'chain_id': ''.join(chain_id),
'chain_seq': torch.LongTensor(chain_seq),
# Sequence
'aa': torch.LongTensor(aa),
'resseq': torch.LongTensor(resseq),
'icode': ''.join(icode),
'seq': torch.LongTensor(seq),
# Atom positions
'pos14': torch.stack(pos14),
'pos14_mask': torch.stack(pos14_mask),
}
|
import math
import torch
from torch.utils.data._utils.collate import default_collate
from .protein import ATOM_CA, parse_pdb
class PaddingCollate(object):
def __init__(self, length_ref_key='mutation_mask', pad_values={'aa': 20, 'pos14': float('999'), 'icode': ' ', 'chain_id': '-'}, donot_pad={'foldx'}, eight=False):
super().__init__()
self.length_ref_key = length_ref_key
self.pad_values = pad_values
self.donot_pad = donot_pad
self.eight = eight
def _pad_last(self, x, n, value=0):
if isinstance(x, torch.Tensor):
assert x.size(0) <= n
if x.size(0) == n:
return x
pad_size = [n - x.size(0)] + list(x.shape[1:])
pad = torch.full(pad_size, fill_value=value).to(x)
return torch.cat([x, pad], dim=0)
elif isinstance(x, list):
pad = [value] * (n - len(x))
return x + pad
elif isinstance(x, str):
if value == 0: # Won't pad strings if not specified
return x
pad = value * (n - len(x))
return x + pad
elif isinstance(x, dict):
padded = {}
for k, v in x.items():
if k in self.donot_pad:
padded[k] = v
else:
padded[k] = self._pad_last(v, n, value=self._get_pad_value(k))
return padded
else:
return x
@staticmethod
def _get_pad_mask(l, n):
return torch.cat([
torch.ones([l], dtype=torch.bool),
torch.zeros([n-l], dtype=torch.bool)
], dim=0)
def _get_pad_value(self, key):
if key not in self.pad_values:
return 0
return self.pad_values[key]
def __call__(self, data_list):
max_length = max([data[self.length_ref_key].size(0) for data in data_list])
if self.eight:
max_length = math.ceil(max_length / 8) * 8
data_list_padded = []
for data in data_list:
data_padded = {
k: self._pad_last(v, max_length, value=self._get_pad_value(k))
for k, v in data.items() if k in ('wt', 'mut', 'ddG', 'mutation_mask', 'index', 'mutation')
}
data_padded['mask'] = self._get_pad_mask(data[self.length_ref_key].size(0), max_length)
data_list_padded.append(data_padded)
return default_collate(data_list_padded)
def _mask_list(l, mask):
return [l[i] for i in range(len(l)) if mask[i]]
def _mask_string(s, mask):
return ''.join([s[i] for i in range(len(s)) if mask[i]])
def _mask_dict_recursively(d, mask):
out = {}
for k, v in d.items():
if isinstance(v, torch.Tensor) and v.size(0) == mask.size(0):
out[k] = v[mask]
elif isinstance(v, list) and len(v) == mask.size(0):
out[k] = _mask_list(v, mask)
elif isinstance(v, str) and len(v) == mask.size(0):
out[k] = _mask_string(v, mask)
elif isinstance(v, dict):
out[k] = _mask_dict_recursively(v, mask)
else:
out[k] = v
return out
class KnnResidue(object):
def __init__(self, num_neighbors=128):
super().__init__()
self.num_neighbors = num_neighbors
def __call__(self, data):
pos_CA = data['wt']['pos14'][:, ATOM_CA]
pos_CA_mut = pos_CA[data['mutation_mask']]
diff = pos_CA_mut.view(1, -1, 3) - pos_CA.view(-1, 1, 3)
dist = torch.linalg.norm(diff, dim=-1)
try:
mask = torch.zeros([dist.size(0)], dtype=torch.bool)
mask[ dist.min(dim=1)[0].argsort()[:self.num_neighbors] ] = True
except IndexError as e:
print(data)
raise e
return _mask_dict_recursively(data, mask)
def load_wt_mut_pdb_pair(wt_path, mut_path):
data_wt = parse_pdb(wt_path)
data_mut = parse_pdb(mut_path)
transform = KnnResidue()
collate_fn = PaddingCollate()
mutation_mask = (data_wt['aa'] != data_mut['aa'])
batch = collate_fn([transform({'wt': data_wt, 'mut': data_mut, 'mutation_mask': mutation_mask})])
return batch
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.residue import PerResidueEncoder
from models.attention import GAEncoder
from models.common import get_pos_CB, construct_3d_basis
from utils.protein import ATOM_N, ATOM_CA, ATOM_C
class ComplexEncoder(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.relpos_embedding = nn.Embedding(cfg.max_relpos*2+2, cfg.pair_feat_dim)
self.residue_encoder = PerResidueEncoder(cfg.node_feat_dim)
if cfg.geomattn is not None:
self.ga_encoder = GAEncoder(
node_feat_dim = cfg.node_feat_dim,
pair_feat_dim = cfg.pair_feat_dim,
num_layers = cfg.geomattn.num_layers,
spatial_attn_mode = cfg.geomattn.spatial_attn_mode,
)
else:
self.out_mlp = nn.Sequential(
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim),
)
def forward(self, pos14, aa, seq, chain, mask_atom):
"""
Args:
pos14: (N, L, 14, 3).
aa: (N, L).
seq: (N, L).
chain: (N, L).
mask_atom: (N, L, 14)
Returns:
(N, L, node_ch)
"""
same_chain = (chain[:, None, :] == chain[:, :, None]) # (N, L, L)
relpos = (seq[:, None, :] - seq[:, :, None]).clamp(min=-self.cfg.max_relpos, max=self.cfg.max_relpos) + self.cfg.max_relpos # (N, L, L)
relpos = torch.where(same_chain, relpos, torch.full_like(relpos, fill_value=self.cfg.max_relpos*2+1))
pair_feat = self.relpos_embedding(relpos) # (N, L, L, pair_ch)
R = construct_3d_basis(pos14[:, :, ATOM_CA], pos14[:, :, ATOM_C], pos14[:, :, ATOM_N])
# Residue encoder
res_feat = self.residue_encoder(aa, pos14, mask_atom)
# Geom encoder
t = pos14[:, :, ATOM_CA]
mask_residue = mask_atom[:, :, ATOM_CA]
res_feat = self.ga_encoder(R, t, get_pos_CB(pos14, mask_atom), res_feat, pair_feat, mask_residue)
return res_feat
class DDGReadout(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(feat_dim*2, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim)
)
self.project = nn.Linear(feat_dim, 1, bias=False)
def forward(self, node_feat_wt, node_feat_mut, mask=None):
"""
Args:
node_feat_wt: (N, L, F).
node_feat_mut: (N, L, F).
mask: (N, L).
"""
feat_wm = torch.cat([node_feat_wt, node_feat_mut], dim=-1)
feat_mw = torch.cat([node_feat_mut, node_feat_wt], dim=-1)
feat_diff = self.mlp(feat_wm) - self.mlp(feat_mw) # (N, L, F)
# feat_diff = self.mlp(node_feat_wt) - self.mlp(node_feat_mut)
per_residue_ddg = self.project(feat_diff).squeeze(-1) # (N, L)
if mask is not None:
per_residue_ddg = per_residue_ddg * mask
ddg = per_residue_ddg.sum(dim=1) # (N,)
return ddg
class DDGPredictor(nn.Module):
def __init__(self, cfg):
super().__init__()
self.encoder = ComplexEncoder(cfg)
self.ddG_readout = DDGReadout(cfg.node_feat_dim)
def forward(self, complex_wt, complex_mut, ddG_true=None):
mask_atom_wt = complex_wt['pos14_mask'].all(dim=-1) # (N, L, 14)
mask_atom_mut = complex_mut['pos14_mask'].all(dim=-1)
feat_wt = self.encoder(complex_wt['pos14'], complex_wt['aa'], complex_wt['seq'], complex_wt['chain_seq'], mask_atom_wt)
feat_mut = self.encoder(complex_mut['pos14'], complex_mut['aa'], complex_mut['seq'], complex_mut['chain_seq'], mask_atom_mut)
mask_res = mask_atom_wt[:, :, ATOM_CA]
ddG_pred = self.ddG_readout(feat_wt, feat_mut, mask_res) # One mask is enough
if ddG_true is None:
return ddG_pred
else:
losses = {
'ddG': F.mse_loss(ddG_pred, ddG_true),
}
return losses, ddG_pred
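# Expected inputs, as used in forward above: complex_wt and complex_mut are dicts with
# 'pos14' (N, L, 14, 3), 'pos14_mask' (N, L, 14, 3), 'aa', 'seq' and 'chain_seq' (N, L),
# i.e. the padded batches produced by PaddingCollate from parse_pdb outputs.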
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .common import mask_zero, global_to_local, local_to_global, normalize_vector
def _alpha_from_logits(logits, mask, inf=1e5):
"""
Args:
logits: Logit matrices, (N, L_i, L_j, num_heads).
mask: Masks, (N, L).
Returns:
alpha: Attention weights.
"""
N, L, _, _ = logits.size()
mask_row = mask.view(N, L, 1, 1).expand_as(logits) # (N, L, *, *)
mask_pair = mask_row * mask_row.permute(0, 2, 1, 3) # (N, L, L, *)
logits = torch.where(mask_pair, logits, logits-inf)
alpha = torch.softmax(logits, dim=2) # (N, L, L, num_heads)
alpha = torch.where(mask_row, alpha, torch.zeros_like(alpha))
return alpha
def _heads(x, n_heads, n_ch):
"""
Args:
x: (..., num_heads * num_channels)
Returns:
(..., num_heads, num_channels)
"""
s = list(x.size())[:-1] + [n_heads, n_ch]
return x.view(*s)
class GeometricAttention(nn.Module):
def __init__(self, node_feat_dim, pair_feat_dim, spatial_attn_mode='CB', value_dim=16, query_key_dim=16, num_query_points=8, num_value_points=8, num_heads=12):
super().__init__()
self.node_feat_dim = node_feat_dim
self.pair_feat_dim = pair_feat_dim
self.value_dim = value_dim
self.query_key_dim = query_key_dim
self.num_query_points = num_query_points
self.num_value_points = num_value_points
self.num_heads = num_heads
assert spatial_attn_mode in ('CB', 'vpoint')
self.spatial_attn_mode = spatial_attn_mode
# Node
self.proj_query = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
self.proj_key = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
self.proj_value = nn.Linear(node_feat_dim, value_dim*num_heads, bias=False)
# Pair
self.proj_pair_bias = nn.Linear(pair_feat_dim, num_heads, bias=False)
# Spatial
self.spatial_coef = nn.Parameter(torch.full([1, 1, 1, self.num_heads], fill_value=np.log(np.exp(1.) - 1.)), requires_grad=True)
if spatial_attn_mode == 'vpoint':
self.proj_query_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
self.proj_key_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
self.proj_value_point = nn.Linear(node_feat_dim, num_value_points*num_heads*3, bias=False)
# Output
if spatial_attn_mode == 'CB':
self.out_transform = nn.Linear(
in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*(3+3+1)),
out_features = node_feat_dim,
)
elif spatial_attn_mode == 'vpoint':
self.out_transform = nn.Linear(
in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*num_value_points*(3+3+1)),
out_features = node_feat_dim,
)
self.layer_norm = nn.LayerNorm(node_feat_dim)
def _node_logits(self, x):
query_l = _heads(self.proj_query(x), self.num_heads, self.query_key_dim) # (N, L, n_heads, qk_ch)
key_l = _heads(self.proj_key(x), self.num_heads, self.query_key_dim) # (N, L, n_heads, qk_ch)
query_l = query_l.permute(0, 2, 1, 3) # (N,L1,H,C) -> (N,H,L1,C)
key_l = key_l.permute(0, 2, 3, 1) # (N,L2,H,C) -> (N,H,C,L2)
logits = torch.matmul(query_l, key_l) # (N,H,L1,L2)
logits = logits.permute(0, 2, 3, 1) # (N,L1,L2,H)
# logits = (query_l.unsqueeze(2) * key_l.unsqueeze(1) * (1 / np.sqrt(self.query_key_dim))).sum(-1) # (N, L, L, num_heads)
return logits
def _pair_logits(self, z):
logits_pair = self.proj_pair_bias(z)
return logits_pair
def _beta_logits(self, R, t, p_CB):
N, L, _ = t.size()
qk = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3)
sum_sq_dist = ((qk.unsqueeze(2) - qk.unsqueeze(1)) ** 2).sum(-1) # (N, L, L, n_heads)
gamma = F.softplus(self.spatial_coef)
        logits_beta = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / 9)) / 2)
        return logits_beta
def _spatial_logits(self, R, t, x):
N, L, _ = t.size()
# Query
query_points = _heads(self.proj_query_point(x), self.num_heads*self.num_query_points, 3) # (N, L, n_heads * n_pnts, 3)
query_points = local_to_global(R, t, query_points) # Global query coordinates, (N, L, n_heads * n_pnts, 3)
query_s = query_points.reshape(N, L, self.num_heads, -1) # (N, L, n_heads, n_pnts*3)
# Key
        key_points = _heads(self.proj_key_point(x), self.num_heads*self.num_query_points, 3)   # (N, L, n_heads * n_pnts, 3)
key_points = local_to_global(R, t, key_points) # Global key coordinates, (N, L, n_heads * n_pnts, 3)
key_s = key_points.reshape(N, L, self.num_heads, -1) # (N, L, n_heads, n_pnts*3)
# Q-K Product
sum_sq_dist = ((query_s.unsqueeze(2) - key_s.unsqueeze(1)) ** 2).sum(-1) # (N, L, L, n_heads)
gamma = F.softplus(self.spatial_coef)
logits_spatial = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / (9 * self.num_query_points))) / 2) # (N, L, L, n_heads)
return logits_spatial
def _pair_aggregation(self, alpha, z):
N, L = z.shape[:2]
feat_p2n = alpha.unsqueeze(-1) * z.unsqueeze(-2) # (N, L, L, n_heads, C)
feat_p2n = feat_p2n.sum(dim=2) # (N, L, n_heads, C)
return feat_p2n.reshape(N, L, -1)
def _node_aggregation(self, alpha, x):
N, L = x.shape[:2]
        value_l = _heads(self.proj_value(x), self.num_heads, self.value_dim)  # (N, L, n_heads, v_ch)
feat_node = alpha.unsqueeze(-1) * value_l.unsqueeze(1) # (N, L, L, n_heads, *) @ (N, *, L, n_heads, v_ch)
feat_node = feat_node.sum(dim=2) # (N, L, n_heads, v_ch)
return feat_node.reshape(N, L, -1)
def _beta_aggregation(self, alpha, R, t, p_CB, x):
N, L, _ = t.size()
v = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3) # (N, L, n_heads, 3)
aggr = alpha.reshape(N, L, L, self.num_heads, 1) * v.unsqueeze(1) # (N, *, L, n_heads, 3)
aggr = aggr.sum(dim=2)
feat_points = global_to_local(R, t, aggr) # (N, L, n_heads, 3)
feat_distance = feat_points.norm(dim=-1)
feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4)
feat_spatial = torch.cat([
feat_points.reshape(N, L, -1),
feat_distance.reshape(N, L, -1),
feat_direction.reshape(N, L, -1),
], dim=-1)
return feat_spatial
def _spatial_aggregation(self, alpha, R, t, x):
N, L, _ = t.size()
value_points = _heads(self.proj_value_point(x), self.num_heads*self.num_value_points, 3) # (N, L, n_heads * n_v_pnts, 3)
value_points = local_to_global(R, t, value_points.reshape(N, L, self.num_heads, self.num_value_points, 3)) # (N, L, n_heads, n_v_pnts, 3)
aggr_points = alpha.reshape(N, L, L, self.num_heads, 1, 1) * value_points.unsqueeze(1) # (N, *, L, n_heads, n_pnts, 3)
aggr_points = aggr_points.sum(dim=2) # (N, L, n_heads, n_pnts, 3)
feat_points = global_to_local(R, t, aggr_points) # (N, L, n_heads, n_pnts, 3)
feat_distance = feat_points.norm(dim=-1) # (N, L, n_heads, n_pnts)
feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4) # (N, L, n_heads, n_pnts, 3)
feat_spatial = torch.cat([
feat_points.reshape(N, L, -1),
feat_distance.reshape(N, L, -1),
feat_direction.reshape(N, L, -1),
], dim=-1)
return feat_spatial
def forward_beta(self, R, t, p_CB, x, z, mask):
"""
Args:
R: Frame basis matrices, (N, L, 3, 3_index).
t: Frame external (absolute) coordinates, (N, L, 3).
x: Node-wise features, (N, L, F).
z: Pair-wise features, (N, L, L, C).
mask: Masks, (N, L).
Returns:
x': Updated node-wise features, (N, L, F).
"""
# Attention logits
logits_node = self._node_logits(x)
logits_pair = self._pair_logits(z)
logits_spatial = self._beta_logits(R, t, p_CB)
        # Sum the logits and apply softmax.
logits_sum = logits_node + logits_pair + logits_spatial
alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask) # (N, L, L, n_heads)
# Aggregate features
feat_p2n = self._pair_aggregation(alpha, z)
feat_node = self._node_aggregation(alpha, x)
feat_spatial = self._beta_aggregation(alpha, R, t, p_CB, x)
# Finally
feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1)) # (N, L, F)
feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
x_updated = self.layer_norm(x + feat_all)
return x_updated
def forward_vpoint(self, R, t, p_CB, x, z, mask):
"""
Args:
R: Frame basis matrices, (N, L, 3, 3_index).
t: Frame external (absolute) coordinates, (N, L, 3).
x: Node-wise features, (N, L, F).
z: Pair-wise features, (N, L, L, C).
mask: Masks, (N, L).
Returns:
x': Updated node-wise features, (N, L, F).
"""
# Attention logits
logits_node = self._node_logits(x)
logits_pair = self._pair_logits(z)
logits_spatial = self._spatial_logits(R, t, x)
        # Sum the logits and apply softmax.
logits_sum = logits_node + logits_pair + logits_spatial
alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask) # (N, L, L, n_heads)
# Aggregate features
feat_p2n = self._pair_aggregation(alpha, z)
feat_node = self._node_aggregation(alpha, x)
feat_spatial = self._spatial_aggregation(alpha, R, t, x)
# Finally
feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1)) # (N, L, F)
feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
x_updated = self.layer_norm(x + feat_all)
return x_updated
def forward(self, R, t, p_CB, x, z, mask):
if self.spatial_attn_mode == 'CB':
return self.forward_beta(R, t, p_CB, x, z, mask)
else:
return self.forward_vpoint(R, t, p_CB, x, z, mask)
class GAEncoder(nn.Module):
def __init__(self, node_feat_dim, pair_feat_dim, num_layers, spatial_attn_mode='CB'):
super().__init__()
self.blocks = nn.ModuleList([
GeometricAttention(node_feat_dim, pair_feat_dim, spatial_attn_mode=spatial_attn_mode)
for _ in range(num_layers)
])
def forward(self, R, t, p_CB, x, z, mask):
for block in self.blocks:
x = block(R, t, p_CB, x, z, mask) # Residual connection within the block
return x
|
import torch
import torch.nn as nn
from models.common import PositionalEncoding, construct_3d_basis, global_to_local
class PerResidueEncoder(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.aatype_embed = nn.Embedding(21, feat_dim)
self.torsion_embed = PositionalEncoding()
self.mlp = nn.Sequential(
nn.Linear(21*14*3 + feat_dim, feat_dim * 2), nn.ReLU(),
nn.Linear(feat_dim * 2, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim), nn.ReLU(),
nn.Linear(feat_dim, feat_dim)
)
def forward(self, aa, pos14, atom_mask):
"""
Args:
aa: (N, L).
pos14: (N, L, 14, 3).
atom_mask: (N, L, 14).
"""
N, L = aa.size()
R = construct_3d_basis(pos14[:, :, 1], pos14[:, :, 2], pos14[:, :, 0]) # (N, L, 3, 3)
t = pos14[:, :, 1] # (N, L, 3)
crd14 = global_to_local(R, t, pos14) # (N, L, 14, 3)
crd14_mask = atom_mask[:, :, :, None].expand_as(crd14)
crd14 = torch.where(crd14_mask, crd14, torch.zeros_like(crd14))
aa_expand = aa[:, :, None, None, None].expand(N, L, 21, 14, 3)
rng_expand = torch.arange(0, 21)[None, None, :, None, None].expand(N, L, 21, 14, 3).to(aa_expand)
place_mask = (aa_expand == rng_expand)
crd_expand = crd14[:, :, None, :, :].expand(N, L, 21, 14, 3)
crd_expand = torch.where(place_mask, crd_expand, torch.zeros_like(crd_expand))
crd_feat = crd_expand.reshape(N, L, 21 * 14 * 3)
aa_feat = self.aatype_embed(aa) # (N, L, feat)
out_feat = self.mlp(torch.cat([crd_feat, aa_feat], dim=-1))
return out_feat
|
import torch
import torch.nn as nn
from utils.protein import ATOM_CA, ATOM_CB
def get_pos_CB(pos14, atom_mask):
"""
Args:
pos14: (N, L, 14, 3)
atom_mask: (N, L, 14)
"""
N, L = pos14.shape[:2]
mask_CB = atom_mask[:, :, ATOM_CB] # (N, L)
mask_CB = mask_CB[:, :, None].expand(N, L, 3)
pos_CA = pos14[:, :, ATOM_CA] # (N, L, 3)
pos_CB = pos14[:, :, ATOM_CB]
return torch.where(mask_CB, pos_CB, pos_CA)
def mask_zero(mask, value):
return torch.where(mask, value, torch.zeros_like(value))
class PositionalEncoding(nn.Module):
def __init__(self, num_funcs=6):
super().__init__()
self.num_funcs = num_funcs
self.register_buffer('freq_bands', 2.0 ** torch.linspace(0.0, num_funcs-1, num_funcs))
def get_out_dim(self, in_dim):
return in_dim * (2 * self.num_funcs + 1)
def forward(self, x):
"""
Args:
x: (..., d).
"""
shape = list(x.shape[:-1]) + [-1]
x = x.unsqueeze(-1) # (..., d, 1)
code = torch.cat([x, torch.sin(x * self.freq_bands), torch.cos(x * self.freq_bands)], dim=-1) # (..., d, 2f+1)
code = code.reshape(shape)
return code
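# Example: with num_funcs = 6, an input of size (..., d) is encoded to (..., d * 13),
# i.e. the raw value plus sin and cos at six octave-spaced frequencies per input
# dimension, matching get_out_dim: in_dim * (2 * num_funcs + 1).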
def safe_norm(x, dim=-1, keepdim=False, eps=1e-8, sqrt=True):
out = torch.clamp(torch.sum(torch.square(x), dim=dim, keepdim=keepdim), min=eps)
return torch.sqrt(out) if sqrt else out
def normalize_vector(v, dim, eps=1e-6):
return v / (torch.linalg.norm(v, ord=2, dim=dim, keepdim=True) + eps)
def project_v2v(v, e, dim):
"""
Description:
Project vector `v` onto vector `e`.
Args:
v: (N, L, 3).
e: (N, L, 3).
"""
return (e * v).sum(dim=dim, keepdim=True) * e
def construct_3d_basis(center, p1, p2):
"""
Args:
center: (N, L, 3), usually the position of C_alpha.
p1: (N, L, 3), usually the position of C.
p2: (N, L, 3), usually the position of N.
Returns
A batch of orthogonal basis matrix, (N, L, 3, 3cols_index).
The matrix is composed of 3 column vectors: [e1, e2, e3].
"""
v1 = p1 - center # (N, L, 3)
e1 = normalize_vector(v1, dim=-1)
v2 = p2 - center # (N, L, 3)
u2 = v2 - project_v2v(v2, e1, dim=-1)
e2 = normalize_vector(u2, dim=-1)
e3 = torch.cross(e1, e2, dim=-1) # (N, L, 3)
mat = torch.cat([
e1.unsqueeze(-1), e2.unsqueeze(-1), e3.unsqueeze(-1)
], dim=-1) # (N, L, 3, 3_index)
return mat
def local_to_global(R, t, p):
"""
Description:
Convert local (internal) coordinates to global (external) coordinates q.
q <- Rp + t
Args:
R: (N, L, 3, 3).
t: (N, L, 3).
p: Local coordinates, (N, L, ..., 3).
Returns:
q: Global coordinates, (N, L, ..., 3).
"""
assert p.size(-1) == 3
p_size = p.size()
N, L = p_size[0], p_size[1]
p = p.view(N, L, -1, 3).transpose(-1, -2) # (N, L, *, 3) -> (N, L, 3, *)
q = torch.matmul(R, p) + t.unsqueeze(-1) # (N, L, 3, *)
q = q.transpose(-1, -2).reshape(p_size) # (N, L, 3, *) -> (N, L, *, 3) -> (N, L, ..., 3)
return q
def global_to_local(R, t, q):
"""
Description:
Convert global (external) coordinates q to local (internal) coordinates p.
p <- R^{T}(q - t)
Args:
R: (N, L, 3, 3).
t: (N, L, 3).
q: Global coordinates, (N, L, ..., 3).
Returns:
p: Local coordinates, (N, L, ..., 3).
"""
assert q.size(-1) == 3
q_size = q.size()
N, L = q_size[0], q_size[1]
q = q.reshape(N, L, -1, 3).transpose(-1, -2) # (N, L, *, 3) -> (N, L, 3, *)
if t is None:
p = torch.matmul(R.transpose(-1, -2), q) # (N, L, 3, *)
else:
p = torch.matmul(R.transpose(-1, -2), (q - t.unsqueeze(-1))) # (N, L, 3, *)
p = p.transpose(-1, -2).reshape(q_size) # (N, L, 3, *) -> (N, L, *, 3) -> (N, L, ..., 3)
return p
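# The two transforms are mutual inverses when t is given: for any local coordinates p,
# global_to_local(R, t, local_to_global(R, t, p)) recovers p, since R built by
# construct_3d_basis is orthonormal (R^T R = I).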
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import argparse
import torch
from models.predictor import DDGPredictor
from utils.misc import *
from utils.data import *
from utils.protein import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('wt_pdb', type=str)
parser.add_argument('mut_pdb', type=str)
parser.add_argument('--model', type=str, default='./data/model.pt')
parser.add_argument('--device', type=str, default='cuda')
args = parser.parse_args()
batch = load_wt_mut_pdb_pair(args.wt_pdb, args.mut_pdb)
batch = recursive_to(batch, args.device)
ckpt = torch.load(args.model)
config = ckpt['config']
weight = ckpt['model']
model = DDGPredictor(config.model).to(args.device)
model.load_state_dict(weight)
with torch.no_grad():
model.eval()
pred = model(batch['wt'], batch['mut'])
print('Predicted ddG: %.2f' % pred.item())
|
from setuptools import setup, find_packages
setup(
name = 'aoa_pytorch',
packages = find_packages(exclude=['examples']),
version = '0.0.2',
license='MIT',
description = 'Attention on Attention - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/SAoA-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'visual question answering'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from aoa_pytorch.aoa_pytorch import AttentionOnAttention
AoA = AttentionOnAttention
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class AttentionOnAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
aoa_dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.dropout = nn.Dropout(dropout)
self.aoa = nn.Sequential(
nn.Linear(2 * inner_dim, 2 * dim),
nn.GLU(),
nn.Dropout(aoa_dropout)
)
def forward(self, x, context = None):
h = self.heads
q_ = self.to_q(x)
context = default(context, x)
kv = self.to_kv(context).chunk(2, dim = -1)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q_, *kv))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# attention
attn = dots.softmax(dim = -1)
attn = self.dropout(attn)
# weighted average of values
attn_out = einsum('b h i j, b h j d -> b h i d', attn, v)
# concat heads
out = rearrange(attn_out, 'b h n d -> b n (h d)', h = h)
# attention on attention
out = self.aoa(torch.cat((out, q_), dim = -1))
return out
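# Usage sketch, added for illustration (not part of the original module); the dimensions
# are arbitrary, and passing a context switches it to cross "attention on attention".
if __name__ == '__main__':
    attn = AttentionOnAttention(dim = 512, dim_head = 64, heads = 8)
    x = torch.randn(1, 1024, 512)
    context = torch.randn(1, 512, 512)
    out = attn(x)                     # self attention on attention, (1, 1024, 512)
    out = attn(x, context = context)  # cross attention on attention, (1, 1024, 512)
    print(out.shape)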
|
from setuptools import setup, find_packages
setup(
name = 'autoregressive-linear-attention-cuda',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Autoregressive Linear Attention CUDA kernel',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/autoregressive-linear-attention-cuda',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'linear attention',
'cuda'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from setuptools import setup, find_packages
setup(
name = 'adjacent-attention-pytorch',
packages = find_packages(),
version = '0.0.12',
license='MIT',
description = 'Adjacent Attention Network - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/adjacent-attention-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'graph neural network',
'transformers'
],
install_requires=[
'einops>=0.3',
'torch>=1.6',
'isab-pytorch<0.2'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from adjacent_attention_network.adjacent_attention_network import AdjacentAttentionNetwork
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from isab_pytorch import ISAB
# helpers
def exists(val):
return val is not None
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, **kwargs):
return self.net(x)
# adjacent attention class
class AdjacentAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 4,
dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.null_k = nn.Parameter(torch.randn(heads, dim_head))
self.null_v = nn.Parameter(torch.randn(heads, dim_head))
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
adj_kv_indices,
mask
):
b, n, d, h = *x.shape, self.heads
flat_indices = repeat(adj_kv_indices, 'b n a -> (b h) (n a)', h = h)
# derive query, key, value
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# gather keys and values according to adjacency matrix
k, v = map(lambda t: rearrange(t, 'b h n d -> (b h) n d'), (k, v))
k = batched_index_select(k, flat_indices)
v = batched_index_select(v, flat_indices)
k, v = map(lambda t: rearrange(t, '(b h) (n a) d -> b h n a d', h = h, n = n), (k, v))
# add null key / value, so a node can attend to nothing
        # a similar trick appears in the GNN literature under other names
nk, nv = map(lambda t: rearrange(t, 'h d -> () h () () d').expand(b, -1, n, 1, -1), (self.null_k, self.null_v))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
mask = F.pad(mask, (1, 0), value = 1)
# similarity of each node to its neighbors
sim = einsum('b h n d, b h n a d -> b h n a', q, k) * self.scale
# mask out neighbors that are just padding
mask_value = -torch.finfo(sim.dtype).max
mask = rearrange(mask.bool(), 'b n a -> b () n a')
sim.masked_fill_(~mask.bool(), mask_value)
# attention
attn = sim.softmax(dim = -1)
# dropout
attn = self.dropout(attn)
# get weighted average of the values of all neighbors
out = einsum('b h n a, b h n a d -> b h n d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
# combine output
return self.to_out(out)
# adjacent network (layers of adjacent attention)
class AdjacentAttentionNetwork(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 4,
num_neighbors_cutoff = None,
num_global_nodes = 0,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.num_neighbors_cutoff = num_neighbors_cutoff
self.layers = nn.ModuleList([])
for _ in range(depth):
global_attn = PreNorm(dim, ISAB(
dim = dim,
heads = heads,
num_induced_points = num_global_nodes
)) if num_global_nodes > 0 else None
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, AdjacentAttention(
dim = dim,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout
))),
global_attn,
Residual(PreNorm(dim, FeedForward(
dim = dim,
dropout = ff_dropout
)))
]))
def forward(self, x, adjacency_mat, mask = None):
device, n = x.device, x.shape[1]
diag = torch.eye(adjacency_mat.shape[-1], device = device).bool()
        adjacency_mat |= diag # each node should also attend to itself (self-interaction)
# zero out points on adjacency matrix
# where the nodes are just padding
if exists(mask):
adjacency_mat &= (mask[:, :, None] * mask[:, None, :])
adj_mat = adjacency_mat.float()
        # if we don't set a hard limit on the number of neighbors:
        # - find the maximum number of neighbors and pad nodes that have fewer up to that count
        # else:
        # - randomly sample down to the cutoff number of neighbors for any node that exceeds it
        # - this is similar to random sparse attention (bigbird)
# get the maximum number of neighbors
max_neighbors = int(adj_mat.sum(dim = -1).max())
if exists(self.num_neighbors_cutoff) and max_neighbors > self.num_neighbors_cutoff:
# to randomly sample the neighbors, add a small uniform noise to the mask and topk
noise = torch.empty((n, n), device = device).uniform_(-0.01, 0.01)
adj_mat = adj_mat + noise
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = self.num_neighbors_cutoff)
# cast the mask back to 0s and 1s
adj_mask = (adj_mask > 0.5).float()
else:
# todo - get distribution of number of neighbors, and strategically break up attention (message passing) to multiple steps
# - start with a bimodal num neighbors test case, then generalize
# use topk to get all the neighbors
# also pass the mask into the attention, as some neighbors will be just padding and not actually neighbors
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = max_neighbors)
for attn, global_attn, ff in self.layers:
x = attn(
x,
adj_kv_indices = adj_kv_indices,
mask = adj_mask
)
if exists(global_attn):
out, _ = global_attn(x, mask = mask)
x = x + out
x = ff(x)
return x
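# Usage sketch, added for illustration (not part of the original module); the graph is a
# random sparse boolean adjacency matrix and all sizes are arbitrary.
if __name__ == '__main__':
    model = AdjacentAttentionNetwork(dim = 512, depth = 3, num_neighbors_cutoff = 100)
    adj_mat = torch.empty(1, 256, 256).uniform_(0, 1) < 0.1   # boolean adjacency matrix
    nodes = torch.randn(1, 256, 512)
    mask = torch.ones(1, 256).bool()
    out = model(nodes, adj_mat, mask = mask)
    print(out.shape)  # (1, 256, 512)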
|
from setuptools import setup, find_packages
setup(
name = 'chroma-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Chroma - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/chroma-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion',
'protein design'
],
install_requires=[
'einops>=0.6',
'invariant-point-attention',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM, logging
from tf_bind_transformer.cache_utils import cache_fn, run_once
logging.set_verbosity_error()
def exists(val):
return val is not None
def map_values(fn, dictionary):
return {k: fn(v) for k, v in dictionary.items()}
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
print('calculating context embed only on cpu')
MODELS = dict(
pubmed = dict(
dim = 768,
path = 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
)
)
GLOBAL_VARIABLES = dict(model = None, tokenizer = None)
def get_contextual_dim(model_name):
assert model_name in MODELS
return MODELS[model_name]['dim']
@run_once('init_transformer')
def init_transformer(model_name):
path = MODELS[model_name]['path']
GLOBAL_VARIABLES['tokenizer'] = AutoTokenizer.from_pretrained(path)
model = AutoModelForMaskedLM.from_pretrained(path)
if not CONTEXT_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = model
@torch.no_grad()
def tokenize_text(
text,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True
):
init_transformer(model_name)
model = GLOBAL_VARIABLES['model']
tokenizer = GLOBAL_VARIABLES['tokenizer']
encoding = tokenizer.batch_encode_plus(
[text],
add_special_tokens = True,
padding = True,
truncation = True,
max_length = max_length,
return_attention_mask = True,
return_tensors = 'pt'
)
if not CONTEXT_EMBED_USE_CPU:
encoding = map_values(lambda t: t.cuda(), encoding)
model.eval()
with torch.no_grad():
outputs = model(**encoding, output_hidden_states = True)
hidden_state = outputs.hidden_states[hidden_state_index][0]
if return_cls_token:
return hidden_state[0]
return hidden_state.mean(dim = 0)
def get_text_repr(
texts,
*,
device,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True,
):
assert model_name in MODELS, f'{model_name} not found in available text transformers to use'
if isinstance(texts, str):
texts = [texts]
get_context_repr_fn = cache_fn(tokenize_text, path = f'contexts/{model_name}')
representations = [get_context_repr_fn(text, max_length = max_length, model_name = model_name, hidden_state_index = hidden_state_index, return_cls_token = return_cls_token) for text in texts]
return torch.stack(representations).to(device)
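# hypothetical usage sketch (not part of the original file): embed a phrase with the 'pubmed'
# model defined above; assumes the PubMedBERT weights can be downloaded and that a GPU is
# available unless CONTEXT_EMBED_USE_CPU is set
if __name__ == '__main__':
    device = torch.device('cpu') if CONTEXT_EMBED_USE_CPU else torch.device('cuda')
    embeds = get_text_repr(['GATA4 in cardiac fibroblasts'], device = device)
    print(embeds.shape)  # expected: torch.Size([1, 768]) for the 'pubmed' model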
|
from chroma_pytorch.chroma_pytorch import Chroma
|
import torch
from torch import nn, einsum
from einops import rearrange, repeat
import math
from pathlib import Path
from random import random
from functools import partial
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
# helper functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
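        # x: (batch,) of continuous times; output: (batch, dim + 1), the raw time concatenated with
        # learned sin / cos features (hence fourier_dim = learned_sinusoidal_dim + 1 in the Unet below)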
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
LayerNorm(dim)
)
def forward(self, x):
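        # linear attention: softmax q over the feature dimension and k over the sequence dimension,
        # then contract k with v first, so the cost scales linearly with the number of pixels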
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
v = v / (h * w)
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
resnet_block_groups = 8,
learned_sinusoidal_dim = 16
):
super().__init__()
# determine dimensions
self.channels = channels
input_channels = channels * 2
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
fourier_dim = learned_sinusoidal_dim + 1
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, channels, 1)
def forward(self, x, time, x_self_cond = None):
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# chroma class
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
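# note: alpha = sqrt(sigmoid(log_snr)) and sigma = sqrt(sigmoid(-log_snr)), so alpha ** 2 + sigma ** 2 = 1
# (variance preserving), and log_snr = log(alpha ** 2 / sigma ** 2) as the name implies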
class Chroma(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
use_ddim = False,
noise_schedule = 'cosine',
time_difference = 0.
):
super().__init__()
self.model = model
self.channels = self.model.channels
self.image_size = image_size
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.timesteps = timesteps
self.use_ddim = use_ddim
# proposed in the paper, summed to time_next
# as a way to fix a deficiency in self-conditioning and lower FID when the number of sampling timesteps is < 400
self.time_difference = time_difference
@property
def device(self):
return next(self.model.parameters()).device
def get_sampling_timesteps(self, batch, *, device):
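        # returns a list of (time, time_next) pairs per batch, stepping from t = 1 down to t = 0 over `timesteps` steps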
times = torch.linspace(1., 0., self.timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
@torch.no_grad()
def ddpm_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device=device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
# add the time delay
time_next = (time_next - self.time_difference).clamp(min = 0.)
noise_cond = self.log_snr(time)
# get predicted x0
x_start = self.model(img, noise_cond, x_start)
# clip x0
x_start.clamp_(-1., 1.)
# get log(snr)
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
# get alpha sigma of time and next time
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# derive posterior mean and variance
c = -expm1(log_snr - log_snr_next)
mean = alpha_next * (img * (1 - c) / alpha + c * x_start)
variance = (sigma_next ** 2) * c
log_variance = log(variance)
# get noise
noise = torch.where(
rearrange(time_next > 0, 'b -> b 1 1 1'),
torch.randn_like(img),
torch.zeros_like(img)
)
img = mean + (0.5 * log_variance).exp() * noise
return img
@torch.no_grad()
def ddim_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device = device)
x_start = None
for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
# get times and noise levels
log_snr = self.log_snr(times)
log_snr_next = self.log_snr(times_next)
padded_log_snr, padded_log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(padded_log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(padded_log_snr_next)
# add the time delay
times_next = (times_next - time_difference).clamp(min = 0.)
# predict x0
x_start = self.model(img, log_snr, x_start)
# clip x0
x_start.clamp_(-1., 1.)
# get predicted noise
pred_noise = (img - alpha * x_start) / sigma.clamp(min = 1e-8)
# calculate x next
img = x_start * alpha_next + pred_noise * sigma_next
return img
@torch.no_grad()
def sample(self, batch_size = 16):
image_size, channels = self.image_size, self.channels
sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size))
def forward(self, img, *args, **kwargs):
        batch, c, h, w, device, img_size = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
# sample random times
times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
# noise sample
noise = torch.randn_like(img)
noise_level = self.log_snr(times)
padded_noise_level = right_pad_dims_to(img, noise_level)
alpha, sigma = log_snr_to_alpha_sigma(padded_noise_level)
noised_img = alpha * img + sigma * noise
# if doing self-conditioning, 50% of the time, predict x_start from current set of times
        # and condition the unet on it
# this technique will slow down training by 25%, but seems to lower FID significantly
self_cond = None
if random() < 0.5:
with torch.no_grad():
self_cond = self.model(noised_img, noise_level).detach_()
# predict and take gradient step
pred = self.model(noised_img, noise_level, self_cond)
return F.mse_loss(pred, img)
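# NOTE (hypothetical sketch, not in the original file): the Trainer below calls
# Dataset(folder, image_size, augment_horizontal_flip = ..., convert_image_to = ...), but only the
# abstract torch.utils.data.Dataset is imported above. a minimal image-folder dataset with that
# signature is sketched here, shadowing the imported name so the Trainer call resolves; swap in
# whatever dataset the diffusion model is actually meant to be trained on
from PIL import Image
_convert_image_to_fn = convert_image_to  # keep a handle on the module-level helper before the kwarg shadows the name
class Dataset(Dataset):
    def __init__(self, folder, image_size, exts = ('jpg', 'jpeg', 'png'), augment_horizontal_flip = False, convert_image_to = None):
        super().__init__()
        self.paths = [p for ext in exts for p in Path(folder).glob(f'**/*.{ext}')]
        maybe_convert = T.Lambda(partial(_convert_image_to_fn, convert_image_to)) if exists(convert_image_to) else nn.Identity()
        self.transform = T.Compose([
            maybe_convert,
            T.Resize(image_size),
            T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
            T.CenterCrop(image_size),
            T.ToTensor()
        ])
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, index):
        img = Image.open(self.paths[index])
        return self.transform(img)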
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
fp16 = False,
split_batches = True,
convert_image_to = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = 'fp16' if fp16 else 'no'
)
self.accelerator.native_amp = amp
self.model = diffusion_model
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
# dataset and dataloader
self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
self.ema.load_state_dict(data['ema'])
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
self.ema.to(device)
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
self.save(milestone)
self.step += 1
pbar.update(1)
accelerator.print('training complete')
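# hypothetical end-to-end sketch (not part of the original file): wire the Unet, the Chroma diffusion
# wrapper and the Trainer together; the image folder path and hyperparameters are placeholders
if __name__ == '__main__':
    unet = Unet(dim = 64)
    diffusion = Chroma(unet, image_size = 128, timesteps = 1000)
    trainer = Trainer(diffusion, './path/to/images', train_batch_size = 8, train_num_steps = 10000)
    trainer.train()
    sampled = diffusion.sample(batch_size = 4)  # (4, 3, 128, 128), via ddpm or ddim depending on use_ddim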
|
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['big_sleep']
from version import __version__
setup(
name = 'big-sleep',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'dream = big_sleep.cli:main',
],
},
version = __version__,
license='MIT',
description = 'Big Sleep',
author = 'Ryan Murdock, Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/big-sleep',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'text to image',
'generative adversarial networks'
],
install_requires=[
'torch>=1.7.1',
'einops>=0.3',
'fire',
'ftfy',
'pytorch-pretrained-biggan',
'regex',
'torchvision>=0.8.2',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import time
import shutil
import torch
from big_sleep import Imagine
terminate = False
def signal_handling(signum,frame):
global terminate
terminate = True
num_attempts = 4
for attempt in range(num_attempts):
dream = Imagine(
text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
text_min = "blur\\zoom",
lr = 7e-2,
image_size = 512,
gradient_accumulate_every = 1,
save_every = 50,
epochs = 5,
iterations = 50,
save_progress = False,
bilinear = False,
open_folder = False,
seed = None,
torch_deterministic = False,
max_classes = 20,
class_temperature = 2.,
save_date_time = False,
save_best = True,
experimental_resample = True,
ema_decay = 0.99
)
dream()
    shutil.copy(dream.text_path + ".best.png", f"{attempt}.png")
try:
time.sleep(2)
del dream
time.sleep(2)
torch.cuda.empty_cache()
except Exception:
torch.cuda.empty_cache()
|
__version__ = '0.9.1'
|
"""Good differentiable image resampling for PyTorch."""
from functools import update_wrapper
import math
import torch
from torch.nn import functional as F
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
def odd(fn):
return update_wrapper(lambda x: torch.sign(x) * fn(abs(x)), fn)
def _to_linear_srgb(input):
cond = input <= 0.04045
a = input / 12.92
b = ((input + 0.055) / 1.055)**2.4
return torch.where(cond, a, b)
def _to_nonlinear_srgb(input):
cond = input <= 0.0031308
a = 12.92 * input
b = 1.055 * input**(1/2.4) - 0.055
return torch.where(cond, a, b)
to_linear_srgb = odd(_to_linear_srgb)
to_nonlinear_srgb = odd(_to_nonlinear_srgb)
def resample(input, size, align_corners=True, is_srgb=False):
n, c, h, w = input.shape
dh, dw = size
if is_srgb:
input = to_linear_srgb(input)
input = input.view([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 3), 3).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 3), 3).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.view([n, c, h, w])
input = F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
if is_srgb:
input = to_nonlinear_srgb(input)
return input
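# hypothetical usage sketch (not part of the original file): downsample a batch of images to
# 224 x 224 with the Lanczos prefilter followed by bicubic interpolation
if __name__ == '__main__':
    imgs = torch.rand(2, 3, 512, 512)
    out = resample(imgs, (224, 224))
    print(out.shape)  # torch.Size([2, 3, 224, 224])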
|
# Exponential Moving Average (from https://gist.github.com/crowsonkb/76b94d5238272722290734bf4725d204)
"""Exponential moving average for PyTorch. Adapted from
https://www.zijianhu.com/post/pytorch/ema/ by crowsonkb
"""
from copy import deepcopy
import torch
from torch import nn
class EMA(nn.Module):
def __init__(self, model, decay):
super().__init__()
self.model = model
self.decay = decay
self.register_buffer('accum', torch.tensor(1.))
self._biased = deepcopy(self.model)
self.average = deepcopy(self.model)
for param in self._biased.parameters():
param.detach_().zero_()
for param in self.average.parameters():
param.detach_().zero_()
self.update()
@torch.no_grad()
def update(self):
assert self.training, 'Update should only be called during training'
self.accum *= self.decay
model_params = dict(self.model.named_parameters())
biased_params = dict(self._biased.named_parameters())
average_params = dict(self.average.named_parameters())
assert model_params.keys() == biased_params.keys() == average_params.keys(), f'Model parameter keys incompatible with EMA stored parameter keys'
for name, param in model_params.items():
biased_params[name].mul_(self.decay)
biased_params[name].add_((1 - self.decay) * param)
average_params[name].copy_(biased_params[name])
average_params[name].div_(1 - self.accum)
model_buffers = dict(self.model.named_buffers())
biased_buffers = dict(self._biased.named_buffers())
average_buffers = dict(self.average.named_buffers())
assert model_buffers.keys() == biased_buffers.keys() == average_buffers.keys()
for name, buffer in model_buffers.items():
biased_buffers[name].copy_(buffer)
average_buffers[name].copy_(buffer)
def forward(self, *args, **kwargs):
if self.training:
return self.model(*args, **kwargs)
return self.average(*args, **kwargs)
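# hypothetical usage sketch (not part of the original file): wrap a model, call update() after each
# optimizer step while in train mode, then switch to eval mode to run the de-biased averaged weights
if __name__ == '__main__':
    net = nn.Linear(4, 4)
    ema = EMA(net, decay = 0.99)
    opt = torch.optim.SGD(net.parameters(), lr = 1e-2)
    for _ in range(3):
        loss = ema(torch.randn(8, 4)).pow(2).mean()  # training forward goes through the live model
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.update()
    ema.eval()
    out = ema(torch.randn(8, 4))  # eval forward uses the averaged weights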
|
from big_sleep.big_sleep import BigSleep, Imagine
|
# this code is a copy from huggingface
# with some minor modifications
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import json
import copy
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
Path.home() / '.pytorch_pretrained_biggan'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BIGGAN_CACHE = os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_biggan'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PRETRAINED_MODEL_ARCHIVE_MAP = {
'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-pytorch_model.bin",
'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-pytorch_model.bin",
'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-config.json",
'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-config.json",
'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-config.json",
}
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.json'
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
class BigGANConfig(object):
""" Configuration class to store the configuration of a `BigGAN`.
Defaults are for the 128x128 model.
    each layers tuple is (up-sample in this block?, input channel multiplier, output channel multiplier)
"""
def __init__(self,
output_dim=128,
z_dim=128,
class_embed_dim=128,
channel_width=128,
num_classes=1000,
layers=[(False, 16, 16),
(True, 16, 16),
(False, 16, 16),
(True, 16, 8),
(False, 8, 8),
(True, 8, 4),
(False, 4, 4),
(True, 4, 2),
(False, 2, 2),
(True, 2, 1)],
attention_layer_position=8,
eps=1e-4,
n_stats=51):
"""Constructs BigGANConfig. """
self.output_dim = output_dim
self.z_dim = z_dim
self.class_embed_dim = class_embed_dim
self.channel_width = channel_width
self.num_classes = num_classes
self.layers = layers
self.attention_layer_position = attention_layer_position
self.eps = eps
self.n_stats = n_stats
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BigGANConfig` from a Python dictionary of parameters."""
config = BigGANConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BigGANConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def snconv2d(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Conv2d(**kwargs), eps=eps)
def snlinear(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Linear(**kwargs), eps=eps)
def sn_embedding(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Embedding(**kwargs), eps=eps)
class SelfAttn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_channels, eps=1e-12):
super(SelfAttn, self).__init__()
self.in_channels = in_channels
self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_o_conv = snconv2d(in_channels=in_channels//2, out_channels=in_channels,
kernel_size=1, bias=False, eps=eps)
self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
self.softmax = nn.Softmax(dim=-1)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
_, ch, h, w = x.size()
# Theta path
theta = self.snconv1x1_theta(x)
theta = theta.view(-1, ch//8, h*w)
# Phi path
phi = self.snconv1x1_phi(x)
phi = self.maxpool(phi)
phi = phi.view(-1, ch//8, h*w//4)
# Attn map
attn = torch.bmm(theta.permute(0, 2, 1), phi)
attn = self.softmax(attn)
# g path
g = self.snconv1x1_g(x)
g = self.maxpool(g)
g = g.view(-1, ch//2, h*w//4)
# Attn_g - o_conv
attn_g = torch.bmm(g, attn.permute(0, 2, 1))
attn_g = attn_g.view(-1, ch//2, h, w)
attn_g = self.snconv1x1_o_conv(attn_g)
# Out
out = x + self.gamma*attn_g
return out
class BigGANBatchNorm(nn.Module):
""" This is a batch norm module that can handle conditional input and can be provided with pre-computed
activation means and variances for various truncation parameters.
We cannot just rely on torch.batch_norm since it cannot handle
    batched weights (pytorch 1.0.1). We compute batch norm ourselves without updating running means and variances.
If you want to train this model you should add running means and variance computation logic.
"""
def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
super(BigGANBatchNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.conditional = conditional
# We use pre-computed statistics for n_stats values of truncation between 0 and 1
self.register_buffer('running_means', torch.zeros(n_stats, num_features))
self.register_buffer('running_vars', torch.ones(n_stats, num_features))
self.step_size = 1.0 / (n_stats - 1)
if conditional:
assert condition_vector_dim is not None
self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
else:
self.weight = torch.nn.Parameter(torch.Tensor(num_features))
self.bias = torch.nn.Parameter(torch.Tensor(num_features))
def forward(self, x, truncation, condition_vector=None):
        # Retrieve pre-computed statistics associated with this truncation
coef, start_idx = math.modf(truncation / self.step_size)
start_idx = int(start_idx)
if coef != 0.0: # Interpolate
running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
else:
running_mean = self.running_means[start_idx]
running_var = self.running_vars[start_idx]
if self.conditional:
running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)
out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
else:
out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
training=False, momentum=0.0, eps=self.eps)
return out
class GenBlock(nn.Module):
def __init__(self, in_size, out_size, condition_vector_dim, reduction_factor=4, up_sample=False,
n_stats=51, eps=1e-12):
super(GenBlock, self).__init__()
self.up_sample = up_sample
self.drop_channels = (in_size != out_size)
middle_size = in_size // reduction_factor
self.bn_0 = BigGANBatchNorm(in_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_0 = snconv2d(in_channels=in_size, out_channels=middle_size, kernel_size=1, eps=eps)
self.bn_1 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_1 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
self.bn_2 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_2 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
self.bn_3 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_3 = snconv2d(in_channels=middle_size, out_channels=out_size, kernel_size=1, eps=eps)
self.relu = nn.ReLU()
def forward(self, x, cond_vector, truncation):
x0 = x
x = self.bn_0(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_0(x)
x = self.bn_1(x, truncation, cond_vector)
x = self.relu(x)
if self.up_sample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.conv_1(x)
x = self.bn_2(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_2(x)
x = self.bn_3(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_3(x)
if self.drop_channels:
new_channels = x0.shape[1] // 2
x0 = x0[:, :new_channels, ...]
if self.up_sample:
x0 = F.interpolate(x0, scale_factor=2, mode='nearest')
out = x + x0
return out
class Generator(nn.Module):
def __init__(self, config):
super(Generator, self).__init__()
self.config = config
ch = config.channel_width
condition_vector_dim = config.z_dim * 2
self.gen_z = snlinear(in_features=condition_vector_dim,
out_features=4 * 4 * 16 * ch, eps=config.eps)
layers = []
for i, layer in enumerate(config.layers):
if i == config.attention_layer_position:
layers.append(SelfAttn(ch*layer[1], eps=config.eps))
layers.append(GenBlock(ch*layer[1],
ch*layer[2],
condition_vector_dim,
up_sample=layer[0],
n_stats=config.n_stats,
eps=config.eps))
self.layers = nn.ModuleList(layers)
self.bn = BigGANBatchNorm(ch, n_stats=config.n_stats, eps=config.eps, conditional=False)
self.relu = nn.ReLU()
self.conv_to_rgb = snconv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, eps=config.eps)
self.tanh = nn.Tanh()
def forward(self, cond_vector, truncation):
z = self.gen_z(cond_vector[0].unsqueeze(0))
# We use this conversion step to be able to use TF weights:
# TF convention on shape is [batch, height, width, channels]
# PT convention on shape is [batch, channels, height, width]
z = z.view(-1, 4, 4, 16 * self.config.channel_width)
z = z.permute(0, 3, 1, 2).contiguous()
next_available_latent_index = 1
for layer in self.layers:
if isinstance(layer, GenBlock):
z = layer(z, cond_vector[next_available_latent_index].unsqueeze(0), truncation)
next_available_latent_index += 1
else:
z = layer(z)
z = self.bn(z, truncation)
z = self.relu(z)
z = self.conv_to_rgb(z)
z = z[:, :3, ...]
z = self.tanh(z)
return z
class BigGAN(nn.Module):
"""BigGAN Generator."""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
model_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
try:
resolved_model_file = cached_path(model_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error("Wrong model name, should be a valid path to a folder containing "
"a {} file and a {} file or a model name in {}".format(
WEIGHTS_NAME, CONFIG_NAME, PRETRAINED_MODEL_ARCHIVE_MAP.keys()))
raise
logger.info("loading model {} from cache at {}".format(pretrained_model_name_or_path, resolved_model_file))
# Load config
config = BigGANConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
state_dict = torch.load(resolved_model_file, map_location='cpu' if not torch.cuda.is_available() else None)
model.load_state_dict(state_dict, strict=False)
return model
def __init__(self, config):
super(BigGAN, self).__init__()
self.config = config
self.embeddings = nn.Linear(config.num_classes, config.z_dim, bias=False)
self.generator = Generator(config)
def forward(self, z, class_label, truncation):
assert 0 < truncation <= 1
embed = self.embeddings(class_label)
cond_vector = torch.cat((z, embed), dim=1)
z = self.generator(cond_vector, truncation)
return z
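# hypothetical usage sketch (not part of the original file): this fork of the huggingface BigGAN
# expects one latent / class row per generator block, plus one for the initial linear layer (as
# constructed by the Latents module in the big sleep code below); assumes the pretrained weights
# are still reachable for download
if __name__ == '__main__':
    gan = BigGAN.from_pretrained('biggan-deep-128')
    num_latents = len(gan.config.layers) + 1
    z = torch.randn(num_latents, gan.config.z_dim)
    classes = torch.zeros(num_latents, gan.config.num_classes)
    classes[:, 207] = 1.  # e.g. a single imagenet class, one-hot
    with torch.no_grad():
        img = gan(z, classes, truncation = 0.4)
    print(img.shape)  # torch.Size([1, 3, 128, 128]), values in [-1, 1]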
|
import fire
import random as rnd
from big_sleep import Imagine, version
from pathlib import Path
from .version import __version__
def train(
text=None,
img=None,
text_min="",
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
epochs = 20,
iterations = 1050,
save_every = 50,
overwrite = False,
save_progress = False,
save_date_time = False,
bilinear = False,
open_folder = True,
seed = 0,
append_seed = False,
random = False,
torch_deterministic = False,
max_classes = None,
class_temperature = 2.,
save_best = False,
experimental_resample = False,
ema_decay = 0.5,
num_cutouts = 128,
center_bias = False,
larger_model = False
):
print(f'Starting up... v{__version__}')
if random:
        seed = rnd.randint(0, int(1e6))
imagine = Imagine(
text=text,
img=img,
text_min=text_min,
lr = lr,
image_size = image_size,
gradient_accumulate_every = gradient_accumulate_every,
epochs = epochs,
iterations = iterations,
save_every = save_every,
save_progress = save_progress,
bilinear = bilinear,
seed = seed,
append_seed = append_seed,
torch_deterministic = torch_deterministic,
open_folder = open_folder,
max_classes = max_classes,
class_temperature = class_temperature,
save_date_time = save_date_time,
save_best = save_best,
experimental_resample = experimental_resample,
ema_decay = ema_decay,
num_cutouts = num_cutouts,
center_bias = center_bias,
larger_clip = larger_model
)
if not overwrite and imagine.filename.exists():
answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
if answer not in ('yes', 'y'):
exit()
imagine()
def main():
fire.Fire(train)
|
import os
import sys
import subprocess
import signal
import string
import re
from datetime import datetime
from pathlib import Path
import random
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from torchvision.utils import save_image
import torchvision.transforms as T
from PIL import Image
from tqdm import tqdm, trange
from big_sleep.ema import EMA
from big_sleep.resample import resample
from big_sleep.biggan import BigGAN
from big_sleep.clip import load, tokenize
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum,frame):
print('detecting keyboard interrupt, gracefully exiting')
global terminate
terminate = True
signal.signal(signal.SIGINT,signal_handling)
# helpers
def exists(val):
return val is not None
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/','\\')]
    if cmd_list is None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
def create_text_path(text=None, img=None, encoding=None):
input_name = ""
if text is not None:
input_name += text
if img is not None:
if isinstance(img, str):
img_name = "".join(img.split(".")[:-1]) # replace spaces by underscores, remove img extension
img_name = img_name.split("/")[-1] # only take img name, not path
else:
img_name = "PIL_img"
input_name += "_" + img_name
if encoding is not None:
input_name = "your_encoding"
return input_name.replace("-", "_").replace(",", "").replace(" ", "_").replace("|", "--").strip('-_')[:255]
# tensor helpers
def differentiable_topk(x, k, temperature=1.):
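    # soft top-k: take the softmax top-1 k times, masking previous picks with -inf, then sum the
    # resulting near-one-hot rows so class selection stays differentiable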
n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == (k - 1)
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x = x.scatter(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(n, k, dim).sum(dim = 1)
def create_clip_img_transform(image_width):
clip_mean = [0.48145466, 0.4578275, 0.40821073]
clip_std = [0.26862954, 0.26130258, 0.27577711]
transform = T.Compose([
#T.ToPILImage(),
T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor(),
T.Normalize(mean=clip_mean, std=clip_std)
])
return transform
def rand_cutout(image, size, center_bias=False, center_focus=2):
width = image.shape[-1]
min_offset = 0
max_offset = width - size
if center_bias:
# sample around image center
center = max_offset / 2
std = center / center_focus
offset_x = int(random.gauss(mu=center, sigma=std))
offset_y = int(random.gauss(mu=center, sigma=std))
# resample uniformly if over boundaries
offset_x = random.randint(min_offset, max_offset) if (offset_x > max_offset or offset_x < min_offset) else offset_x
offset_y = random.randint(min_offset, max_offset) if (offset_y > max_offset or offset_y < min_offset) else offset_y
else:
offset_x = random.randint(min_offset, max_offset)
offset_y = random.randint(min_offset, max_offset)
cutout = image[:, :, offset_x:offset_x + size, offset_y:offset_y + size]
return cutout
# load biggan
class Latents(torch.nn.Module):
def __init__(
self,
num_latents = 15,
num_classes = 1000,
z_dim = 128,
max_classes = None,
class_temperature = 2.
):
super().__init__()
self.normu = torch.nn.Parameter(torch.zeros(num_latents, z_dim).normal_(std = 1))
self.cls = torch.nn.Parameter(torch.zeros(num_latents, num_classes).normal_(mean = -3.9, std = .3))
self.register_buffer('thresh_lat', torch.tensor(1))
assert not exists(max_classes) or max_classes > 0 and max_classes <= num_classes, f'max_classes must be between 0 and {num_classes}'
self.max_classes = max_classes
self.class_temperature = class_temperature
def forward(self):
if exists(self.max_classes):
classes = differentiable_topk(self.cls, self.max_classes, temperature = self.class_temperature)
else:
classes = torch.sigmoid(self.cls)
return self.normu, classes
class Model(nn.Module):
def __init__(
self,
image_size,
max_classes = None,
class_temperature = 2.,
ema_decay = 0.99
):
super().__init__()
assert image_size in (128, 256, 512), 'image size must be one of 128, 256, or 512'
self.biggan = BigGAN.from_pretrained(f'biggan-deep-{image_size}')
self.max_classes = max_classes
self.class_temperature = class_temperature
        self.ema_decay = ema_decay
self.init_latents()
def init_latents(self):
latents = Latents(
num_latents = len(self.biggan.config.layers) + 1,
num_classes = self.biggan.config.num_classes,
z_dim = self.biggan.config.z_dim,
max_classes = self.max_classes,
class_temperature = self.class_temperature
)
self.latents = EMA(latents, self.ema_decay)
def forward(self):
self.biggan.eval()
out = self.biggan(*self.latents(), 1)
return (out + 1) / 2
class BigSleep(nn.Module):
def __init__(
self,
num_cutouts = 128,
loss_coef = 100,
image_size = 512,
bilinear = False,
max_classes = None,
class_temperature = 2.,
experimental_resample = False,
ema_decay = 0.99,
center_bias = False,
larger_clip = False
):
super().__init__()
self.loss_coef = loss_coef
self.image_size = image_size
self.num_cutouts = num_cutouts
self.experimental_resample = experimental_resample
self.center_bias = center_bias
self.interpolation_settings = {'mode': 'bilinear', 'align_corners': False} if bilinear else {'mode': 'nearest'}
model_name = 'ViT-B/32' if not larger_clip else 'ViT-L/14'
self.perceptor, self.normalize_image = load(model_name, jit = False)
self.model = Model(
image_size = image_size,
max_classes = max_classes,
class_temperature = class_temperature,
ema_decay = ema_decay
)
def reset(self):
self.model.init_latents()
def sim_txt_to_img(self, text_embed, img_embed, text_type="max"):
sign = -1
if text_type == "min":
sign = 1
return sign * self.loss_coef * torch.cosine_similarity(text_embed, img_embed, dim = -1).mean()
def forward(self, text_embeds, text_min_embeds=[], return_loss = True):
width, num_cutouts = self.image_size, self.num_cutouts
out = self.model()
if not return_loss:
return out
pieces = []
for ch in range(num_cutouts):
# sample cutout size
size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
# get cutout
apper = rand_cutout(out, size, center_bias=self.center_bias)
if (self.experimental_resample):
apper = resample(apper, (224, 224))
else:
apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
pieces.append(apper)
into = torch.cat(pieces)
into = self.normalize_image(into)
image_embed = self.perceptor.encode_image(into)
latents, soft_one_hot_classes = self.model.latents()
num_latents = latents.shape[0]
latent_thres = self.model.latents.model.thresh_lat
lat_loss = torch.abs(1 - torch.std(latents, dim=1)).mean() + \
torch.abs(torch.mean(latents, dim = 1)).mean() + \
4 * torch.max(torch.square(latents).mean(), latent_thres)
for array in latents:
mean = torch.mean(array)
diffs = array - mean
var = torch.mean(torch.pow(diffs, 2.0))
std = torch.pow(var, 0.5)
zscores = diffs / std
skews = torch.mean(torch.pow(zscores, 3.0))
kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0
lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()
results = []
for txt_embed in text_embeds:
results.append(self.sim_txt_to_img(txt_embed, image_embed))
for txt_min_embed in text_min_embeds:
results.append(self.sim_txt_to_img(txt_min_embed, image_embed, "min"))
sim_loss = sum(results).mean()
return out, (lat_loss, cls_loss, sim_loss)
class Imagine(nn.Module):
def __init__(
self,
*,
text=None,
img=None,
encoding=None,
text_min = "",
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
save_every = 50,
epochs = 20,
iterations = 1050,
save_progress = False,
bilinear = False,
open_folder = True,
seed = None,
append_seed = False,
torch_deterministic = False,
max_classes = None,
class_temperature = 2.,
save_date_time = False,
save_best = False,
experimental_resample = False,
ema_decay = 0.99,
num_cutouts = 128,
center_bias = False,
larger_clip = False
):
super().__init__()
if torch_deterministic:
assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation (PyTorch 1.7.1)'
torch.set_deterministic(True)
self.seed = seed
self.append_seed = append_seed
if exists(seed):
print(f'setting seed of {seed}')
if seed == 0:
print('you can override this with --seed argument in the command line, or --random for a randomly chosen one')
torch.manual_seed(seed)
self.epochs = epochs
self.iterations = iterations
model = BigSleep(
image_size = image_size,
bilinear = bilinear,
max_classes = max_classes,
class_temperature = class_temperature,
experimental_resample = experimental_resample,
ema_decay = ema_decay,
num_cutouts = num_cutouts,
center_bias = center_bias,
larger_clip = larger_clip
).cuda()
self.model = model
self.lr = lr
self.optimizer = Adam(model.model.latents.model.parameters(), lr)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_progress = save_progress
self.save_date_time = save_date_time
self.save_best = save_best
self.current_best_score = 0
self.open_folder = open_folder
self.total_image_updates = (self.epochs * self.iterations) / self.save_every
self.encoded_texts = {
"max": [],
"min": []
}
# create img transform
self.clip_transform = create_clip_img_transform(224)
# create starting encoding
self.set_clip_encoding(text=text, img=img, encoding=encoding, text_min=text_min)
@property
def seed_suffix(self):
return f'.{self.seed}' if self.append_seed and exists(self.seed) else ''
def set_text(self, text):
self.set_clip_encoding(text = text)
def create_clip_encoding(self, text=None, img=None, encoding=None):
self.text = text
self.img = img
if encoding is not None:
encoding = encoding.cuda()
#elif self.create_story:
# encoding = self.update_story_encoding(epoch=0, iteration=1)
elif text is not None and img is not None:
encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
elif text is not None:
encoding = self.create_text_encoding(text)
elif img is not None:
encoding = self.create_img_encoding(img)
return encoding
def create_text_encoding(self, text):
tokenized_text = tokenize(text).cuda()
with torch.no_grad():
text_encoding = self.model.perceptor.encode_text(tokenized_text).detach()
return text_encoding
def create_img_encoding(self, img):
if isinstance(img, str):
img = Image.open(img)
normed_img = self.clip_transform(img).unsqueeze(0).cuda()
with torch.no_grad():
img_encoding = self.model.perceptor.encode_image(normed_img).detach()
return img_encoding
def encode_multiple_phrases(self, text, img=None, encoding=None, text_type="max"):
if text is not None and "|" in text:
self.encoded_texts[text_type] = [self.create_clip_encoding(text=prompt_min, img=img, encoding=encoding) for prompt_min in text.split("|")]
else:
self.encoded_texts[text_type] = [self.create_clip_encoding(text=text, img=img, encoding=encoding)]
def encode_max_and_min(self, text, img=None, encoding=None, text_min=""):
self.encode_multiple_phrases(text, img=img, encoding=encoding)
if text_min is not None and text_min != "":
self.encode_multiple_phrases(text_min, img=img, encoding=encoding, text_type="min")
def set_clip_encoding(self, text=None, img=None, encoding=None, text_min=""):
self.current_best_score = 0
self.text = text
self.text_min = text_min
if len(text_min) > 0:
text = text + "_wout_" + text_min[:255] if text is not None else "wout_" + text_min[:255]
text_path = create_text_path(text=text, img=img, encoding=encoding)
if self.save_date_time:
text_path = datetime.now().strftime("%y%m%d-%H%M%S-") + text_path
self.text_path = text_path
self.filename = Path(f'./{text_path}{self.seed_suffix}.png')
self.encode_max_and_min(text, img=img, encoding=encoding, text_min=text_min) # Tokenize and encode each prompt
def reset(self):
self.model.reset()
self.model = self.model.cuda()
        self.optimizer = Adam(self.model.model.latents.model.parameters(), self.lr) # match the constructor: only the live latents inside the EMA wrapper are optimized
def train_step(self, epoch, i, pbar=None):
total_loss = 0
for _ in range(self.gradient_accumulate_every):
out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
loss = sum(losses) / self.gradient_accumulate_every
total_loss += loss
loss.backward()
self.optimizer.step()
self.model.model.latents.update()
self.optimizer.zero_grad()
if (i + 1) % self.save_every == 0:
with torch.no_grad():
self.model.model.latents.eval()
out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
top_score, best = torch.topk(losses[2], k=1, largest=False)
image = self.model.model()[best].cpu()
self.model.model.latents.train()
save_image(image, str(self.filename))
if pbar is not None:
pbar.update(1)
else:
print(f'image updated at "./{str(self.filename)}"')
if self.save_progress:
total_iterations = epoch * self.iterations + i
num = total_iterations // self.save_every
save_image(image, Path(f'./{self.text_path}.{num}{self.seed_suffix}.png'))
if self.save_best and top_score.item() < self.current_best_score:
self.current_best_score = top_score.item()
save_image(image, Path(f'./{self.text_path}{self.seed_suffix}.best.png'))
return out, total_loss
def forward(self):
penalizing = ""
if len(self.text_min) > 0:
penalizing = f'penalizing "{self.text_min}"'
print(f'Imagining "{self.text_path}" {penalizing}...')
with torch.no_grad():
self.model(self.encoded_texts["max"][0]) # one warmup step due to issue with CLIP and CUDA
if self.open_folder:
open_folder('./')
self.open_folder = False
image_pbar = tqdm(total=self.total_image_updates, desc='image update', position=2, leave=True)
epoch_pbar = trange(self.epochs, desc = ' epochs', position=0, leave=True)
for epoch in (ep for ep in epoch_pbar if not terminate):
pbar = trange(self.iterations, desc=' iteration', position=1, leave=True)
image_pbar.update(0)
for i in (it for it in pbar if not terminate):
out, loss = self.train_step(epoch, i, image_pbar)
pbar.set_description(f'loss: {loss.item():04.2f}')
|
from collections import OrderedDict
from typing import List, Tuple, Union
import hashlib
import os
import urllib
import warnings
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform():
return Compose([
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform()
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
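# Illustrative usage sketch, not part of the upstream module: it shows how load(),
# tokenize() and the CLIP text encoder defined above fit together. The helper name,
# the "ViT-B/32" choice and the prompt strings are arbitrary placeholders.
def _example_clip_text_features():
    model, preprocess = load("ViT-B/32", jit=False)     # downloads the checkpoint on first use
    tokens = tokenize(["a diagram", "a dog", "a cat"])  # (3, 77) LongTensor of BPE ids
    with torch.no_grad():
        text_features = model.encode_text(tokens.to(next(model.parameters()).device))
    # unit-normalise so dot products between embeddings are cosine similarities
    return text_features / text_features.norm(dim=-1, keepdim=True)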
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
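# Illustrative sketch (hypothetical helper, not in the original file): every byte value gets a
# printable unicode stand-in, so BPE can operate on arbitrary UTF-8 without unknown tokens.
def _example_bytes_to_unicode():
    mapping = bytes_to_unicode()            # dict with 256 entries: byte value -> unicode character
    return len(mapping), mapping[ord("A")]  # (256, 'A'): printable ASCII maps to itself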
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
_tokenizer = SimpleTokenizer()
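# Illustrative sketch (hypothetical helper, not in the original file): the BPE tokenizer can be
# exercised directly; encode() maps text to integer ids over the ~49K-token vocabulary and
# decode() approximately inverts it (case and spacing are normalised).
def _example_tokenizer_roundtrip():
    ids = _tokenizer.encode("hello world")   # list of BPE token ids
    text = _tokenizer.decode(ids)            # "hello world " once the '</w>' markers become spaces
    return ids, text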
|
from setuptools import setup, find_packages
setup(
name = 'axial_positional_embedding',
packages = find_packages(),
version = '0.2.1',
license='MIT',
description = 'Axial Positional Embedding',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/axial-positional-embedding',
keywords = ['transformers', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import torch
from torch import nn
from operator import mul
from functools import reduce
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, axial_shape, axial_dims = None):
super().__init__()
self.dim = dim
self.shape = axial_shape
self.max_seq_len = reduce(mul, axial_shape, 1)
self.summed = axial_dims is None
axial_dims = ((dim,) * len(axial_shape)) if self.summed else axial_dims
assert len(self.shape) == len(axial_dims), 'number of axial dimensions must equal the number of dimensions in the shape'
assert self.summed or not self.summed and sum(axial_dims) == dim, f'axial dimensions must sum up to the target dimension {dim}'
self.weights = ParameterList(self, 'weights', len(axial_shape))
for ind, (shape, axial_dim) in enumerate(zip(self.shape, axial_dims)):
ax_shape = [1] * len(self.shape)
ax_shape[ind] = shape
ax_shape = (1, *ax_shape, axial_dim)
ax_emb = nn.Parameter(torch.zeros(ax_shape).normal_(0, 1))
self.weights.append(ax_emb)
def forward(self, x):
b, t, e = x.shape
assert (t <= self.max_seq_len), f'Sequence length ({t}) must be less than the maximum sequence length allowed ({self.max_seq_len})'
embs = []
for ax_emb in self.weights.to_list():
axial_dim = ax_emb.shape[-1]
expand_shape = (b, *self.shape, axial_dim)
emb = ax_emb.expand(expand_shape).reshape(b, self.max_seq_len, axial_dim)
embs.append(emb)
pos_emb = sum(embs) if self.summed else torch.cat(embs, dim=-1)
return pos_emb[:, :t].to(x)
# a mock parameter list object until below issue is resolved
# https://github.com/pytorch/pytorch/issues/36035
class ParameterList(object):
def __init__(self, kls, prefix, length):
self.ind = 0
self.kls = kls
self.prefix = prefix
self.length = length
def _keyname(self, prefix, ind):
return f'{prefix}_{ind}'
def append(self, x):
setattr(self.kls, self._keyname(self.prefix, self.ind), x)
self.ind += 1
def to_list(self):
return [getattr(self.kls, self._keyname(self.prefix, i)) for i in range(self.length)]
# Axial Positional Embedding for Images
class AxialPositionalEmbeddingImage(nn.Module):
def __init__(self, dim, axial_shape, axial_dims = None):
super().__init__()
assert len(axial_shape) == 2, 'Axial shape must have 2 dimensions for images'
self.pos_emb = AxialPositionalEmbedding(dim, axial_shape, axial_dims)
def forward(self, img):
b, c, h, w = img.shape
img = img.permute(0, 2, 3, 1).reshape(b, h * w, c)
pos_emb = self.pos_emb(img)
return pos_emb.reshape(b, h, w, c).permute(0, 3, 1, 2)
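# Illustrative usage sketch (hypothetical helper with arbitrary sizes, not part of the package):
# sequences are factorised into an axial grid, and the image variant works directly on NCHW maps.
def _example_axial_positional_embedding():
    pos_emb = AxialPositionalEmbedding(dim = 512, axial_shape = (64, 64))  # supports sequence lengths up to 4096
    tokens = torch.randn(1, 1024, 512)            # (batch, seq, dim)
    tokens = tokens + pos_emb(tokens)             # embedding is truncated to the actual sequence length
    img_pos_emb = AxialPositionalEmbeddingImage(dim = 256, axial_shape = (32, 32))
    img = torch.randn(1, 256, 32, 32)             # (batch, channels, height, width)
    img = img + img_pos_emb(img)
    return tokens, img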
|
from axial_positional_embedding.axial_positional_embedding import AxialPositionalEmbedding, AxialPositionalEmbeddingImage
|
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
import torch.nn.functional as F
from einops import rearrange
import sidechainnet as scn
from alphafold2_pytorch import Alphafold2
import alphafold2_pytorch.constants as constants
from alphafold2_pytorch.utils import get_bucketed_distance_matrix
# constants
DEVICE = None # defaults to cuda if available, else cpu
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100
THRESHOLD_LENGTH = 250
# set device
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
DEVICE = constants.DEVICE
# helpers
def cycle(loader, cond = lambda x: True):
while True:
for data in loader:
if not cond(data):
continue
yield data
# get data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = 1,
dynamic_batching = False
)
data = iter(data['train'])
data_cond = lambda t: t[1].shape[1] < THRESHOLD_LENGTH
dl = cycle(data, data_cond)
# model
model = Alphafold2(
dim = 256,
depth = 1,
heads = 8,
dim_head = 64
).to(DEVICE)
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training loop
for _ in range(NUM_BATCHES):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seq, coords, mask = batch.seqs, batch.crds, batch.msks
b, l, _ = seq.shape
# prepare mask, labels
seq, coords, mask = seq.argmax(dim = -1).to(DEVICE), coords.to(DEVICE), mask.to(DEVICE).bool()
coords = rearrange(coords, 'b (l c) d -> b l c d', l = l)
discretized_distances = get_bucketed_distance_matrix(coords[:, :, 1], mask, DISTOGRAM_BUCKETS, IGNORE_INDEX)
# predict
distogram = model(seq, mask = mask)
distogram = rearrange(distogram, 'b i j c -> b c i j')
# loss
loss = F.cross_entropy(
distogram,
discretized_distances,
ignore_index = IGNORE_INDEX
)
loss.backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
|
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
# data
import sidechainnet as scn
from sidechainnet.sequence.utils import VOCAB
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES
# models
from alphafold2_pytorch import Alphafold2
import alphafold2_pytorch.constants as constants
from se3_transformer_pytorch import SE3Transformer
from alphafold2_pytorch.utils import *
# constants
FEATURES = "esm" # one of ["esm", "msa", "msa_transformer", None]
DEVICE = None # defaults to cuda if available, else cpu
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100
THRESHOLD_LENGTH = 250
TO_PDB = False
SAVE_DIR = ""
# set device
DEVICE = constants.DEVICE
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
# set embedder model from esm if appropriate - Load ESM-1b model
if FEATURES == "esm":
# from pytorch hub (almost 30gb)
embedd_model, alphabet = torch.hub.load("facebookresearch/esm", "esm1b_t33_650M_UR50S")
batch_converter = alphabet.get_batch_converter()
## alternatively do
# import esm # after installing esm
# model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
# helpers
def cycle(loader, cond = lambda x: True):
while True:
for data in loader:
if not cond(data):
continue
yield data
# get data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = 1,
dynamic_batching = False
)
data = iter(data['train'])
data_cond = lambda t: t[1].shape[1] < THRESHOLD_LENGTH
dl = cycle(data, data_cond)
# model
model = Alphafold2(
dim = 256,
depth = 1,
heads = 8,
dim_head = 64,
predict_coords = True,
structure_module_dim = 8,
structure_module_depth = 2,
structure_module_heads = 4,
structure_module_dim_head = 16,
structure_module_refinement_iters = 2
).to(DEVICE)
# optimizer
dispersion_weight = 0.1
criterion = nn.MSELoss()
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training loop
for _ in range(NUM_BATCHES):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seq, coords, mask = batch.seqs, batch.crds, batch.msks
b, l, _ = seq.shape
# prepare data and mask labels
seq, coords, mask = seq.argmax(dim = -1).to(DEVICE), coords.to(DEVICE), mask.to(DEVICE)
# coords = rearrange(coords, 'b (l c) d -> b l c d', l = l) # no need to rearrange for now
# mask the atoms and backbone positions for each residue
# sequence embedding (msa / esm / attn / or nothing)
        msa, embedds = None, None
# get embedds
if FEATURES == "esm":
embedds = get_esm_embedd(seq, embedd_model, batch_converter)
# get msa here
elif FEATURES == "msa":
pass
# no embeddings
else:
pass
# predict - out is (batch, L * 3, 3)
refined = model(
seq,
msa = msa,
embedds = embedds,
mask = mask
)
        # atom mask
        cloud_mask = scn_cloud_mask(seq, boolean = False)
        flat_cloud_mask = rearrange(cloud_mask, 'b l c -> b (l c)')
        # chain_mask is all atoms that will be backpropped thru -> existing + trainable
        chain_mask = (mask * cloud_mask)[cloud_mask]
        flat_chain_mask = rearrange(chain_mask, 'b l c -> b (l c)')
        # build SC container from the refined prediction. set SC points to CA and optionally place carbonyl O
        proto_sidechain = sidechain_container(refined, n_aa=batch,
                                              cloud_mask=cloud_mask, place_oxygen=False)
        # rotate / align
        coords_aligned, labels_aligned = Kabsch(refined, coords[flat_cloud_mask])
# save pdb files for visualization
if TO_PDB:
# idx from batch to save prot and label
idx = 0
coords2pdb(seq[idx, :, 0], coords_aligned[idx], cloud_mask, prefix=SAVE_DIR, name="pred.pdb")
coords2pdb(seq[idx, :, 0], labels_aligned[idx], cloud_mask, prefix=SAVE_DIR, name="label.pdb")
# loss - RMSE + distogram_dispersion
loss = torch.sqrt(criterion(coords_aligned[flat_chain_mask], labels_aligned[flat_chain_mask])) + \
dispersion_weight * torch.norm( (1/weights)-1 )
loss.backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
|
from setuptools import setup, find_packages
setup(
name = 'alphafold2-pytorch',
packages = find_packages(),
version = '0.4.32',
license='MIT',
description = 'AlphaFold2 - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang, Eric Alcaide',
author_email = '[email protected], [email protected]',
url = 'https://github.com/lucidrains/alphafold2',
keywords = [
'artificial intelligence',
'attention mechanism',
'protein folding'
],
install_requires=[
'einops>=0.3',
'En-transformer>=0.2.3',
'invariant-point-attention',
'mdtraj>=1.8',
'numpy',
'proDy',
'pytorch3d',
'requests',
'sidechainnet',
'torch>=1.6',
'transformers',
'tqdm',
'biopython',
'mp-nerf>=0.1.5'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
)
|
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from contextlib import contextmanager
from einops import reduce
# helpers
def exists(val):
return val is not None
@contextmanager
def null_context():
yield
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
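# Illustrative sketch (hypothetical helper, not in the original file): split_at_index slices a
# tensor into two pieces along `dim` at `index`, e.g. to recover the (sequence, msa) halves of a
# concatenated tensor.
def _example_split_at_index():
    t = torch.arange(10).reshape(1, 10)
    left, right = split_at_index(1, 4, t)     # left has shape (1, 4), right has shape (1, 6)
    return left, right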
# function wrapper for determinism on backwards
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# reversible self attention block
class ReversibleSelfAttnBlock(nn.Module):
def __init__(self, f, g, j, k):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.j = Deterministic(j)
self.k = Deterministic(k)
def forward(self, x, m, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_pos_emb = None, msa_pos_emb = None, _reverse = True, **kwargs):
x1, x2 = torch.chunk(x, 2, dim = 2)
m1, m2 = torch.chunk(m, 2, dim = 2)
y1, y2, n1, n2 = None, None, None, None
context = torch.no_grad if _reverse else null_context
record_rng = self.training and _reverse
with context():
y1 = x1 + self.f(x2, shape = seq_shape, record_rng = record_rng, mask = mask, rotary_emb = seq_pos_emb)
y2 = x2 + self.g(y1, shape = seq_shape, record_rng = record_rng)
n1 = m1 + self.j(m2, shape = msa_shape, record_rng = record_rng, mask = msa_mask, rotary_emb = msa_pos_emb)
n2 = m2 + self.k(n1, record_rng = record_rng)
return torch.cat((y1, y2), dim = 2), torch.cat((n1, n2), dim = 2)
def backward_pass(self, y, n, dy, dn, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_pos_emb = None, msa_pos_emb = None, **kwargs):
y1, y2 = torch.chunk(y, 2, dim = 2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, shape = seq_shape, set_rng = True)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, shape = seq_shape, set_rng = True, mask = mask, rotary_emb = seq_pos_emb)
torch.autograd.backward(fx2, dx1, retain_graph = True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim = 2)
dx = torch.cat([dx1, dx2], dim = 2)
n1, n2 = torch.chunk(n, 2, dim = 2)
del n
dn1, dn2 = torch.chunk(dn, 2, dim = 2)
del dn
with torch.enable_grad():
n1.requires_grad = True
gn1 = self.k(n1, set_rng = True)
torch.autograd.backward(gn1, dn2)
with torch.no_grad():
m2 = n2 - gn1
del n2, gn1
dm1 = dn1 + n1.grad
del dn1
n1.grad = None
with torch.enable_grad():
m2.requires_grad = True
fm2 = self.j(m2, shape = msa_shape, set_rng = True, mask = msa_mask, rotary_emb = msa_pos_emb)
torch.autograd.backward(fm2, dm1, retain_graph=True)
with torch.no_grad():
m1 = n1 - fm2
del n1, fm2
dm2 = dn2 + m2.grad
del dn2
m2.grad = None
m = torch.cat([m1, m2.detach()], dim = 2)
dm = torch.cat([dm1, dm2], dim = 2)
return x, m, dx, dm
# reversible cross attention block
class ReversibleCrossAttnBlock(nn.Module):
def __init__(self, f, g, j, k):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.j = Deterministic(j)
self.k = Deterministic(k)
def forward(self, x, m, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_to_msa_pos_emb = None, msa_to_seq_pos_emb = None, _reverse = True, **kwargs):
x1, x2 = torch.chunk(x, 2, dim = 2)
m1, m2 = torch.chunk(m, 2, dim = 2)
y1, y2, n1, n2 = None, None, None, None
context = torch.no_grad if _reverse else null_context
record_rng = self.training and _reverse
with context():
y1 = x1 + self.f(x2, m2, record_rng = record_rng, mask = mask, context_mask = msa_mask, shape = seq_shape, context_shape = msa_shape, rotary_emb = seq_to_msa_pos_emb)
y2 = x2 + self.k(y1, shape = seq_shape, record_rng = record_rng)
n1 = m1 + self.j(m2, y2, record_rng = record_rng, mask = msa_mask, context_mask = mask, shape = msa_shape, context_shape = seq_shape, rotary_emb = msa_to_seq_pos_emb)
n2 = m2 + self.g(n1, record_rng = record_rng)
return torch.cat((y1, y2), dim = 2), torch.cat((n1, n2), dim = 2)
def backward_pass(self, y, n, dy, dn, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_to_msa_pos_emb = None, msa_to_seq_pos_emb = None, **kwargs):
n1, n2 = torch.chunk(n, 2, dim = 2)
del n
dn1, dn2 = torch.chunk(dn, 2, dim = 2)
del dn
y1, y2 = torch.chunk(y, 2, dim = 2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 2)
del dy
with torch.enable_grad():
n1.requires_grad = True
gn1 = self.g(n1, set_rng = True)
torch.autograd.backward(gn1, dn2)
with torch.no_grad():
m2 = n2 - gn1
del n2, gn1
dm1 = dn1 + n1.grad
del dn1
n1.grad = None
with torch.enable_grad():
m2.requires_grad = True
y2.requires_grad = True
fm2 = self.j(m2, y2, set_rng=True, mask = msa_mask, context_mask = mask, shape = msa_shape, context_shape = seq_shape, rotary_emb = msa_to_seq_pos_emb)
torch.autograd.backward(fm2, dm1)
with torch.no_grad():
m1 = n1 - fm2
del n1, fm2
dm2 = dn2 + m2.grad
dx2 = dy2 + y2.grad
del dn2
del dy2
m2.grad = None
y2.grad = None
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.k(y1, shape = seq_shape, set_rng = True)
torch.autograd.backward(gy1, dx2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
m2.requires_grad = True
fx2 = self.f(x2, m2, set_rng = True, mask = mask, context_mask = msa_mask, shape = seq_shape, context_shape = msa_shape, rotary_emb = seq_to_msa_pos_emb)
torch.autograd.backward(fx2, dx1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dx2 + x2.grad
dm2 = dm2 + m2.grad
x2.grad = None
m2.grad = None
with torch.no_grad():
m = torch.cat([m1, m2.detach()], dim = 2)
dm = torch.cat([dm1, dm2], dim = 2)
x = torch.cat([x1, x2.detach()], dim = 2)
dx = torch.cat([dx1, dx2], dim = 2)
return x, m, dx, dm
# reverse and non reverse functions
class ReversibleFunction(Function):
@staticmethod
def forward(ctx, inp, ind, blocks, kwargs):
x, m = split_at_index(1, ind, inp)
for block in blocks:
x, m = block(x, m, _reverse = True, **kwargs)
ctx.blocks = blocks
ctx.kwargs = kwargs
ctx.ind = ind
ctx.save_for_backward(x.detach(), m.detach())
return torch.cat((x, m), dim = 1)
@staticmethod
def backward(ctx, d):
ind = ctx.ind
blocks = ctx.blocks
kwargs = ctx.kwargs
dy, dn = split_at_index(1, ind, d)
y, n = ctx.saved_tensors
for block in blocks[::-1]:
y, n, dy, dn = block.backward_pass(y, n, dy, dn, **kwargs)
d = torch.cat((dy, dn), dim = 1)
return d, None, None, None
reversible_apply = ReversibleFunction.apply
def irreversible_apply(inputs, ind, blocks, kwargs):
x, m = split_at_index(1, ind, inputs)
for block in blocks:
x, m = block(x, m, _reverse = False, **kwargs)
return torch.cat((x, m), dim = 1)
# main reversible sequence class
class ReversibleSequence(nn.Module):
def __init__(self, input_blocks, block_types):
super().__init__()
self.block_types = block_types
blocks = nn.ModuleList([])
for block, block_type in zip(input_blocks, block_types):
if block_type == 'self':
reversible_klass = ReversibleSelfAttnBlock
elif block_type == 'cross':
reversible_klass = ReversibleCrossAttnBlock
elif block_type == 'conv':
reversible_klass = ReversibleSelfAttnBlock
blocks.append(reversible_klass(*block))
self.blocks = blocks
def forward(
self,
seq,
msa,
seq_shape = None,
msa_shape = None,
mask = None,
msa_mask = None,
seq_pos_emb = None,
msa_pos_emb = None,
seq_to_msa_pos_emb = None,
msa_to_seq_pos_emb = None,
reverse = True
):
assert exists(msa), 'reversibility does not work with no MSA sequences yet'
blocks = self.blocks
seq, msa = list(map(lambda t: torch.cat((t, t), dim = -1), (seq, msa)))
kwargs = {'mask': mask, 'msa_mask': msa_mask, 'seq_shape': seq_shape, 'msa_shape': msa_shape, 'seq_pos_emb': seq_pos_emb, 'msa_pos_emb': msa_pos_emb, 'seq_to_msa_pos_emb': seq_to_msa_pos_emb, 'msa_to_seq_pos_emb': msa_to_seq_pos_emb}
fn = reversible_apply if reverse else irreversible_apply
ind = seq.shape[1]
inp = torch.cat((seq, msa), dim = 1)
out = fn(inp, ind, blocks, kwargs)
seq, msa = split_at_index(1, ind, out)
return list(map(lambda t: reduce(t, 'b n (c d) -> b n d', 'mean', c = 2), (seq, msa)))
|
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from alphafold2_pytorch import constants
from einops import rearrange
# MSA MLM
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
class MLM(nn.Module):
def __init__(
self,
dim,
num_tokens,
mask_id,
mask_prob = 0.15,
random_replace_token_prob = 0.1,
keep_token_same_prob = 0.1,
exclude_token_ids = (0,)
):
super().__init__()
self.to_logits = nn.Linear(dim, num_tokens)
self.mask_id = mask_id
self.mask_prob = mask_prob
self.exclude_token_ids = exclude_token_ids
self.keep_token_same_prob = keep_token_same_prob
self.random_replace_token_prob = random_replace_token_prob
def noise(self, seq, mask):
num_msa = seq.shape[1]
seq = rearrange(seq, 'b n ... -> (b n) ...')
mask = rearrange(mask, 'b n ... -> (b n) ...')
# prepare masks for noising sequence
excluded_tokens_mask = mask
for token_id in self.exclude_token_ids:
excluded_tokens_mask = excluded_tokens_mask & (seq != token_id)
mlm_mask = get_mask_subset_with_prob(excluded_tokens_mask, self.mask_prob)
# keep some tokens the same
replace_token_with_mask = get_mask_subset_with_prob(mlm_mask, 1. - self.keep_token_same_prob)
# replace with mask
seq = seq.masked_fill(mlm_mask, self.mask_id)
# generate random tokens
random_replace_token_prob_mask = get_mask_subset_with_prob(mlm_mask, (1 - self.keep_token_same_prob) * self.random_replace_token_prob)
random_tokens = torch.randint(1, constants.NUM_AMINO_ACIDS, seq.shape).to(seq.device)
for token_id in self.exclude_token_ids:
random_replace_token_prob_mask = random_replace_token_prob_mask & (random_tokens != token_id) # make sure you never substitute a token with an excluded token type (pad, start, end)
# noise sequence
noised_seq = torch.where(random_replace_token_prob_mask, random_tokens, seq)
noised_seq = rearrange(noised_seq, '(b n) ... -> b n ...', n = num_msa)
mlm_mask = rearrange(mlm_mask, '(b n) ... -> b n ...', n = num_msa)
return noised_seq, mlm_mask
def forward(self, seq_embed, original_seq, mask):
logits = self.to_logits(seq_embed)
seq_logits = logits[mask]
seq_labels = original_seq[mask]
loss = F.cross_entropy(seq_logits, seq_labels, reduction = 'mean')
return loss
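# Illustrative usage sketch (hypothetical helper with stand-in tensors, not part of the module):
# the mask id 20, the shapes and the random inputs are placeholders chosen only for the example.
def _example_mlm_usage():
    mlm = MLM(dim = 64, num_tokens = 21, mask_id = 20)
    seq = torch.randint(1, 20, (2, 4, 16))       # (batch, num_msa, seq_len) integer amino acid ids
    mask = torch.ones(2, 4, 16).bool()           # which positions are real (non-padding)
    noised_seq, mlm_mask = mlm.noise(seq, mask)  # same shapes as seq / mask, ~15% of tokens masked
    seq_embed = torch.randn(2, 4, 16, 64)        # stand-in for the trunk embeddings of the noised MSA
    loss = mlm(seq_embed, seq, mlm_mask)         # cross entropy over the masked positions only
    return noised_seq, loss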
|
import torch
# constants
MAX_NUM_MSA = 20
MAX_NUM_TEMPLATES = 10
NUM_AMINO_ACIDS = 21
NUM_EMBEDDS_TR = 1280 # best esm model
NUM_EMBEDDS_T5 = 1024 # best t5 model
NUM_COORDS_PER_RES = 14
DISTOGRAM_BUCKETS = 37
THETA_BUCKETS = 25
PHI_BUCKETS = 13
OMEGA_BUCKETS = 25
# embedding related constants
MSA_EMBED_DIM = 768
MSA_MODEL_PATH = ["facebookresearch/esm", "esm_msa1_t12_100M_UR50S"]
ESM_EMBED_DIM = 1280
ESM_MODEL_PATH = ["facebookresearch/esm", "esm1b_t33_650M_UR50S"]
PROTTRAN_EMBED_DIM = 1024
# default device
DEVICE_NAME = 'cuda' if torch.cuda.is_available() else 'cpu'
DEVICE = torch.device(DEVICE_NAME)
# aminoacid data
AA_DATA = {
'A': {
'bonds': [[0,1], [1,2], [2,3], [1,4]]
},
'R': {
'bonds': [[0,1], [1,2], [2,3], [2,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10]]
},
'N': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'D': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'C': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'Q': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [6,8]]
},
'E': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'G': {
'bonds': [[0,1], [1,2], [2,3]]
},
'H': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [5,9]]
},
'I': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[4,7]]
},
'L': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'K': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'M': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7]]
},
'F': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [5,10]]
},
'P': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[0,6]]
},
'S': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'T': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'W': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [10,11], [11,12],
[12, 13], [5,13], [8,13]]
},
'Y': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10], [10,11], [5,11]]
},
'V': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'_': {
'bonds': []
}
}
|
from alphafold2_pytorch.alphafold2 import Alphafold2, Evoformer
|
# utils for working with 3d-protein structures
import os
import re
import numpy as np
import torch
import contextlib
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# custom
import mp_nerf
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
return val is not None
# constants: same as in alphafold2.py
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
distances = torch.cdist(coords, coords, p=2)
boundaries = torch.linspace(2, 20, steps = num_buckets, device = coords.device)
discretized_distances = torch.bucketize(distances, boundaries[:-1])
discretized_distances.masked_fill_(~(mask[..., None] & mask[..., None, :]), ignore_index)
return discretized_distances
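# Illustrative sketch (hypothetical helper, not in the original file): with stand-in CA
# coordinates the distogram targets come out as integer bins in [0, DISTOGRAM_BUCKETS).
def _example_distogram_targets():
    coords = torch.randn(1, 8, 3) * 10          # (batch, length, 3) CA coordinates in Angstroms
    mask = torch.ones(1, 8).bool()
    return get_bucketed_distance_matrix(coords, mask)  # (1, 8, 8) bucket indices, ignore_index at masked pairs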
# decorators
def set_backend_kwarg(fn):
@wraps(fn)
def inner(*args, backend = 'auto', **kwargs):
if backend == 'auto':
backend = 'torch' if isinstance(args[0], torch.Tensor) else 'numpy'
kwargs.update(backend = backend)
return fn(*args, **kwargs)
return inner
def expand_dims_to(t, length = 3):
if length == 0:
return t
return t.reshape(*((1,) * length), *t.shape) # will work with both torch and numpy
def expand_arg_dims(dim_len = 3):
""" pack here for reuse.
turns input into (B x D x N)
"""
def outer(fn):
@wraps(fn)
def inner(x, y, **kwargs):
assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
remaining_len = dim_len - len(x.shape)
x = expand_dims_to(x, length = remaining_len)
y = expand_dims_to(y, length = remaining_len)
return fn(x, y, **kwargs)
return inner
return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
def outer(fn):
@wraps(fn)
def inner(*args, **kwargs):
backend = kwargs.pop('backend')
passed_args = fn(*args, **kwargs)
passed_args = list(passed_args)
if isinstance(passed_args[-1], dict):
passed_kwargs = passed_args.pop()
else:
passed_kwargs = {}
backend_fn = torch_fn if backend == 'torch' else numpy_fn
return backend_fn(*passed_args, **passed_kwargs)
return inner
return outer
@contextlib.contextmanager
def torch_default_dtype(dtype):
prev_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(prev_dtype)
# preprocess data
def get_atom_ids_dict():
""" Get's a dict mapping each atom to a token. """
ids = set(["", "N", "CA", "C", "O"])
for k,v in SC_BUILD_INFO.items():
for name in v["atom-names"]:
ids.add(name)
return {k: i for i,k in enumerate(sorted(ids))}
def make_cloud_mask(aa):
""" relevent points will be 1. paddings will be 0. """
mask = np.zeros(constants.NUM_COORDS_PER_RES)
# early stop if padding token
if aa == "_":
return mask
# get num of atoms in aa
n_atoms = 4+len( SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"] )
mask[:n_atoms] = 1
return mask
def make_atom_id_embedds(aa, atom_ids):
""" Return the tokens for each atom in the aa. """
mask = np.zeros(constants.NUM_COORDS_PER_RES)
# early stop if padding token
if aa == "_":
return mask
# get atom id
atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
for i,atom in enumerate(atom_list):
        mask[i] = atom_ids[atom]
return mask
ATOM_IDS = get_atom_ids_dict()
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
} for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
""" Downloads a PDB entry from the RCSB PDB.
Inputs:
* name: str. the PDB entry id. 4 characters, capitalized.
    * route: str. route of the destination file. usually ".pdb" extension
    Output: route of destination file
"""
os.system(f"curl https://files.rcsb.org/download/{name}.pdb > {route}")
return route
def clean_pdb(name, route=None, chain_num=None):
""" Cleans the structure to only leave the important part.
Inputs:
* name: str. route of the input .pdb file
* route: str. route of the output. will overwrite input if not provided
* chain_num: int. index of chain to select (1-indexed as pdb files)
    Output: route of destination file.
"""
import mdtraj
destin = route if route is not None else name
# read input
raw_prot = mdtraj.load_pdb(name)
# iterate over prot and select the specified chains
idxs = []
for chain in raw_prot.topology.chains:
# if arg passed, only select that chain
if chain_num is not None:
if chain_num != chain.index:
continue
# select indexes of chain
chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
idxs.extend( chain_idxs.tolist() )
# sort: topology and xyz selection are ordered
idxs = sorted(idxs)
    # get new trajectory from the selected subset of indexes and save
prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
topology=raw_prot.topology.subset(idxs))
prot.save(destin)
return destin
def custom2pdb(coords, proteinnet_id, route):
""" Takes a custom representation and turns into a .pdb file.
Inputs:
* coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
    same order as in the proteinnet is assumed (same as raw pdb file)
* proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
see: https://github.com/aqlaboratory/proteinnet/
    * route: str. destination route.
Output: tuple of routes: (original, generated) for the structures.
"""
import mdtraj
# convert to numpy
if isinstance(coords, torch.Tensor):
coords = coords.detach().cpu().numpy()
    # ensure (1, N, 3)
    if coords.shape[0] == 3:
        coords = coords.T
    coords = np.expand_dims(coords, axis=0)
# get pdb id and chain num
pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
    # download pdb file and select appropriate chain
download_pdb(pdb_name, pdb_destin)
clean_pdb(pdb_destin, chain_num=chain_num)
# load trajectory scaffold and replace coordinates - assumes same order
scaffold = mdtraj.load_pdb(pdb_destin)
scaffold.xyz = coords
scaffold.save(route)
return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
""" Turns coordinates into PDB files ready to be visualized.
Inputs:
* seq: (L,) tensor of ints (sidechainnet aa-key pairs)
* coords: (3, N) coords of atoms
* cloud_mask: (L, C) boolean mask of occupied spaces in scn format
* prefix: str. directory to save files.
* name: str. name of destin file (ex: pred1.pdb)
"""
scaffold = torch.zeros( cloud_mask.shape, 3 )
scaffold[cloud_mask] = coords.cpu().float()
# build structures and save
pred = scn.StructureBuilder( seq, crd=scaffold )
pred.to_pdb(prefix+name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
""" Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
deletekeys = dict.fromkeys(string.ascii_lowercase)
deletekeys["."] = None
deletekeys["*"] = None
translation = str.maketrans(deletekeys)
return sequence.translate(translation)
def read_msa(filename: str, nseq: int):
""" Reads the first nseq sequences from an MSA file, automatically removes insertions."""
return [(record.description, remove_insertions(str(record.seq)))
for record in itertools.islice(SeqIO.parse(filename, "fasta"), nseq)]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_embed_input(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(lambda c: isinstance(c, str), out)):
return (None, ''.join(out))
return out
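# Illustrative sketch: a flat list of residue ids becomes a single (None, "<aa string>")
# pair (the format the ESM / MSA batch converters expect), while a nested list (an MSA)
# yields one such pair per row. The concrete characters depend on sidechainnet's VOCAB.
#   ids_to_embed_input([3, 1, 4])         -> (None, "<3 aa letters>")
#   ids_to_embed_input([[3, 1], [2, 0]])  -> [(None, "<2 aa letters>"), (None, "<2 aa letters>")]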
def ids_to_prottran_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for ids in x:
chars = ' '.join([id2aa[i] for i in ids])
chars = re.sub(r"[UZOB]", "X", chars)
out.append(chars)
return out
def get_prottran_embedd(seq, model, tokenizer, device = None):
from transformers import pipeline
fe = pipeline('feature-extraction', model = model, tokenizer = tokenizer, device = (-1 if not exists(device) else device.index))
max_seq_len = seq.shape[1]
embedd_inputs = ids_to_prottran_input(seq.cpu().tolist())
embedding = fe(embedd_inputs)
embedding = torch.tensor(embedding, device = device)
return embedding[:, 1:(max_seq_len + 1)]
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
""" Returns the MSA_tr embeddings for a protein.
Inputs:
* msa: ((b,) n_seqs, L) tensor of ints (in sidechainnet int-char convention)
* embedd_model: MSA_tr model (see train_end2end.py for an example)
* batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA
* embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
"""
# use MSA transformer
REPR_LAYER_NUM = 12
device = msa.device
max_seq_len = msa.shape[-1]
embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for the start token, so take from 1 on
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :]
return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
""" Returns the ESM embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: ESM model (see train_end2end.py for an example)
* batch_converter: ESM batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for ESM-1b
* embedd_dim: number of embedding dimensions. 1280 for ESM-1b
"""
# use ESM transformer
device = seq.device
REPR_LAYER_NUM = 33
max_seq_len = seq.shape[-1]
embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
# index 0 is for the start token, so take from 1 on
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :].unsqueeze(dim=1)
return token_reps
def get_t5_embedd(seq, tokenizer, encoder, msa_data=None, device=None):
""" Returns the ProtT5-XL-U50 embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* tokenizer: tokenizer model: T5Tokenizer
* encoder: encoder model: T5EncoderModel
ex: from transformers import T5EncoderModel, T5Tokenizer
model_name = "Rostlab/prot_t5_xl_uniref50"
tokenizer = T5Tokenizer.from_pretrained(model_name, do_lower_case=False )
model = T5EncoderModel.from_pretrained(model_name)
# prepare model
model = model.to(device)
model = model.eval()
if torch.cuda.is_available():
model = model.half()
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for T5 models
* embedd_dim: number of embedding dimensions. 1024 for T5 models
"""
# get params and prepare
device = seq.device if device is None else device
embedd_inputs = ids_to_prottran_input(seq.cpu().tolist())
# embedd - https://huggingface.co/Rostlab/prot_t5_xl_uniref50
inputs_embedding = []
shift_left, shift_right = 0, -1
ids = tokenizer.batch_encode_plus(embedd_inputs, add_special_tokens=True,
padding=True,
return_tensors="pt")
with torch.no_grad():
embedding = encoder(input_ids=torch.tensor(ids['input_ids']).to(device),
attention_mask=torch.tensor(ids["attention_mask"]).to(device))
# return (batch, seq_len, embedd_dim)
token_reps = embedding.last_hidden_state[:, shift_left:shift_right].to(device)
token_reps = expand_dims_to(token_reps, 4-len(token_reps.shape))
return token_reps.float()
def get_all_protein_ids(dataloader, verbose=False):
""" Given a sidechainnet dataloader for a CASP version,
Returns all the ids belonging to proteins.
Inputs:
* dataloader: a sidechainnet dataloader for a CASP version
Outputs: a set containing the ids for all protein entries.
"""
# store ids here
ids = set([])
# iterate for all batches
for i,batch in tqdm(enumerate(dataloader)):
# for breaking from 2 loops at once
try:
for i in range(batch.int_seqs.shape[0]):
# check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
max_len_10 = len(batch.pids[i]) < 10
fragments = [len(x) <= 4 for x in batch.pids[i].split("_")]
fragments_under_4 = sum(fragments) == len(fragments) # AND CONDITION
# record id
if max_len_10 and fragments_under_4:
ids.add(batch.pids[i])
else:
if verbose:
print("skip:", batch.pids[i], "under 4", fragments)
except StopIteration:
break
# returns set of ids
return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
""" Gets the boolean mask atom positions (not all aas have same atoms).
Inputs:
* scn_seq: (batch, length) sequence as provided by Sidechainnet package
* boolean: whether to return as array of idxs or boolean values
* coords: optional. (batch, L*C, 3). sidechainnet coords.
returns the true mask (solves potential atoms that might not be provided)
Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
"""
scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
# early check for coords mask
if coords is not None:
batch_mask = ( rearrange(coords, '... (l c) d -> ... l c d', c=constants.NUM_COORDS_PER_RES) == 0 ).sum(dim=-1) < coords.shape[-1]
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
# do loop in cpu
device = scn_seq.device
batch_mask = []
scn_seq = scn_seq.cpu().tolist()
for i, seq in enumerate(scn_seq):
# get masks for each prot (points for each aa)
batch_mask.append( torch.tensor([CUSTOM_INFO[VOCAB._int2char[aa]]['cloud_mask'] \
for aa in seq]).bool().to(device) )
# concat in last dim
batch_mask = torch.stack(batch_mask, dim=0)
# return mask (boolean or indexes)
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
""" Gets the boolean mask for N and CA positions.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
* n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
* bool: whether to return as array of idxs or boolean values
Outputs: (N_mask, CA_mask, C_mask)
"""
wrapper = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
# N is the first atom in every AA. CA is the 2nd.
wrapper[..., 0] = 1
wrapper[..., 1] = 2
wrapper[..., 2] = 3
wrapper = rearrange(wrapper, '... l c -> ... (l c)')
# find idxs
N_mask = wrapper == 1
CA_mask = wrapper == 2
C_mask = wrapper == 3
if boolean:
return N_mask, CA_mask, C_mask
return torch.nonzero(N_mask), torch.nonzero(CA_mask), torch.nonzero(C_mask)
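# Illustrative sketch of the layout for a length-2 sequence with n_aa=3: the flattened
# wrapper reads [1, 2, 3, 1, 2, 3], so N_mask selects positions {0, 3}, CA_mask {1, 4}
# and C_mask {2, 5} of an (L * n_aa) backbone coordinate array.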
def scn_atom_embedd(scn_seq):
""" Returns the token for each atom in the aa.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
"""
device = scn_seq.device
batch_tokens = []
# do loop in cpu
scn_seq = scn_seq.cpu().tolist()
for i,seq in enumerate(scn_seq):
batch_tokens.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa)]["atom_id_embedd"] \
for aa in seq]) )
batch_tokens = torch.stack(batch_tokens, dim=0).long().to(device)
return batch_tokens
def mat_input_to_masked(x, x_mask=None, edges_mat=None, edges=None,
edge_mask=None, edge_attr_mat=None,
edge_attr=None):
""" Turns the padded input and edges + mask into the
non-padded inputs and edges.
At least one of (edges_mat, edges) must be provided.
The same format for edges and edge_attr must be provided
(either adj matrix form or flattened form).
Inputs:
* x: ((batch), N, D) a tensor of N nodes and D dims for each one
* x_mask: ((batch), N,) boolean mask for x
* edges: (2, E) optional. indices of the corresponding adjacency matrix.
* edges_mat: ((batch), N, N) optional. adjacency matrix for x
* edge_mask: optional. boolean mask of the same shape of either "edge_mat" or "edges".
* edge_attr: (E, D_edge) optional. edge attributes of D_edge dims.
* edge_attr_mat: ((batch), N, N) optional. adjacency matrix with features
Outputs:
* x: (N_, D) the masked node features
* edge_index: (2, E_) the masked x-indices for the edges
* edge_attr: (E_, D_edge) the masked edge attributes
* batch: (N_,) the corresponding index in the batch for each node
"""
# collapse batch dimension
if len(x.shape) == 3:
batch_dim = x.shape[1]
# collapse for x and its mask
x = rearrange(x, 'b n d ... -> (b n) d ...')
if x_mask is not None:
x_mask = rearrange(x_mask, 'b n ... -> (b n) ...')
else:
x_mask = torch.ones_like(x[..., 0]).bool()
# collapse for edge indexes and attributes if needed
if edges_mat is not None and edges is None:
edges = torch.nonzero(edges_mat, as_tuple=False).t()
edges = edges[1:] + edges[:1]*batch_dim
# get the batch identifier for each node
batch = (torch.arange(x.shape[0], device=x.device) // batch_dim)[x_mask]
else:
# edges to indices format
if edges_mat is not None and edges is None:
edges = torch.nonzero(edges_mat, as_tuple=False).t()
# get the batch identifier for each node
batch = torch.zeros(x.shape[0], device=x.device)
# adapt edge attrs if provided
if edge_attr_mat is not None and edge_attr is None:
edge_attr = edge_attr_mat[edges_mat.bool()]
# gen edge_mask if not provided
if edge_mask is None:
edge_mask = torch.ones_like(edges[-1]).bool()
# begin applying masks
x = x[x_mask]
# process edge indexes: get square mat and remove all non-coding atoms
max_num = edges.max().item()+1
wrapper = torch.zeros(max_num, max_num).to(x.device)
wrapper[edges[0][edge_mask], edges[1][edge_mask]] = 1
wrapper = wrapper[x_mask, :][:, x_mask]
edge_index = torch.nonzero(wrapper, as_tuple=False).t()
# process edge attr
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
return x, edge_index, edge_attr, batch
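# Illustrative usage sketch (shapes only): for a padded batch of two graphs with
# x: (2, N, D), x_mask: (2, N) and a dense adjacency edges_mat: (2, N, N),
#   x_, edge_index, edge_attr_, batch = mat_input_to_masked(x, x_mask, edges_mat=edges_mat)
# returns the unpadded node features (N_, D), PyG-style edge indices (2, E_) and an
# (N_,) vector assigning each surviving node to its graph in the batch.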
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
""" Calculates the n-th degree adjacency matrix.
Performs mm of adj_mat and adds the newly added.
Default is dense. Mods for sparse version are done when needed.
Inputs:
* adj_mat: (N, N) adjacency tensor
* n: int. degree of the output adjacency
* sparse: bool. whether to use torch-sparse module
Outputs:
* edge_idxs: ij positions of the adjacency matrix
* edge_attrs: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
"""
adj_mat = adj_mat.float()
attr_mat = torch.zeros_like(adj_mat)
new_adj_mat = adj_mat.clone()
for i in range(n):
if i == 0:
attr_mat += adj_mat
continue
if i == 1 and sparse:
idxs = adj_mat.nonzero().t()
vals = adj_mat[idxs[0], idxs[1]]
new_idxs = idxs.clone()
new_vals = vals.clone()
m, k, n = 3 * [adj_mat.shape[0]] # (m, n) * (n, k) , but adj_mats are squared: m=n=k
if sparse:
new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n)
new_vals = new_vals.bool().float()
# fill by indexes bc it's faster in sparse mode - will need an intersection function
previous = attr_mat[new_idxs[0], new_idxs[1]].bool().float()
attr_mat[new_idxs[0], new_idxs[1]] = (1 - previous)*(i+1)
else:
new_adj_mat = (new_adj_mat @ adj_mat).bool().float()
attr_mat.masked_fill_( ((new_adj_mat - attr_mat.bool().float()) > 0), i+1 )
return new_adj_mat, attr_mat
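# Illustrative sketch (dense mode, using the in-place fill above): for a 3-node path
# graph 0-1-2,
#   adj = torch.tensor([[0,1,0],[1,0,1],[0,1,0]]).float()
#   new_adj, attr = nth_deg_adjacency(adj, n=2)
# attr marks direct neighbours with 1 and the newly reachable 2-hop pairs, including
# the self-loops introduced by A @ A, with 2.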
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True, sparse=False):
""" Returns the idxs of covalent bonds for a protein.
Inputs
* seq: (b, n) torch long.
* adj_degree: int. adjacency degree
* cloud_mask: mask selecting the present atoms.
* mat: whether to return as indexes of only atoms (PyG version)
or matrices of masked atoms (for batched training).
for indexes, only 1 seq is supported.
* sparse: bool. whether to use torch_sparse for adj_mat calc
Outputs: edge_idxs, edge_types (degree of adjacency).
"""
device = seqs.device
# set up container adj_mat (will get trimmed - less than 14)
next_aa = NUM_COORDS_PER_RES
adj_mat = torch.zeros(seqs.shape[0], *[seqs.shape[1]*NUM_COORDS_PER_RES]*2)
# not needed to device since it's only for indices
seq_list = seqs.cpu().tolist()
for s,seq in enumerate(seq_list):
next_idx = 0
for i,idx in enumerate(seq):
aa_bonds = constants.AA_DATA[VOCAB._int2char[idx]]['bonds']
# if no edges -> padding token -> finish bond creation for this seq
if len(aa_bonds) == 0:
break
# correct next position. for indexes functionality
next_aa = max(aa_bonds, key=lambda x: max(x))[-1]
# offset by pos in chain ( intra-aa bonds + with next aa )
bonds = next_idx + torch.tensor( aa_bonds + [[2, next_aa]] ).t()
next_idx += next_aa
# delete link with next if final AA in seq
if i == seqs.shape[1] - 1:
bonds = bonds[:, :-1]
# modify adj mat
adj_mat[s, bonds[0], bonds[1]] = 1
# convert to undirected
adj_mat[s] = adj_mat[s] + adj_mat[s].t()
# do N_th degree adjacency
adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=sparse)
if mat:
# return the full matrix/tensor
return attr_mat.bool().to(seqs.device), attr_mat.to(device)
else:
edge_idxs = attr_mat[0].nonzero().t().long()
edge_types = attr_mat[0, edge_idxs[0], edge_idxs[1]]
return edge_idxs.to(seqs.device), edge_types.to(seqs.device)
def sidechain_container(seqs, backbones, atom_mask, cloud_mask=None, padding_tok=20):
""" Gets a backbone of the protein, returns the whole coordinates
with sidechains (same format as sidechainnet). Keeps differentiability.
Inputs:
* seqs: (batch, L) either tensor or list
* backbones: (batch, L*n_aa, 3): assumes batch=1 (extension to larger batches is untested).
Coords for (N-term, C-alpha, C-term, (c_beta)) of every aa.
* atom_mask: (14,). int or bool tensor specifying which atoms are passed.
* cloud_mask: (batch, l, c). optional. cloud mask from `scn_cloud_mask`.
if passed, sets points outside of the mask to 0.
* padding_tok: int. padding token. same as in sidechainnet: 20
Outputs: whole coordinates of shape (batch, L, 14, 3)
"""
atom_mask = atom_mask.bool().cpu().detach()
cum_atom_mask = atom_mask.cumsum(dim=-1).tolist()
device = backbones.device
batch, length = backbones.shape[0], backbones.shape[1] // cum_atom_mask[-1]
predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
# early check if whole chain is already pred
if cum_atom_mask[-1] == 14:
return predicted
# build scaffold from (N, CA, C, CB) - do in cpu
new_coords = torch.zeros(batch, length, constants.NUM_COORDS_PER_RES, 3)
predicted = predicted.cpu() if predicted.is_cuda else predicted
# fill atoms if they have been passed
for i,atom in enumerate(atom_mask.tolist()):
if atom:
new_coords[:, :, i] = predicted[:, :, cum_atom_mask[i]-1]
# generate sidechain if not passed
for s,seq in enumerate(seqs):
# format seq accordingly
if isinstance(seq, torch.Tensor):
padding = (seq == padding_tok).sum().item()
seq_str = ''.join([VOCAB._int2char[aa] for aa in seq.cpu().numpy()[:-padding or None]])
elif isinstance(seq, str):
padding = 0
seq_str = seq
# get scaffolds - will overwrite oxygen since its position is fully determined by N-C-CA
scaffolds = mp_nerf.proteins.build_scaffolds_from_scn_angles(seq_str, angles=None, device="cpu")
coords, _ = mp_nerf.proteins.sidechain_fold(wrapper = new_coords[s, :-padding or None].detach(),
**scaffolds, c_beta = cum_atom_mask[4]==5)
# add detached scn
for i,atom in enumerate(atom_mask.tolist()):
if not atom:
new_coords[:, :-padding or None, i] = coords[:, i]
new_coords = new_coords.to(device)
if cloud_mask is not None:
new_coords[torch.logical_not(cloud_mask)] = 0.
# replace any nan-s with previous point location (or N if pos is 13th of AA)
nan_mask = list(torch.nonzero(new_coords!=new_coords, as_tuple=True))
new_coords[nan_mask[0], nan_mask[1], nan_mask[2]] = new_coords[nan_mask[0],
nan_mask[1],
(nan_mask[-2]+1) % new_coords.shape[-1]]
return new_coords.to(device)
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
""" Returns the central estimate of a distogram. Median for now.
Inputs:
* distogram: (batch, N, N, B) where B is the number of buckets.
* bins: (B,) containing the cutoffs for the different buckets
* min_t: float. lower bound for distances.
Outputs:
* central: (batch, N, N)
* dispersion: (batch, N, N)
* weights: (batch, N, N)
"""
shape, device = distogram.shape, distogram.device
# threshold to weights and find mean value of each bin
n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
n_bins[0] = 1.5
n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
# calculate measures of centrality and dispersion -
magnitudes = distogram.sum(dim=-1)
if center == "median":
cum_dist = torch.cumsum(distogram, dim=-1)
medium = 0.5 * cum_dist[..., -1:]
central = torch.searchsorted(cum_dist, medium).squeeze()
central = n_bins[ torch.min(central, max_bin_allowed) ]
elif center == "mean":
central = (distogram * n_bins).sum(dim=-1) / magnitudes
# create mask for last class - (IGNORE_INDEX)
mask = (central <= bins[-2].item()).float()
# mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
diag_idxs = np.arange(shape[-2])
central = expand_dims_to(central, 3 - len(central.shape))
central[:, diag_idxs, diag_idxs] *= 0.
# provide weights
if wide == "var":
dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
elif wide == "std":
dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
else:
dispersion = torch.zeros_like(central, device=device)
# rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
weights = mask / (1+dispersion)
weights[weights != weights] = 0.
weights[:, diag_idxs, diag_idxs] *= 0.
return central, weights
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distogram is (N x N) and symmetric
Outs:
* best_3d_coords: (batch x 3 x N)
* historic_stresses: (batch x steps)
"""
device, dtype = pre_dist_mat.device, pre_dist_mat.type()
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
diag_idxs = np.arange(N)
his = [torch.tensor([np.inf]*batch, device=device)]
# initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
# follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
D = pre_dist_mat**2
M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
# do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
# https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
svds = [torch.svd_lowrank(mi) for mi in M]
u = torch.stack([svd[0] for svd in svds], dim=0)
s = torch.stack([svd[1] for svd in svds], dim=0)
v = torch.stack([svd[2] for svd in svds], dim=0)
best_3d_coords = torch.bmm(u, torch.diag_embed(s).abs().sqrt())[..., :3]
# only eigen - way faster but not weights
if weights is None and eigen==True:
return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
elif eigen==True:
if verbose:
print("Can't use eigen flag if weights are active. Fallback to iterative")
# continue the iterative way
if weights is None:
weights = torch.ones_like(pre_dist_mat)
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
best_3d_coords = best_3d_coords.contiguous()
dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[ dist_mat <= 0 ] += 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
# update
coords = (1. / N * torch.matmul(B, best_3d_coords))
dis = torch.norm(coords, dim=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (his[-1] - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
his.append( stress / dis )
return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distogram is (N x N) and symmetric
Out:
* best_3d_coords: (3 x N)
* historic_stress
"""
if weights is None:
weights = np.ones_like(pre_dist_mat)
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
his = [np.inf]
# init random coords
best_stress = np.inf * np.ones(batch)
best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[dist_mat == 0] = 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
# update - double transpose. TODO: consider fix
coords = (1. / N * np.matmul(best_3d_coords, B))
dis = np.linalg.norm(coords, axis=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (best_stress - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
best_stress = stress / dis
his.append(best_stress)
return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Can't use torch.dot bc it does not broadcast
Inputs:
* c1: (batch, 3) or (3,)
* c2: (batch, 3) or (3,)
* c3: (batch, 3) or (3,)
* c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return torch.atan2( ( (torch.norm(u2, dim=-1, keepdim=True) * u1) * torch.cross(u2,u3, dim=-1) ).sum(dim=-1) ,
( torch.cross(u1,u2, dim=-1) * torch.cross(u2, u3, dim=-1) ).sum(dim=-1) )
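# Worked example: for c1=(0,1,0), c2=(0,0,0), c3=(1,0,0), c4=(1,0,1) the bond vectors are
# u1=(0,-1,0), u2=(1,0,0), u3=(0,0,1); (|u2|*u1)·(u2×u3) = 1 and (u1×u2)·(u2×u3) = 0,
# so the dihedral is atan2(1, 0) = pi/2 (+90 degrees).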
def get_dihedral_numpy(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Inputs:
* c1: (batch, 3) or (3,)
* c2: (batch, 3) or (3,)
* c3: (batch, 3) or (3,)
* c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return np.arctan2( ( (np.linalg.norm(u2, axis=-1, keepdims=True) * u1) * np.cross(u2,u3, axis=-1)).sum(axis=-1),
( np.cross(u1,u2, axis=-1) * np.cross(u2, u3, axis=-1) ).sum(axis=-1) )
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (batch, N) boolean mask for N-term positions
* CA_mask: (batch, N) boolean mask for C-alpha positions
* C_mask: (batch, N) or None. boolean mask for C-term positions, or
automatically calculated from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
Note: use [0] since all prots in batch have same backbone
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
# ensure dims
N_mask = expand_dims_to( N_mask, 2-len(N_mask.shape) )
CA_mask = expand_dims_to( CA_mask, 2-len(CA_mask.shape) )
if C_mask is not None:
C_mask = expand_dims_to( C_mask, 2-len(C_mask.shape) )
else:
C_mask = torch.logical_not(torch.logical_or(N_mask,CA_mask))
# select points
n_terms = pred_coords_[:, N_mask[0].squeeze()]
c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
c_terms = pred_coords_[:, C_mask[0].squeeze()]
# compute phis for every protein in the batch
phis = [get_dihedral_torch(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return torch.stack([(x<0).float().mean() for x in phis], dim=0 )
return phis
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (N, ) boolean mask for N-term positions
* CA_mask: (N, ) boolean mask for C-alpha positions
* C_mask: (N, ) or None. boolean mask for C-term positions, or
automatically calculated from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = np.transpose(pred_coords, (0, 2, 1))
n_terms = pred_coords_[:, N_mask.squeeze()]
c_alphas = pred_coords_[:, CA_mask.squeeze()]
# select c_term auto if not passed
if C_mask is not None:
c_terms = pred_coords_[:, C_mask]
else:
c_terms = pred_coords_[:, (np.ones_like(N_mask)-N_mask-CA_mask).squeeze().astype(bool) ]
# compute phis for every protein in the batch
phis = [get_dihedral_numpy(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return np.array( [(x<0).mean() for x in phis] )
return phis
# alignment by centering + rotation to compute optimal RMSD
# adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
device = X.device
# center X and Y to the origin
X_ = X - X.mean(dim=-1, keepdim=True)
Y_ = Y - Y.mean(dim=-1, keepdim=True)
# calculate covariance matrix (for each prot in the batch)
C = torch.matmul(X_, Y_.t()).detach()
if cpu:
C = C.cpu()
# Optimal rotation matrix via SVD
if int(torch.__version__.split(".")[1]) < 8:
# warning! for torch < 1.8, torch.svd returns V (not Vh), so W must be transposed
V, S, W = torch.svd(C)
W = W.t()
else:
V, S, W = torch.linalg.svd(C)
# determinant sign for direction correction
d = (torch.det(V) * torch.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = torch.matmul(V, W).to(device)
# calculate rotations
X_ = torch.matmul(X_.t(), U).t()
# return centered and aligned
return X_, Y_
def kabsch_numpy(X, Y):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
# center X and Y to the origin
X_ = X - X.mean(axis=-1, keepdims=True)
Y_ = Y - Y.mean(axis=-1, keepdims=True)
# calculate covariance matrix (for each prot in the batch)
C = np.dot(X_, Y_.transpose())
# Optimal rotation matrix via SVD
V, S, W = np.linalg.svd(C)
# determinant sign for direction correction
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = np.dot(V, W)
# calculate rotations
X_ = np.dot(X_.T, U).T
# return centered and aligned
return X_, Y_
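# Illustrative sanity check (sketch): aligning a structure onto a rigidly rotated copy of
# itself should give near-zero RMSD. With X of shape (3, N) and a random proper rotation:
#   R, _ = np.linalg.qr(np.random.randn(3, 3))
#   R *= np.sign(np.linalg.det(R))                 # force det(R) = +1 (proper rotation)
#   X_aligned, Y_centered = kabsch_numpy(X, R @ X)
#   rmsd_numpy(X_aligned[None], Y_centered[None])  # ~0 up to numerical error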
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2,
custom=None, distmat_mask=None, clamp=None):
""" Calculates a loss on the distance matrix - no need to align structs.
Inputs:
* X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
* X_mat: (N, N) tensor. the predicted distance matrix. optional.
* Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
* Y_mat: (N, N) tensor. the true distance matrix. optional.
* p: int. power for the distance calculation (2 for euclidean)
* q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
* custom: func or None. custom loss over distance matrices.
ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
* distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
* clamp: tuple of (min,max) values for clipping distance matrices. ex: (0,150)
"""
assert (X is not None or X_mat is not None) and \
(Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
# calculate distance matrices
if X_mat is None:
X = X.squeeze()
if clamp is not None:
X = torch.clamp(X, *clamp)
X_mat = torch.cdist(X, X, p=p)
if Y_mat is None:
Y = Y.squeeze()
if clamp is not None:
Y = torch.clamp(Y, *clamp)
Y_mat = torch.cdist(Y, Y, p=p)
if distmat_mask is None:
distmat_mask = torch.ones_like(Y_mat).bool()
# do custom expression if passed
if custom is not None:
return custom(X_mat.squeeze(), Y_mat.squeeze()).mean()
# **2 ensures always positive. Later scale back to desired power
else:
loss = ( X_mat - Y_mat )**2
if q != 2:
loss = loss**(q/2)
return loss[distmat_mask].mean()
def rmsd_torch(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return torch.sqrt( torch.mean((X - Y)**2, axis=(-1, -2)) )
def rmsd_numpy(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return np.sqrt( np.mean((X - Y)**2, axis=(-1, -2)) )
def gdt_torch(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
device = X.device
if weights is None:
weights = torch.ones(1, len(cutoffs)).to(device)
else:
weights = torch.tensor([weights]).to(device)
# set zeros and fill with values
GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
dist = ((X - Y)**2).sum(dim=1).sqrt()
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
# weighted mean
return (GDT*weights).mean(-1)
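# Illustrative sketch: GDT_TS averages the fraction of atoms within 1, 2, 4 and 8 A of
# their reference positions. If 40% of atoms are within 1 A, 60% within 2 A, 80% within
# 4 A and 100% within 8 A, the (unweighted) score is (0.4 + 0.6 + 0.8 + 1.0) / 4 = 0.7.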
def gdt_numpy(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
if weights is None:
weights = np.ones( (1,len(cutoffs)) )
else:
weights = np.array([weights])
# set zeros and fill with values
GDT = np.zeros( (X.shape[0], len(cutoffs)) )
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).mean(axis=-1)
# weighted mean
return (GDT*weights).mean(-1)
def tmscore_torch(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = max(15, X.shape[-1])
d0 = 1.24 * (L - 15)**(1/3) - 1.8
# get distance
dist = ((X - Y)**2).sum(dim=1).sqrt()
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(dim=-1)
def tmscore_numpy(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = max(15, X.shape[-1])
d0 = 1.24 * np.cbrt(L - 15) - 1.8
# get distance
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(axis=-1)
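# Worked example for the normalisation term: with L = 100 residues,
# d0 = 1.24 * (100 - 15)**(1/3) - 1.8 ~ 1.24 * 4.397 - 1.8 ~ 3.65 A, so a residue
# displaced by exactly d0 from its reference position contributes 1 / (1 + 1) = 0.5
# to the mean.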
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
eigen=False, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_torch(pre_dist_mat, weights=weights,iters=iters,
tol=tol, eigen=eigen, verbose=verbose)
if not fix_mirror:
return preds, stresses
# no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_torch(preds, N_mask, CA_mask, C_mask, prop=True)
to_correct = torch.nonzero( (phi_ratios < 0.5)).view(-1)
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
preds[to_correct, -1] = (-1)*preds[to_correct, -1]
if verbose == 2:
print("Corrected mirror idxs:", to_correct)
return preds, stresses
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_numpy(pre_dist_mat, weights=weights,iters=iters,
tol=tol, verbose=verbose)
if not fix_mirror:
return preds, stresses
# no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
for i,pred in enumerate(preds):
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
if phi_ratios[i] < 0.5:
preds[i, -1] = (-1)*preds[i, -1]
if verbose == 2:
print("Corrected mirror in struct no.", i)
return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
""" Computes the lddt score for each C_alpha.
https://academic.oup.com/bioinformatics/article/29/21/2722/195896
Inputs:
* true_coords: (b, l, c, d) in sidechainnet format.
* pred_coords: (b, l, c, d) in sidechainnet format.
* cloud_mask : (b, l, c) adapted for scn format.
* r_0: float. maximum inclusion radius in reference struct.
Outputs:
* (b, l) lddt for c_alpha scores (ranging between 0 and 1)
See wrapper below.
"""
device, dtype = true_coords.device, true_coords.type()
thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
# adapt masks
cloud_mask = cloud_mask.bool().cpu()
c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim
c_alpha_mask[..., 1] = True
# container for c_alpha scores (between 0,1)
wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
for bi, seq in enumerate(true_coords):
# select atoms for study
c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions
selected_pred = pred_coords[bi, c_alphas, :]
selected_target = true_coords[bi, c_alphas, :]
# get number under distance
dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
under_r0_target = dist_mat_target < r_0
compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
# measure diff below threshold
score = torch.zeros_like(under_r0_target).float()
max_score = torch.zeros_like(under_r0_target).float()
max_score[under_r0_target] = 4.
# measure under how many thresholds
score[under_r0_target] = thresholds.shape[0] - \
torch.bucketize( compare_dists, boundaries=thresholds ).float()
# dont include diagonal
l_mask = c_alphas.float().sum(dim=-1).bool()
wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
( max_score.sum(dim=-1) - thresholds.shape[0] )
return wrapper
################
### WRAPPERS ###
################
@set_backend_kwarg
@invoke_torch_or_numpy(mdscaling_torch, mdscaling_numpy)
def MDScaling(pre_dist_mat, **kwargs):
""" Gets distance matrix (-ces). Outputs 3d.
Assumes (for now) the distogram is (N x N) and symmetric.
For support of distograms: see `center_distogram_torch()`
Inputs:
* pre_dist_mat: (1, N, N) distance matrix.
* weights: optional. (N x N) pairwise relative weights .
* iters: number of iterations to run the algorithm on
* tol: relative tolerance at which to stop the algorithm if no better
improvement is achieved
* backend: one of ["numpy", "torch", "auto"] for backend choice
* fix_mirror: int. number of iterations to run the 3d generation and
pick the best mirror (highest number of negative phis)
* N_mask: indexing array/tensor for indices of backbone N.
Only used if fix_mirror > 0.
* CA_mask: indexing array/tensor for indices of backbone C_alpha.
Only used if fix_mirror > 0.
* verbose: whether to print logs
Outputs:
* best_3d_coords: (3 x N)
* historic_stress: (timesteps, )
"""
pre_dist_mat = expand_dims_to(pre_dist_mat, 3 - len(pre_dist_mat.shape))
return pre_dist_mat, kwargs
@expand_arg_dims(dim_len = 2)
@set_backend_kwarg
@invoke_torch_or_numpy(kabsch_torch, kabsch_numpy)
def Kabsch(A, B):
""" Returns Kabsch-rotated matrices resulting
from aligning A into B.
Adapted from: https://github.com/charnley/rmsd/
* Inputs:
* A,B are (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of shape (3 x N)
"""
# run calcs - pick the 0th bc an additional dim was created
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(rmsd_torch, rmsd_numpy)
def RMSD(A, B):
""" Returns RMSD score as defined here (lower is better):
https://en.wikipedia.org/wiki/
Root-mean-square_deviation_of_atomic_positions
* Inputs:
* A,B are (B x 3 x N) or (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of size (B,)
"""
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(gdt_torch, gdt_numpy)
def GDT(A, B, *, mode="TS", cutoffs=[1,2,4,8], weights=None):
""" Returns GDT score as defined here (highre is better):
Supports both TS and HA
http://predictioncenter.org/casp12/doc/help.html
* Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* cutoffs: defines thresholds for gdt
* weights: list containing the weights
* mode: one of ["numpy", "torch", "auto"] for backend
* Outputs: tensor/array of size (B,)
"""
# define cutoffs for each type of gdt and weights
cutoffs = [0.5,1,2,4] if mode in ["HA", "ha"] else [1,2,4,8]
# calculate GDT
return A, B, cutoffs, {'weights': weights}
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(tmscore_torch, tmscore_numpy)
def TMscore(A, B):
""" Returns TMscore as defined here (higher is better):
>0.5 (likely) and >0.6 (highly likely) indicate the same fold; scores around 0.2
correspond to randomly chosen unrelated proteins.
https://en.wikipedia.org/wiki/Template_modeling_score
Warning! It's not exactly the code in:
https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
but will suffice for now.
Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* mode: one of ["numpy", "torch", "auto"] for backend
Outputs: tensor/array of size (B,)
"""
return A, B
|
import torch
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from inspect import isfunction
from functools import partial
from dataclasses import dataclass
import torch.nn.functional as F
from math import sqrt
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from alphafold2_pytorch.utils import *
import alphafold2_pytorch.constants as constants
from alphafold2_pytorch.mlm import MLM
# structure module
from invariant_point_attention import IPABlock
from pytorch3d.transforms import quaternion_multiply, quaternion_to_matrix
# constants
@dataclass
class Recyclables:
coords: torch.Tensor
single_msa_repr_row: torch.Tensor
pairwise_repr: torch.Tensor
@dataclass
class ReturnValues:
distance: torch.Tensor = None
theta: torch.Tensor = None
phi: torch.Tensor = None
omega: torch.Tensor = None
msa_mlm_loss: torch.Tensor = None
recyclables: Recyclables = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, tuple) else (val,) * depth
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# helper classes
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, x):
return self.val
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
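# Minimal sketch of the gating: the input is split in half along the feature dimension,
# so a (b, n, 2*d) activation becomes a (b, n, d) output of x * GELU(gates). For example,
# nn.Linear(dim, dim * mult * 2) followed by GEGLU() leaves dim * mult features for the
# second Linear in the FeedForward module below.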
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
init_zero_(self.net[-1])
def forward(self, x, **kwargs):
x = self.norm(x)
return self.net(x)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
seq_len = None,
heads = 8,
dim_head = 64,
dropout = 0.,
gating = True
):
super().__init__()
inner_dim = dim_head * heads
self.seq_len = seq_len
self.heads= heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.gating = nn.Linear(dim, inner_dim)
nn.init.constant_(self.gating.weight, 0.)
nn.init.constant_(self.gating.bias, 1.)
self.dropout = nn.Dropout(dropout)
init_zero_(self.to_out)
def forward(self, x, mask = None, attn_bias = None, context = None, context_mask = None, tie_dim = None):
device, orig_shape, h, has_context = x.device, x.shape, self.heads, exists(context)
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
i, j = q.shape[-2], k.shape[-2]
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# scale
q = q * self.scale
# query / key similarities
if exists(tie_dim):
# as in the paper, for the extra MSAs
# they average the queries along the rows of the MSAs
# they named this particular module MSAColumnGlobalAttention
q, k = map(lambda t: rearrange(t, '(b r) ... -> b r ...', r = tie_dim), (q, k))
q = q.mean(dim = 1)
dots = einsum('b h i d, b r h j d -> b r h i j', q, k)
dots = rearrange(dots, 'b r ... -> (b r) ...')
else:
dots = einsum('b h i d, b h j d -> b h i j', q, k)
# add attention bias, if supplied (for pairwise to msa attention communication)
if exists(attn_bias):
dots = dots + attn_bias
# masking
if exists(mask):
mask = default(mask, lambda: torch.ones(1, i, device = device).bool())
context_mask = mask if not has_context else default(context_mask, lambda: torch.ones(1, k.shape[-2], device = device).bool())
mask_value = -torch.finfo(dots.dtype).max
mask = mask[:, None, :, None] * context_mask[:, None, None, :]
dots = dots.masked_fill(~mask, mask_value)
# attention
attn = dots.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# gating
gates = self.gating(x)
out = out * gates.sigmoid()
# combine to out
out = self.to_out(out)
return out
class AxialAttention(nn.Module):
def __init__(
self,
dim,
heads,
row_attn = True,
col_attn = True,
accept_edges = False,
global_query_attn = False,
**kwargs
):
super().__init__()
assert not (not row_attn and not col_attn), 'row or column attention must be turned on'
self.row_attn = row_attn
self.col_attn = col_attn
self.global_query_attn = global_query_attn
self.norm = nn.LayerNorm(dim)
self.attn = Attention(dim = dim, heads = heads, **kwargs)
self.edges_to_attn_bias = nn.Sequential(
nn.Linear(dim, heads, bias = False),
Rearrange('b i j h -> b h i j')
) if accept_edges else None
def forward(self, x, edges = None, mask = None):
assert self.row_attn ^ self.col_attn, 'has to be either row or column attention, but not both'
b, h, w, d = x.shape
x = self.norm(x)
# axial attention
if self.col_attn:
axial_dim = w
mask_fold_axial_eq = 'b h w -> (b w) h'
input_fold_eq = 'b h w d -> (b w) h d'
output_fold_eq = '(b w) h d -> b h w d'
elif self.row_attn:
axial_dim = h
mask_fold_axial_eq = 'b h w -> (b h) w'
input_fold_eq = 'b h w d -> (b h) w d'
output_fold_eq = '(b h) w d -> b h w d'
x = rearrange(x, input_fold_eq)
if exists(mask):
mask = rearrange(mask, mask_fold_axial_eq)
attn_bias = None
if exists(self.edges_to_attn_bias) and exists(edges):
attn_bias = self.edges_to_attn_bias(edges)
attn_bias = repeat(attn_bias, 'b h i j -> (b x) h i j', x = axial_dim)
tie_dim = axial_dim if self.global_query_attn else None
out = self.attn(x, mask = mask, attn_bias = attn_bias, tie_dim = tie_dim)
out = rearrange(out, output_fold_eq, h = h, w = w)
return out
class TriangleMultiplicativeModule(nn.Module):
def __init__(
self,
*,
dim,
hidden_dim = None,
mix = 'ingoing'
):
super().__init__()
assert mix in {'ingoing', 'outgoing'}, 'mix must be either ingoing or outgoing'
hidden_dim = default(hidden_dim, dim)
self.norm = nn.LayerNorm(dim)
self.left_proj = nn.Linear(dim, hidden_dim)
self.right_proj = nn.Linear(dim, hidden_dim)
self.left_gate = nn.Linear(dim, hidden_dim)
self.right_gate = nn.Linear(dim, hidden_dim)
self.out_gate = nn.Linear(dim, hidden_dim)
# initialize all gating to be identity
for gate in (self.left_gate, self.right_gate, self.out_gate):
nn.init.constant_(gate.weight, 0.)
nn.init.constant_(gate.bias, 1.)
if mix == 'outgoing':
self.mix_einsum_eq = '... i k d, ... j k d -> ... i j d'
elif mix == 'ingoing':
self.mix_einsum_eq = '... k j d, ... k i d -> ... i j d'
self.to_out_norm = nn.LayerNorm(hidden_dim)
self.to_out = nn.Linear(hidden_dim, dim)
def forward(self, x, mask = None):
assert x.shape[1] == x.shape[2], 'feature map must be symmetrical'
if exists(mask):
mask = rearrange(mask, 'b i j -> b i j ()')
x = self.norm(x)
left = self.left_proj(x)
right = self.right_proj(x)
if exists(mask):
left = left * mask
right = right * mask
left_gate = self.left_gate(x).sigmoid()
right_gate = self.right_gate(x).sigmoid()
out_gate = self.out_gate(x).sigmoid()
left = left * left_gate
right = right * right_gate
out = einsum(self.mix_einsum_eq, left, right)
out = self.to_out_norm(out)
out = out * out_gate
return self.to_out(out)
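# Sketch of the two mixing modes: with the 'outgoing' equation
#   out[i, j, d] = sum_k left[i, k, d] * right[j, k, d]
# edge (i, j) aggregates over the third node k of triangles through outgoing edges
# (i -> k) and (j -> k); the 'ingoing' equation uses edges (k -> i) and (k -> j) instead.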
# evoformer blocks
class OuterMean(nn.Module):
def __init__(
self,
dim,
hidden_dim = None,
eps = 1e-5
):
super().__init__()
self.eps = eps
self.norm = nn.LayerNorm(dim)
hidden_dim = default(hidden_dim, dim)
self.left_proj = nn.Linear(dim, hidden_dim)
self.right_proj = nn.Linear(dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, dim)
def forward(self, x, mask = None):
x = self.norm(x)
left = self.left_proj(x)
right = self.right_proj(x)
outer = rearrange(left, 'b m i d -> b m i () d') * rearrange(right, 'b m j d -> b m () j d')
if exists(mask):
# masked mean, if there are padding in the rows of the MSA
mask = rearrange(mask, 'b m i -> b m i () ()') * rearrange(mask, 'b m j -> b m () j ()')
outer = outer.masked_fill(~mask, 0.)
outer = outer.sum(dim = 1) / (mask.sum(dim = 1) + self.eps)
else:
outer = outer.mean(dim = 1)
return self.proj_out(outer)
class PairwiseAttentionBlock(nn.Module):
def __init__(
self,
dim,
seq_len,
heads,
dim_head,
dropout = 0.,
global_column_attn = False
):
super().__init__()
self.outer_mean = OuterMean(dim)
self.triangle_attention_outgoing = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = True, col_attn = False, accept_edges = True)
self.triangle_attention_ingoing = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = False, col_attn = True, accept_edges = True, global_query_attn = global_column_attn)
self.triangle_multiply_outgoing = TriangleMultiplicativeModule(dim = dim, mix = 'outgoing')
self.triangle_multiply_ingoing = TriangleMultiplicativeModule(dim = dim, mix = 'ingoing')
def forward(
self,
x,
mask = None,
msa_repr = None,
msa_mask = None
):
if exists(msa_repr):
x = x + self.outer_mean(msa_repr, mask = msa_mask)
x = self.triangle_multiply_outgoing(x, mask = mask) + x
x = self.triangle_multiply_ingoing(x, mask = mask) + x
x = self.triangle_attention_outgoing(x, edges = x, mask = mask) + x
x = self.triangle_attention_ingoing(x, edges = x, mask = mask) + x
return x
class MsaAttentionBlock(nn.Module):
def __init__(
self,
dim,
seq_len,
heads,
dim_head,
dropout = 0.
):
super().__init__()
self.row_attn = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = True, col_attn = False, accept_edges = True)
self.col_attn = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = False, col_attn = True)
def forward(
self,
x,
mask = None,
pairwise_repr = None
):
x = self.row_attn(x, mask = mask, edges = pairwise_repr) + x
x = self.col_attn(x, mask = mask) + x
return x
# main evoformer class
class EvoformerBlock(nn.Module):
def __init__(
self,
*,
dim,
seq_len,
heads,
dim_head,
attn_dropout,
ff_dropout,
global_column_attn = False
):
super().__init__()
self.layer = nn.ModuleList([
PairwiseAttentionBlock(dim = dim, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout, global_column_attn = global_column_attn),
FeedForward(dim = dim, dropout = ff_dropout),
MsaAttentionBlock(dim = dim, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout),
FeedForward(dim = dim, dropout = ff_dropout),
])
def forward(self, inputs):
x, m, mask, msa_mask = inputs
attn, ff, msa_attn, msa_ff = self.layer
# msa attention and transition
m = msa_attn(m, mask = msa_mask, pairwise_repr = x)
m = msa_ff(m) + m
# pairwise attention and transition
x = attn(x, mask = mask, msa_repr = m, msa_mask = msa_mask)
x = ff(x) + x
return x, m, mask, msa_mask
class Evoformer(nn.Module):
def __init__(
self,
*,
depth,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([EvoformerBlock(**kwargs) for _ in range(depth)])
def forward(
self,
x,
m,
mask = None,
msa_mask = None
):
inp = (x, m, mask, msa_mask)
x, m, *_ = checkpoint_sequential(self.layers, 1, inp)
return x, m
class Alphafold2(nn.Module):
def __init__(
self,
*,
dim,
max_seq_len = 2048,
depth = 6,
heads = 8,
dim_head = 64,
max_rel_dist = 32,
num_tokens = constants.NUM_AMINO_ACIDS,
num_embedds = constants.NUM_EMBEDDS_TR,
max_num_msas = constants.MAX_NUM_MSA,
max_num_templates = constants.MAX_NUM_TEMPLATES,
extra_msa_evoformer_layers = 4,
attn_dropout = 0.,
ff_dropout = 0.,
templates_dim = 32,
templates_embed_layers = 4,
templates_angles_feats_dim = 55,
predict_angles = False,
symmetrize_omega = False,
predict_coords = False, # structure module related keyword arguments below
structure_module_depth = 4,
structure_module_heads = 1,
structure_module_dim_head = 4,
disable_token_embed = False,
mlm_mask_prob = 0.15,
mlm_random_replace_token_prob = 0.1,
mlm_keep_token_same_prob = 0.1,
mlm_exclude_token_ids = (0,),
recycling_distance_buckets = 32
):
super().__init__()
self.dim = dim
# token embedding
self.token_emb = nn.Embedding(num_tokens + 1, dim) if not disable_token_embed else Always(0)
self.to_pairwise_repr = nn.Linear(dim, dim * 2)
self.disable_token_embed = disable_token_embed
# positional embedding
self.max_rel_dist = max_rel_dist
self.pos_emb = nn.Embedding(max_rel_dist * 2 + 1, dim)
# extra msa embedding
self.extra_msa_evoformer = Evoformer(
dim = dim,
depth = extra_msa_evoformer_layers,
seq_len = max_seq_len,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
global_column_attn = True
)
# template embedding
self.to_template_embed = nn.Linear(templates_dim, dim)
self.templates_embed_layers = templates_embed_layers
self.template_pairwise_embedder = PairwiseAttentionBlock(
dim = dim,
dim_head = dim_head,
heads = heads,
seq_len = max_seq_len
)
self.template_pointwise_attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout
)
self.template_angle_mlp = nn.Sequential(
nn.Linear(templates_angles_feats_dim, dim),
nn.GELU(),
nn.Linear(dim, dim)
)
# projection for angles, if needed
self.predict_angles = predict_angles
self.symmetrize_omega = symmetrize_omega
if predict_angles:
self.to_prob_theta = nn.Linear(dim, constants.THETA_BUCKETS)
self.to_prob_phi = nn.Linear(dim, constants.PHI_BUCKETS)
self.to_prob_omega = nn.Linear(dim, constants.OMEGA_BUCKETS)
# custom embedding projection
self.embedd_project = nn.Linear(num_embedds, dim)
# main trunk modules
self.net = Evoformer(
dim = dim,
depth = depth,
seq_len = max_seq_len,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout
)
# MSA SSL MLM
self.mlm = MLM(
dim = dim,
num_tokens = num_tokens,
mask_id = num_tokens, # last token of embedding is used for masking
mask_prob = mlm_mask_prob,
keep_token_same_prob = mlm_keep_token_same_prob,
random_replace_token_prob = mlm_random_replace_token_prob,
exclude_token_ids = mlm_exclude_token_ids
)
# calculate distogram logits
self.to_distogram_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, constants.DISTOGRAM_BUCKETS)
)
# to coordinate output
self.predict_coords = predict_coords
self.structure_module_depth = structure_module_depth
self.msa_to_single_repr_dim = nn.Linear(dim, dim)
self.trunk_to_pairwise_repr_dim = nn.Linear(dim, dim)
with torch_default_dtype(torch.float32):
self.ipa_block = IPABlock(
dim = dim,
heads = structure_module_heads,
)
self.to_quaternion_update = nn.Linear(dim, 6)
init_zero_(self.ipa_block.attn.to_out)
self.to_points = nn.Linear(dim, 3)
# aux confidence measure
self.lddt_linear = nn.Linear(dim, 1)
# recycling params
self.recycling_msa_norm = nn.LayerNorm(dim)
self.recycling_pairwise_norm = nn.LayerNorm(dim)
self.recycling_distance_embed = nn.Embedding(recycling_distance_buckets, dim)
self.recycling_distance_buckets = recycling_distance_buckets
def forward(
self,
seq,
msa = None,
mask = None,
msa_mask = None,
extra_msa = None,
extra_msa_mask = None,
seq_index = None,
seq_embed = None,
msa_embed = None,
templates_feats = None,
templates_mask = None,
templates_angles = None,
embedds = None,
recyclables = None,
return_trunk = False,
return_confidence = False,
return_recyclables = False,
return_aux_logits = False
):
assert not (self.disable_token_embed and not exists(seq_embed)), 'sequence embedding must be supplied if one has disabled token embedding'
assert not (self.disable_token_embed and not exists(msa_embed)), 'msa embedding must be supplied if one has disabled token embedding'
# if MSA is not passed in, just use the sequence itself
if not exists(msa):
msa = rearrange(seq, 'b n -> b () n')
msa_mask = rearrange(mask, 'b n -> b () n')
# assert on sequence length
assert msa.shape[-1] == seq.shape[-1], 'sequence length of MSA and primary sequence must be the same'
# variables
b, n, device = *seq.shape[:2], seq.device
n_range = torch.arange(n, device = device)
# unpack (AA_code, atom_pos)
if isinstance(seq, (list, tuple)):
seq, seq_pos = seq
# embed main sequence
x = self.token_emb(seq)
if exists(seq_embed):
x += seq_embed
# mlm for MSAs
if self.training and exists(msa):
original_msa = msa
msa_mask = default(msa_mask, lambda: torch.ones_like(msa).bool())
noised_msa, replaced_msa_mask = self.mlm.noise(msa, msa_mask)
msa = noised_msa
# embed multiple sequence alignment (msa)
if exists(msa):
m = self.token_emb(msa)
if exists(msa_embed):
m = m + msa_embed
# add single representation to msa representation
m = m + rearrange(x, 'b n d -> b () n d')
# get msa_mask to all ones if none was passed
msa_mask = default(msa_mask, lambda: torch.ones_like(msa).bool())
elif exists(embedds):
m = self.embedd_project(embedds)
# get msa_mask to all ones if none was passed
msa_mask = default(msa_mask, lambda: torch.ones_like(embedds[..., -1]).bool())
else:
raise ValueError('either MSA or embeds must be given')
# derive pairwise representation
x_left, x_right = self.to_pairwise_repr(x).chunk(2, dim = -1)
x = rearrange(x_left, 'b i d -> b i () d') + rearrange(x_right, 'b j d-> b () j d') # create pair-wise residue embeds
x_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j') if exists(mask) else None
# add relative positional embedding
seq_index = default(seq_index, lambda: torch.arange(n, device = device))
seq_rel_dist = rearrange(seq_index, 'i -> () i ()') - rearrange(seq_index, 'j -> () () j')
seq_rel_dist = seq_rel_dist.clamp(-self.max_rel_dist, self.max_rel_dist) + self.max_rel_dist
rel_pos_emb = self.pos_emb(seq_rel_dist)
x = x + rel_pos_emb
# add recyclables, if present
if exists(recyclables):
m[:, 0] = m[:, 0] + self.recycling_msa_norm(recyclables.single_msa_repr_row)
x = x + self.recycling_pairwise_norm(recyclables.pairwise_repr)
distances = torch.cdist(recyclables.coords, recyclables.coords, p=2)
boundaries = torch.linspace(2, 20, steps = self.recycling_distance_buckets, device = device)
discretized_distances = torch.bucketize(distances, boundaries[:-1])
distance_embed = self.recycling_distance_embed(discretized_distances)
x = x + distance_embed
# embed templates, if present
if exists(templates_feats):
_, num_templates, *_ = templates_feats.shape
# embed template
t = self.to_template_embed(templates_feats)
t_mask_crossed = rearrange(templates_mask, 'b t i -> b t i ()') * rearrange(templates_mask, 'b t j -> b t () j')
t = rearrange(t, 'b t ... -> (b t) ...')
t_mask_crossed = rearrange(t_mask_crossed, 'b t ... -> (b t) ...')
for _ in range(self.templates_embed_layers):
t = self.template_pairwise_embedder(t, mask = t_mask_crossed)
t = rearrange(t, '(b t) ... -> b t ...', t = num_templates)
t_mask_crossed = rearrange(t_mask_crossed, '(b t) ... -> b t ...', t = num_templates)
# template pos emb
x_point = rearrange(x, 'b i j d -> (b i j) () d')
t_point = rearrange(t, 'b t i j d -> (b i j) t d')
x_mask_point = rearrange(x_mask, 'b i j -> (b i j) ()')
t_mask_point = rearrange(t_mask_crossed, 'b t i j -> (b i j) t')
template_pooled = self.template_pointwise_attn(
x_point,
context = t_point,
mask = x_mask_point,
context_mask = t_mask_point
)
template_pooled_mask = rearrange(t_mask_point.sum(dim = -1) > 0, 'b -> b () ()')
template_pooled = template_pooled * template_pooled_mask
template_pooled = rearrange(template_pooled, '(b i j) () d -> b i j d', i = n, j = n)
x = x + template_pooled
# add template angle features to MSAs by passing through MLP and then concat
if exists(templates_angles):
t_angle_feats = self.template_angle_mlp(templates_angles)
m = torch.cat((m, t_angle_feats), dim = 1)
msa_mask = torch.cat((msa_mask, templates_mask), dim = 1)
# embed extra msa, if present
if exists(extra_msa):
            extra_m = self.token_emb(extra_msa)
            extra_msa_mask = default(extra_msa_mask, lambda: torch.ones_like(extra_msa).bool())
x, extra_m = self.extra_msa_evoformer(
x,
extra_m,
mask = x_mask,
msa_mask = extra_msa_mask
)
# trunk
x, m = self.net(
x,
m,
mask = x_mask,
msa_mask = msa_mask
)
# ready output container
ret = ReturnValues()
# calculate theta and phi before symmetrization
if self.predict_angles:
ret.theta_logits = self.to_prob_theta(x)
ret.phi_logits = self.to_prob_phi(x)
# embeds to distogram
trunk_embeds = (x + rearrange(x, 'b i j d -> b j i d')) * 0.5 # symmetrize
distance_pred = self.to_distogram_logits(trunk_embeds)
ret.distance = distance_pred
# calculate mlm loss, if training
msa_mlm_loss = None
if self.training and exists(msa):
num_msa = original_msa.shape[1]
msa_mlm_loss = self.mlm(m[:, :num_msa], original_msa, replaced_msa_mask)
# determine angles, if specified
if self.predict_angles:
omega_input = trunk_embeds if self.symmetrize_omega else x
ret.omega_logits = self.to_prob_omega(omega_input)
if not self.predict_coords or return_trunk:
return ret
# derive single and pairwise embeddings for structural refinement
single_msa_repr_row = m[:, 0]
single_repr = self.msa_to_single_repr_dim(single_msa_repr_row)
pairwise_repr = self.trunk_to_pairwise_repr_dim(x)
# prepare float32 precision for equivariance
original_dtype = single_repr.dtype
single_repr, pairwise_repr = map(lambda t: t.float(), (single_repr, pairwise_repr))
# iterative refinement with equivariant transformer in high precision
with torch_default_dtype(torch.float32):
quaternions = torch.tensor([1., 0., 0., 0.], device = device) # initial rotations
quaternions = repeat(quaternions, 'd -> b n d', b = b, n = n)
translations = torch.zeros((b, n, 3), device = device)
# go through the layers and apply invariant point attention and feedforward
for i in range(self.structure_module_depth):
is_last = i == (self.structure_module_depth - 1)
# the detach comes from
# https://github.com/deepmind/alphafold/blob/0bab1bf84d9d887aba5cfb6d09af1e8c3ecbc408/alphafold/model/folding.py#L383
rotations = quaternion_to_matrix(quaternions)
if not is_last:
rotations = rotations.detach()
single_repr = self.ipa_block(
single_repr,
mask = mask,
pairwise_repr = pairwise_repr,
rotations = rotations,
translations = translations
)
# update quaternion and translation
quaternion_update, translation_update = self.to_quaternion_update(single_repr).chunk(2, dim = -1)
quaternion_update = F.pad(quaternion_update, (1, 0), value = 1.)
quaternions = quaternion_multiply(quaternions, quaternion_update)
translations = translations + einsum('b n c, b n c r -> b n r', translation_update, rotations)
points_local = self.to_points(single_repr)
rotations = quaternion_to_matrix(quaternions)
coords = einsum('b n c, b n c d -> b n d', points_local, rotations) + translations
        coords = coords.type(original_dtype)
if return_recyclables:
coords, single_msa_repr_row, pairwise_repr = map(torch.detach, (coords, single_msa_repr_row, pairwise_repr))
ret.recyclables = Recyclables(coords, single_msa_repr_row, pairwise_repr)
if return_aux_logits:
return coords, ret
if return_confidence:
return coords, self.lddt_linear(single_repr.float())
return coords
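# A minimal, illustrative sketch (not used by the model above) of the frame-update step in the
# structure module: the 6-dim output of `to_quaternion_update` is split into a 3-vector quaternion
# update plus a 3-vector translation update, and the quaternion half is padded with a leading 1 so
# that a zero prediction corresponds to the identity rotation. `quat_mul` below is a simplified
# local stand-in for the `quaternion_multiply` helper used above; rotating the translation update
# into the residue frame is omitted here for brevity.
def _example_quaternion_update():
    import torch
    import torch.nn.functional as F
    def quat_mul(q, r):
        # Hamilton product of two (..., 4) quaternions laid out as (w, x, y, z)
        w1, x1, y1, z1 = q.unbind(-1)
        w2, x2, y2, z2 = r.unbind(-1)
        return torch.stack((
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2), dim = -1)
    b, n, dim = 2, 16, 32
    single_repr = torch.randn(b, n, dim)
    to_quaternion_update = torch.nn.Linear(dim, 6)
    quaternions = torch.tensor([1., 0., 0., 0.]).expand(b, n, 4)    # identity rotations
    translations = torch.zeros(b, n, 3)
    quaternion_update, translation_update = to_quaternion_update(single_repr).chunk(2, dim = -1)
    quaternion_update = F.pad(quaternion_update, (1, 0), value = 1.)  # (b, n, 4), scalar part fixed to 1
    quaternions = quat_mul(quaternions, quaternion_update)
    translations = translations + translation_update
    return quaternions, translations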
|
import torch
import torch.nn.functional as F
from torch import nn
from alphafold2_pytorch.utils import get_msa_embedd, get_esm_embedd, get_prottran_embedd, exists
from alphafold2_pytorch.constants import MSA_MODEL_PATH, MSA_EMBED_DIM, ESM_MODEL_PATH, ESM_EMBED_DIM, PROTTRAN_EMBED_DIM
from einops import rearrange
class ProtTranEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
from transformers import AutoTokenizer, AutoModel
self.alphafold2 = alphafold2
self.project_embed = nn.Linear(PROTTRAN_EMBED_DIM, alphafold2.dim)
self.tokenizer = AutoTokenizer.from_pretrained('Rostlab/prot_bert', do_lower_case=False)
self.model = AutoModel.from_pretrained('Rostlab/prot_bert')
def forward(self, seq, msa, msa_mask = None, **kwargs):
device = seq.device
num_msa = msa.shape[1]
msa_flat = rearrange(msa, 'b m n -> (b m) n')
seq_embed = get_prottran_embedd(seq, self.model, self.tokenizer, device = device)
msa_embed = get_prottran_embedd(msa_flat, self.model, self.tokenizer, device = device)
seq_embed, msa_embed = map(self.project_embed, (seq_embed, msa_embed))
msa_embed = rearrange(msa_embed, '(b m) n d -> b m n d', m = num_msa)
return self.alphafold2(seq, msa, seq_embed = seq_embed, msa_embed = msa_embed, msa_mask = msa_mask, **kwargs)
class MSAEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
self.alphafold2 = alphafold2
model, alphabet = torch.hub.load(*MSA_MODEL_PATH)
batch_converter = alphabet.get_batch_converter()
self.model = model
self.batch_converter = batch_converter
self.project_embed = nn.Linear(MSA_EMBED_DIM, alphafold2.dim) if MSA_EMBED_DIM != alphafold2.dim else nn.Identity()
def forward(self, seq, msa, msa_mask = None, **kwargs):
assert seq.shape[-1] == msa.shape[-1], 'sequence and msa must have the same length if you wish to use MSA transformer embeddings'
model, batch_converter, device = self.model, self.batch_converter, seq.device
seq_and_msa = torch.cat((seq.unsqueeze(1), msa), dim = 1)
if exists(msa_mask):
# in the event that there are rows in the MSA that are completely padding
# process each batch element individually, so that padding isn't processed
# with row-tied attention
num_msa = msa_mask.any(dim = -1).sum(dim = -1).tolist()
seq_and_msa_list = seq_and_msa.unbind(dim = 0)
num_rows = seq_and_msa.shape[1]
embeds = []
for num, batch_el in zip(num_msa, seq_and_msa_list):
batch_el = rearrange(batch_el, '... -> () ...')
batch_el = batch_el[:, :num]
embed = get_msa_embedd(batch_el, model, batch_converter, device = device)
embed = F.pad(embed, (0, 0, 0, 0, 0, num_rows - num), value = 0.)
embeds.append(embed)
embeds = torch.cat(embeds, dim = 0)
else:
embeds = get_msa_embedd(seq_and_msa, model, batch_converter, device = device)
embeds = self.project_embed(embeds)
seq_embed, msa_embed = embeds[:, 0], embeds[:, 1:]
return self.alphafold2(seq, msa, seq_embed = seq_embed, msa_embed = msa_embed, msa_mask = msa_mask, **kwargs)
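# A small illustration of the per-example padding pattern used in MSAEmbedWrapper.forward above:
# each batch element is embedded with only its non-padding MSA rows, then right-padded along the
# row dimension so the batch can be re-stacked. Random tensors stand in for the language-model
# embeddings; only the shapes are being demonstrated.
def _example_msa_embed_padding():
    num_rows, seq_len, embed_dim = 6, 10, 8
    num_msa_per_example = [6, 3]    # non-padding rows per batch element
    embeds = []
    for num in num_msa_per_example:
        embed = torch.randn(1, num, seq_len, embed_dim)          # embedder output for real rows only
        embed = F.pad(embed, (0, 0, 0, 0, 0, num_rows - num))    # pad the row dimension back up
        embeds.append(embed)
    embeds = torch.cat(embeds, dim = 0)
    assert embeds.shape == (len(num_msa_per_example), num_rows, seq_len, embed_dim)
    return embeds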
class ESMEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
self.alphafold2 = alphafold2
model, alphabet = torch.hub.load(*ESM_MODEL_PATH)
batch_converter = alphabet.get_batch_converter()
self.model = model
self.batch_converter = batch_converter
self.project_embed = nn.Linear(ESM_EMBED_DIM, alphafold2.dim) if ESM_EMBED_DIM != alphafold2.dim else nn.Identity()
def forward(self, seq, msa=None, **kwargs):
model, batch_converter, device = self.model, self.batch_converter, seq.device
seq_embeds = get_esm_embedd(seq, model, batch_converter, device = device)
seq_embeds = self.project_embed(seq_embeds)
if msa is not None:
flat_msa = rearrange(msa, 'b m n -> (b m) n')
msa_embeds = get_esm_embedd(flat_msa, model, batch_converter, device = device)
            msa_embeds = rearrange(msa_embeds, '(b m) n d -> b m n d', m = msa.shape[1])
msa_embeds = self.project_embed(msa_embeds)
else:
msa_embeds = None
return self.alphafold2(seq, msa, seq_embed = seq_embeds, msa_embed = msa_embeds, **kwargs)
|
from math import log, sqrt, pi
import torch
from torch import nn, einsum
from einops import rearrange, repeat
# rotary embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(x, sinu_pos):
sin, cos = map(lambda t: rearrange(t, 'b ... -> b () ...'), sinu_pos)
rot_dim = sin.shape[-1]
x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
x = x * cos + rotate_every_two(x) * sin
return torch.cat((x, x_pass), dim = -1)
# helper
def default(val, d):
    return val if val is not None else d
# positional embeddings
class DepthWiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True, groups = None):
super().__init__()
groups = default(groups, dim_in)
self.net = nn.Sequential(
nn.Conv1d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = groups, stride = stride, bias = bias),
nn.Conv1d(dim_in, dim_out, 1, bias = bias)
)
def forward(self, x):
return self.net(x)
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, n, device):
seq = torch.arange(n, device = device).type_as(self.inv_freq)
freqs = einsum('i , j -> i j', seq, self.inv_freq)
freqs = repeat(freqs, 'i j -> () i (j r)', r = 2)
return [freqs.sin(), freqs.cos()]
class AxialRotaryEmbedding(nn.Module):
def __init__(self, dim, max_freq = 10):
super().__init__()
self.dim = dim // 2
inv_freq = 1. / (10000 ** (torch.arange(0, self.dim, 2).float() / self.dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, n, device):
seq = torch.arange(n, device = device).type_as(self.inv_freq)
x = einsum('n, d -> n d', seq, self.inv_freq)
y = einsum('n, d -> n d', seq, self.inv_freq)
x_sinu = repeat(x, 'i d -> i j d', j = n)
y_sinu = repeat(y, 'j d -> i j d', i = n)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: repeat(t, 'i j d -> () (i j) (d r)', r = 2), (sin, cos))
return [sin, cos]
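# A minimal usage sketch of the rotary helpers above: FixedPositionalEmbedding produces the
# (sin, cos) pair consumed by apply_rotary_pos_emb. The (batch, heads, seq, dim_head) layout is
# only an assumption for the sake of the example.
def _example_rotary():
    b, h, n, d = 2, 4, 8, 16
    x = torch.randn(b, h, n, d)
    pos_emb = FixedPositionalEmbedding(d)
    sin, cos = pos_emb(n, device = x.device)     # each of shape (1, n, d)
    x_rot = apply_rotary_pos_emb(x, (sin, cos))
    assert x_rot.shape == x.shape
    return x_rot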
|
import torch
import numpy as np
from alphafold2_pytorch.utils import *
def test_mat_to_masked():
# nodes
x = torch.ones(19, 3)
x_mask = torch.randn(19) > -0.3
# edges
edges_mat = torch.randn(19, 19) < 1
edges = torch.nonzero(edges_mat, as_tuple=False).t()
# test normal edges / nodes
cleaned = mat_input_to_masked(x, x_mask, edges=edges)
cleaned_2 = mat_input_to_masked(x, x_mask, edges_mat=edges_mat)
# test batch dimension
x_ = torch.stack([x]*2, dim=0)
x_mask_ = torch.stack([x_mask]*2, dim=0)
edges_mat_ = torch.stack([edges_mat]*2, dim=0)
cleaned_3 = mat_input_to_masked(x_, x_mask_, edges_mat=edges_mat_)
assert True
def test_center_distogram_median():
distogram = torch.randn(1, 128, 128, 37)
distances, weights = center_distogram_torch(distogram, center = 'median')
assert True
def test_masks():
seqs = torch.randint(20, size=(2, 50))
# cloud point mask - can't test bc it needs sidechainnet installed
# cloud_masks = scn_cloud_mask(seqs, boolean=True)
# atom masking
N_mask, CA_mask, C_mask = scn_backbone_mask(seqs, boolean = True)
assert True
def test_mds_and_mirrors():
distogram = torch.randn(2, 32*3, 32*3, 37)
distances, weights = center_distogram_torch(distogram)
# set out some points (due to padding)
paddings = [7,0]
for i,pad in enumerate(paddings):
if pad > 0:
weights[i, -pad:, -pad:] = 0.
# masks
masker = torch.arange(distogram.shape[1]) % 3
N_mask = (masker==0).bool()
CA_mask = (masker==1).bool()
coords_3d, _ = MDScaling(distances,
weights = weights,
iters = 5,
fix_mirror = 2,
N_mask = N_mask,
CA_mask = CA_mask,
C_mask = None
)
assert list(coords_3d.shape) == [2, 3, 32*3], 'coordinates must be of the right shape after MDS'
def test_sidechain_container():
seqs = torch.tensor([[0]*137, [3]*137]).long()
bb = torch.randn(2, 137*4, 3)
atom_mask = torch.tensor( [1]*4 + [0]*(14-4) )
proto_3d = sidechain_container(seqs, bb, atom_mask=atom_mask)
assert list(proto_3d.shape) == [2, 137, 14, 3]
def test_distmat_loss():
a = torch.randn(2, 137, 14, 3)
b = torch.randn(2, 137, 14, 3)
loss = distmat_loss_torch(a, b, p=2, q=2) # mse on distmat
assert True
def test_lddt():
a = torch.randn(2, 137, 14, 3)
b = torch.randn(2, 137, 14, 3)
cloud_mask = torch.ones(a.shape[:-1]).bool()
lddt_result = lddt_ca_torch(a, b, cloud_mask)
assert list(lddt_result.shape) == [2, 137]
def test_kabsch():
a = torch.randn(3, 8)
b = torch.randn(3, 8)
a_, b_ = Kabsch(a,b)
assert a.shape == a_.shape
def test_tmscore():
a = torch.randn(2, 3, 8)
b = torch.randn(2, 3, 8)
out = TMscore(a, b)
assert True
def test_gdt():
a = torch.randn(1, 3, 8)
b = torch.randn(1, 3, 8)
GDT(a, b, weights = 1)
assert True
|
import torch
from torch import nn
from einops import repeat
from alphafold2_pytorch.alphafold2 import Alphafold2
from alphafold2_pytorch.utils import *
def test_main():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 128))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_no_msa():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
mask = torch.ones_like(seq).bool()
distogram = model(
seq,
mask = mask
)
assert True
def test_anglegrams():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_angles = True
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 128))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_templates():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
templates_dim = 32,
templates_angles_feats_dim = 32
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
templates_feats = torch.randn(2, 3, 16, 16, 32)
templates_angles = torch.randn(2, 3, 16, 32)
templates_mask = torch.ones(2, 3, 16).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
templates_feats = templates_feats,
templates_angles = templates_angles,
templates_mask = templates_mask
)
assert True
def test_extra_msa():
model = Alphafold2(
dim = 128,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True
)
seq = torch.randint(0, 21, (2, 4))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 4))
msa_mask = torch.ones_like(msa).bool()
extra_msa = torch.randint(0, 21, (2, 5, 4))
extra_msa_mask = torch.ones_like(extra_msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask
)
assert True
def test_embeddings():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
embedds = torch.randn(2, 1, 16, 1280)
# without mask
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = None
)
# with mask
embedds_mask = torch.ones_like(embedds[..., -1]).bool()
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = embedds_mask
)
assert True
def test_coords():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_coords_backbone_with_cbeta():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_coords_all_atoms():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_mds():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_edges_to_equivariant_network():
model = Alphafold2(
dim = 32,
depth = 1,
heads = 2,
dim_head = 32,
predict_coords = True,
predict_angles = True
)
seq = torch.randint(0, 21, (2, 32))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 32))
msa_mask = torch.ones_like(msa).bool()
coords, confidences = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
return_confidence = True
)
assert True, 'should run without errors'
def test_coords_backwards():
model = Alphafold2(
dim = 256,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
coords.sum().backward()
assert True, 'must be able to go backwards through MDS and center distogram'
def test_confidence():
model = Alphafold2(
dim = 256,
depth = 1,
heads = 2,
dim_head = 32,
predict_coords = True
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords, confidences = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
return_confidence = True
)
assert coords.shape[:-1] == confidences.shape[:-1]
def test_recycling():
model = Alphafold2(
dim = 128,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
)
seq = torch.randint(0, 21, (2, 4))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 4))
msa_mask = torch.ones_like(msa).bool()
extra_msa = torch.randint(0, 21, (2, 5, 4))
extra_msa_mask = torch.ones_like(extra_msa).bool()
coords, ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask,
return_aux_logits = True,
return_recyclables = True
)
coords, ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask,
recyclables = ret.recyclables,
return_aux_logits = True,
return_recyclables = True
)
assert True
|
import pickle
import string
from argparse import ArgumentParser
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import numpy.linalg as LA
import prody
import torch
from Bio import SeqIO
from einops import repeat
from sidechainnet.utils.measure import get_seq_coords_and_angles
from sidechainnet.utils.sequence import ProteinVocabulary
from torch.utils.data import DataLoader, Dataset
from alphafold2_pytorch.constants import DISTOGRAM_BUCKETS
from tqdm import tqdm
try:
import pytorch_lightning as pl
LightningDataModule = pl.LightningDataModule
except ImportError:
LightningDataModule = object
CACHE_PATH = Path("~/.cache/alphafold2_pytorch").expanduser()
DATA_DIR = CACHE_PATH / "trrosetta" / "trrosetta"
URL = "http://s3.amazonaws.com/proteindata/data_pytorch/trrosetta.tar.gz"
REMOVE_KEYS = dict.fromkeys(string.ascii_lowercase)
REMOVE_KEYS["."] = None
REMOVE_KEYS["*"] = None
translation = str.maketrans(REMOVE_KEYS)
DEFAULT_VOCAB = ProteinVocabulary()
def default_tokenize(seq: str) -> List[int]:
return [DEFAULT_VOCAB[ch] for ch in seq]
def read_fasta(filename: str) -> List[Tuple[str, str]]:
def remove_insertions(sequence: str) -> str:
return sequence.translate(translation)
return [
(record.description, remove_insertions(str(record.seq)))
for record in SeqIO.parse(filename, "fasta")
]
def read_pdb(pdb: str):
ag = prody.parsePDB(pdb)
for chain in ag.iterChains():
angles, coords, seq = get_seq_coords_and_angles(chain)
return angles, coords, seq
def download_file(url, filename=None, root=CACHE_PATH):
import os
import urllib
root.mkdir(exist_ok=True, parents=True)
filename = filename or os.path.basename(url)
download_target = root / filename
download_target_tmp = root / f"tmp.{filename}"
if download_target.exists() and not download_target.is_file():
raise RuntimeError(f"{download_target} exists and is not a regular file")
if download_target.is_file():
return download_target
with urllib.request.urlopen(url) as source, open(
download_target_tmp, "wb"
) as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
download_target_tmp.rename(download_target)
return download_target
def get_or_download(url: str = URL):
"""
download and extract trrosetta data
"""
import tarfile
file = CACHE_PATH / "trrosetta.tar.gz"
dir = CACHE_PATH / "trrosetta"
dir_temp = CACHE_PATH / "trrosetta_tmp"
if dir.is_dir():
print(f"Load cached data from {dir}")
return dir
if not file.is_file():
print(f"Cache not found, download from {url} to {file}")
download_file(url)
print(f"Extract data from {file} to {dir}")
with tarfile.open(file, "r:gz") as tar:
tar.extractall(dir_temp)
dir_temp.rename(dir)
return dir
def pad_sequences(sequences, constant_value=0, dtype=None) -> np.ndarray:
batch_size = len(sequences)
shape = [batch_size] + np.max([seq.shape for seq in sequences], 0).tolist()
if dtype is None:
dtype = sequences[0].dtype
if isinstance(sequences[0], np.ndarray):
array = np.full(shape, constant_value, dtype=dtype)
elif isinstance(sequences[0], torch.Tensor):
array = torch.full(shape, constant_value, dtype=dtype)
for arr, seq in zip(array, sequences):
arrslice = tuple(slice(dim) for dim in seq.shape)
arr[arrslice] = seq
return array
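# A tiny illustration of pad_sequences above: variable-length LongTensors are right-padded with
# the given constant value (20 is the sequence pad value used elsewhere in this file).
def _example_pad_sequences():
    seqs = [torch.LongTensor([1, 2, 3]), torch.LongTensor([4, 5])]
    padded = pad_sequences(seqs, constant_value=20)
    assert padded.tolist() == [[1, 2, 3], [4, 5, 20]]
    return padded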
class TrRosettaDataset(Dataset):
def __init__(
self,
data_dir: Path,
list_path: Path,
tokenize: Callable[[str], List[int]],
seq_pad_value: int = 20,
random_sample_msa: bool = False,
max_seq_len: int = 300,
max_msa_num: int = 300,
overwrite: bool = False,
):
self.data_dir = data_dir
self.file_list: List[Path] = self.read_file_list(data_dir, list_path)
self.tokenize = tokenize
self.seq_pad_value = seq_pad_value
self.random_sample_msa = random_sample_msa
self.max_seq_len = max_seq_len
self.max_msa_num = max_msa_num
self.overwrite = overwrite
def __len__(self) -> int:
return len(self.file_list)
def read_file_list(self, data_dir: Path, list_path: Path):
file_glob = (data_dir / "npz").glob("*.npz")
files = set(list_path.read_text().split())
if len(files) == 0:
raise ValueError("Passed an empty split file set")
file_list = [f for f in file_glob if f.name in files]
if len(file_list) != len(files):
num_missing = len(files) - len(file_list)
raise FileNotFoundError(
f"{num_missing} specified split files not found in directory"
)
return file_list
def has_cache(self, index):
if self.overwrite:
return False
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
return path.is_file()
def write_cache(self, index, data):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
path.parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as file:
pickle.dump(data, file)
def read_cache(self, index):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
with open(path, "rb") as file:
return pickle.load(file)
def __getitem__(self, index):
if self.has_cache(index):
item = self.read_cache(index)
else:
id = self.file_list[index].stem
pdb_path = self.data_dir / "pdb" / f"{id}.pdb"
msa_path = self.data_dir / "a3m" / f"{id}.a3m"
_, msa = zip(*read_fasta(str(msa_path)))
msa = np.array([np.array(list(seq)) for seq in msa])
angles, coords, seq = read_pdb(str(pdb_path))
seq = np.array(list(seq))
coords = coords.reshape((coords.shape[0] // 14, 14, 3))
dist = self.get_bucketed_distance(seq, coords, subset="ca")
item = {
"id": id,
"seq": seq,
"msa": msa,
"coords": coords,
"angles": angles,
"dist": dist
}
self.write_cache(index, item)
item["msa"] = self.sample(item["msa"], self.max_msa_num, self.random_sample_msa)
item = self.crop(item, self.max_seq_len)
return item
def calc_cb(self, coord):
N = coord[0]
CA = coord[1]
C = coord[2]
b = CA - N
c = C - CA
a = np.cross(b, c)
CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
return CB
def get_bucketed_distance(
self, seq, coords, subset="ca", start=2, bins=DISTOGRAM_BUCKETS-1, step=0.5
):
assert subset in ("ca", "cb")
if subset == "ca":
coords = coords[:, 1, :]
elif subset == "cb":
cb_coords = []
for res, coord in zip(seq, coords):
if res == "G":
cb = self.calc_cb(coord)
cb_coords.append(cb)
else:
cb_coords.append(coord[4, :])
coords = np.array(cb_coords)
vcs = coords + np.zeros([coords.shape[0]] + list(coords.shape))
vcs = vcs - np.swapaxes(vcs, 0, 1)
distance_map = LA.norm(vcs, axis=2)
mask = np.ones(distance_map.shape) - np.eye(distance_map.shape[0])
low_pos = np.where(distance_map < start)
high_pos = np.where(distance_map >= start + step * bins)
mask[low_pos] = 0
distance_map = (distance_map - start) // step
distance_map[high_pos] = bins
dist = (distance_map * mask).astype(int)
return dist
def crop(self, item, max_seq_len: int):
seq_len = len(item["seq"])
if seq_len <= max_seq_len or max_seq_len <= 0:
return item
start = 0
end = start + max_seq_len
item["seq"] = item["seq"][start:end]
item["msa"] = item["msa"][:, start:end]
item["coords"] = item["coords"][start:end]
item["angles"] = item["angles"][start:end]
item["dist"] = item["dist"][start:end, start:end]
return item
def sample(self, msa, max_msa_num: int, random: bool):
num_msa, seq_len = len(msa), len(msa[0])
if num_msa <= max_msa_num or max_msa_num <= 0:
return msa
if random:
num_sample = max_msa_num - 1
indices = np.random.choice(num_msa - 1, size=num_sample, replace=False) + 1
indices = np.pad(indices, [1, 0], "constant")
return msa[indices]
else:
return msa[:max_msa_num]
def collate_fn(self, batch):
b = len(batch)
batch = {k: [item[k] for item in batch] for k in batch[0]}
id = batch["id"]
seq = batch["seq"]
msa = batch["msa"]
coords = batch["coords"]
angles = batch["angles"]
dist = batch["dist"]
lengths = torch.LongTensor([len(x[0]) for x in msa])
depths = torch.LongTensor([len(x) for x in msa])
max_len = lengths.max()
max_depth = depths.max()
seq = pad_sequences(
[torch.LongTensor(self.tokenize(seq_)) for seq_ in seq], self.seq_pad_value,
)
msa = pad_sequences(
[torch.LongTensor([self.tokenize(seq_) for seq_ in msa_]) for msa_ in msa],
self.seq_pad_value,
)
coords = pad_sequences([torch.FloatTensor(x) for x in coords], 0.0)
angles = pad_sequences([torch.FloatTensor(x) for x in angles], 0.0)
dist = pad_sequences([torch.LongTensor(x) for x in dist], -100)
mask = repeat(torch.arange(max_len), "l -> b l", b=b) < repeat(
lengths, "b -> b l", l=max_len
)
msa_seq_mask = repeat(
torch.arange(max_len), "l -> b s l", b=b, s=max_depth
) < repeat(lengths, "b -> b s l", s=max_depth, l=max_len)
msa_depth_mask = repeat(
torch.arange(max_depth), "s -> b s l", b=b, l=max_len
) < repeat(depths, "b -> b s l", s=max_depth, l=max_len)
msa_mask = msa_seq_mask & msa_depth_mask
return {
"id": id,
"seq": seq,
"msa": msa,
"coords": coords,
"angles": angles,
"mask": mask,
"msa_mask": msa_mask,
"dist": dist,
}
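# A small illustration of the padding-mask construction used in collate_fn above: broadcasting an
# arange over the padded length against the per-example lengths yields the boolean sequence mask.
def _example_length_mask():
    lengths = torch.LongTensor([3, 5])
    max_len = int(lengths.max())
    mask = repeat(torch.arange(max_len), "l -> b l", b=len(lengths)) < repeat(
        lengths, "b -> b l", l=max_len
    )
    assert mask.tolist() == [
        [True, True, True, False, False],
        [True, True, True, True, True],
    ]
    return mask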
class TrRosettaDataModule(LightningDataModule):
@staticmethod
def add_data_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--data_dir", type=str, default=str(DATA_DIR))
parser.add_argument("--train_batch_size", type=int, default=1)
parser.add_argument("--eval_batch_size", type=int, default=1)
parser.add_argument("--test_batch_size", type=int, default=1)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument("--train_max_seq_len", type=int, default=256)
parser.add_argument("--eval_max_seq_len", type=int, default=256)
parser.add_argument("--test_max_seq_len", type=int, default=-1)
parser.add_argument("--train_max_msa_num", type=int, default=256)
parser.add_argument("--eval_max_msa_num", type=int, default=256)
parser.add_argument("--test_max_msa_num", type=int, default=1000)
parser.add_argument("--overwrite", dest="overwrite", action="store_true")
return parser
def __init__(
self,
data_dir: str = DATA_DIR,
train_batch_size: int = 1,
eval_batch_size: int = 1,
test_batch_size: int = 1,
num_workers: int = 0,
train_max_seq_len: int = 256,
eval_max_seq_len: int = 256,
test_max_seq_len: int = -1,
train_max_msa_num: int = 32,
eval_max_msa_num: int = 32,
test_max_msa_num: int = 64,
tokenize: Callable[[str], List[int]] = default_tokenize,
seq_pad_value: int = 20,
overwrite: bool = False,
**kwargs,
):
super(TrRosettaDataModule, self).__init__()
self.data_dir = Path(data_dir).expanduser().resolve()
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.test_batch_size = test_batch_size
self.num_workers = num_workers
self.train_max_seq_len = train_max_seq_len
self.eval_max_seq_len = eval_max_seq_len
self.test_max_seq_len = test_max_seq_len
self.train_max_msa_num = train_max_msa_num
self.eval_max_msa_num = eval_max_msa_num
self.test_max_msa_num = test_max_msa_num
self.tokenize = tokenize
self.seq_pad_value = seq_pad_value
self.overwrite = overwrite
get_or_download()
def setup(self, stage: Optional[str] = None):
self.train = TrRosettaDataset(
self.data_dir,
self.data_dir / "train_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=True,
max_seq_len=self.train_max_seq_len,
max_msa_num=self.train_max_msa_num,
overwrite=self.overwrite,
)
self.val = TrRosettaDataset(
self.data_dir,
self.data_dir / "valid_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=False,
max_seq_len=self.eval_max_seq_len,
max_msa_num=self.eval_max_msa_num,
overwrite=self.overwrite,
)
self.test = TrRosettaDataset(
self.data_dir,
self.data_dir / "valid_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=False,
max_seq_len=self.test_max_seq_len,
max_msa_num=self.test_max_msa_num,
overwrite=self.overwrite,
)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(
self.train,
batch_size=self.train_batch_size,
shuffle=True,
collate_fn=self.train.collate_fn,
num_workers=self.num_workers,
)
def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val,
batch_size=self.eval_batch_size,
shuffle=False,
collate_fn=self.val.collate_fn,
num_workers=self.num_workers,
)
def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.test,
batch_size=self.test_batch_size,
shuffle=False,
collate_fn=self.test.collate_fn,
num_workers=self.num_workers,
)
def test():
dm = TrRosettaDataModule(train_batch_size=1, num_workers=4)
dm.setup()
for batch in dm.train_dataloader():
print("id", batch["id"])
print("seq", batch["seq"].shape, batch["seq"])
print("msa", batch["msa"].shape, batch["msa"][..., :20])
print("msa", batch["msa"].shape, batch["msa"][..., -20:])
print("coords", batch["coords"].shape)
print("angles", batch["angles"].shape)
print("mask", batch["mask"].shape)
print("msa_mask", batch["msa_mask"].shape)
print("dist", batch["dist"].shape, batch["dist"])
break
if __name__ == "__main__":
test()
|
# will use FastRelax routine to refine structure
import os
import json
import warnings
# science
import numpy as np
# pyrosetta installation instructs in readme
try:
import pyrosetta
except ModuleNotFoundError:
msg = "Unable to find an existing installation of the PyRosetta module. " +\
"Functions involving this module such as the FastRelax pipeline " +\
"will not work."
warnings.warn(msg) # no pyRosetta was found
#####################
### ROSETTA STUFF ###
#####################
def pdb2rosetta(route):
""" Takes pdb file route(s) as input and returns rosetta pose(s).
Input:
* route: list or string.
Output: list of 1 or many according to input
"""
if isinstance(route, str):
return [pyrosetta.io.pose_from_pdb(route)]
else:
return list(pyrosetta.io.poses_from_files(route))
def rosetta2pdb(pose, route, verbose=True):
""" Takes pose(s) as input and saves pdb(s) to disk.
Input:
* pose: list or string. rosetta poses object(s).
        * route: list or string. destination filenames to be written.
        * verbose: bool. warns if lengths don't match and @ every write.
Inspo:
* https://www.rosettacommons.org/demos/latest/tutorials/input_and_output/input_and_output#controlling-output_common-structure-output-files_pdb-file
* https://graylab.jhu.edu/PyRosetta.documentation/pyrosetta.rosetta.core.io.pdb.html#pyrosetta.rosetta.core.io.pdb.dump_pdb
"""
# convert to list
    pose = [pose] if not isinstance(pose, list) else pose
route = [route] if isinstance(route, str) else route
# check lengths and warn if necessary
if verbose and ( len(pose) != len(route) ):
print("Length of pose and route are not the same. Will stop at the minimum.")
    # convert and save
    for pos, route_i in zip(pose, route):
        pyrosetta.rosetta.core.io.pdb.dump_pdb(pos, route_i)
        if verbose:
            print("Saved structure @ " + route_i)
return
def run_fast_relax(config_route, pdb_route=None, pose=None):
""" Runs the Fast-Relax pipeline.
* config_route: route to json file with config
* pose: rosetta pose to run the pipeline on
Output: rosetta pose
"""
# load rosetta pose - if string or list is passed, convert to pose + recall
if isinstance(pdb_route, str):
pose = pdb2rosetta(pdb_route)
return run_fast_relax(config, pose=pose)
elif isinstance(pdb_route, list):
return [run_fast_relax(config, pdb_route=pdb) for pdb in pdb_route]
# load config:
config = json.load(config_route)
# run fast relax pipeline - examples:
# https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.02-Packing-design-and-regional-relax.ipynb#scrollTo=PYr025Rn1Q8i
# https://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.03-Design-with-a-resfile-and-relax.ipynb
# https://faculty.washington.edu/dimaio/files/demo2.py
raise NotImplementedError("Last step. Not implemented yet.")
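# A hedged sketch (not wired into the pipeline above) of what the missing FastRelax step usually
# looks like in PyRosetta, based on the notebooks linked above. The score function and mover are
# standard PyRosetta API; any config-driven options are left out here and would be illustrative.
def fast_relax_sketch(pose):
    pyrosetta.init()
    scorefxn = pyrosetta.get_fa_scorefxn()
    relax = pyrosetta.rosetta.protocols.relax.FastRelax()
    relax.set_scorefxn(scorefxn)
    relax.apply(pose)   # relaxes the pose in place
    return pose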
|
from setuptools import setup, find_packages
setup(
name = 'axial_attention',
packages = find_packages(),
version = '0.6.1',
license='MIT',
description = 'Axial Attention',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/axial-attention',
keywords = ['attention', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim = 1)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim = 1)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim = 1)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 1)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim = 1)
dx = torch.cat([dx1, dx2], dim = 1)
return x, dx
class IrreversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = f
self.g = g
def forward(self, x, f_args, g_args):
x1, x2 = torch.chunk(x, 2, dim = 1)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim = 1)
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks, ):
super().__init__()
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for (f, g) in blocks])
def forward(self, x, arg_route = (True, True), **kwargs):
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
x = torch.cat((x, x), dim = 1)
x = _ReversibleFunction.apply(x, self.blocks, block_kwargs)
return torch.stack(x.chunk(2, dim = 1)).mean(dim = 0)
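# A minimal usage sketch of ReversibleSequence (illustrative only): the input is duplicated along
# dim 1 internally, so each f and g must map (b, c, ...) -> (b, c, ...) with the same channel
# count. Plain Linear layers stand in for the attention / feedforward blocks used elsewhere.
def _example_reversible():
    dim = 8
    blocks = nn.ModuleList([
        nn.ModuleList([nn.Linear(dim, dim), nn.Linear(dim, dim)])
        for _ in range(2)
    ])
    net = ReversibleSequence(blocks)
    x = torch.randn(4, dim, requires_grad = True)
    out = net(x)
    out.sum().backward()   # gradients are reconstructed block by block in backward_pass
    return out.shape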
|
from axial_attention.axial_attention import AxialAttention, AxialPositionalEmbedding, AxialImageTransformer, SelfAttention
|
import torch
from torch import nn
from operator import itemgetter
from axial_attention.reversible import ReversibleSequence
# helper functions
def exists(val):
return val is not None
def map_el_ind(arr, ind):
return list(map(itemgetter(ind), arr))
def sort_and_return_indices(arr):
indices = [ind for ind in range(len(arr))]
arr = zip(arr, indices)
arr = sorted(arr)
return map_el_ind(arr, 0), map_el_ind(arr, 1)
# calculates the permutation to bring the input tensor to something attend-able
# also calculates the inverse permutation to bring the tensor back to its original shape
def calculate_permutations(num_dimensions, emb_dim):
total_dimensions = num_dimensions + 2
emb_dim = emb_dim if emb_dim > 0 else (emb_dim + total_dimensions)
axial_dims = [ind for ind in range(1, total_dimensions) if ind != emb_dim]
permutations = []
for axial_dim in axial_dims:
last_two_dims = [axial_dim, emb_dim]
dims_rest = set(range(0, total_dimensions)) - set(last_two_dims)
permutation = [*dims_rest, *last_two_dims]
permutations.append(permutation)
return permutations
# helper classes
class ChanLayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class Sequential(nn.Module):
def __init__(self, blocks):
super().__init__()
self.blocks = blocks
def forward(self, x):
for f, g in self.blocks:
x = x + f(x)
x = x + g(x)
return x
class PermuteToFrom(nn.Module):
def __init__(self, permutation, fn):
super().__init__()
self.fn = fn
_, inv_permutation = sort_and_return_indices(permutation)
self.permutation = permutation
self.inv_permutation = inv_permutation
def forward(self, x, **kwargs):
axial = x.permute(*self.permutation).contiguous()
shape = axial.shape
*_, t, d = shape
# merge all but axial dimension
axial = axial.reshape(-1, t, d)
# attention
axial = self.fn(axial, **kwargs)
# restore to original shape and permutation
axial = axial.reshape(*shape)
axial = axial.permute(*self.inv_permutation).contiguous()
return axial
# axial pos emb
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, shape, emb_dim_index = 1):
super().__init__()
parameters = []
total_dimensions = len(shape) + 2
ax_dim_indexes = [i for i in range(1, total_dimensions) if i != emb_dim_index]
self.num_axials = len(shape)
for i, (axial_dim, axial_dim_index) in enumerate(zip(shape, ax_dim_indexes)):
shape = [1] * total_dimensions
shape[emb_dim_index] = dim
shape[axial_dim_index] = axial_dim
parameter = nn.Parameter(torch.randn(*shape))
setattr(self, f'param_{i}', parameter)
def forward(self, x):
for i in range(self.num_axials):
x = x + getattr(self, f'param_{i}')
return x
# attention
class SelfAttention(nn.Module):
def __init__(self, dim, heads, dim_heads = None):
super().__init__()
self.dim_heads = (dim // heads) if dim_heads is None else dim_heads
dim_hidden = self.dim_heads * heads
self.heads = heads
self.to_q = nn.Linear(dim, dim_hidden, bias = False)
self.to_kv = nn.Linear(dim, 2 * dim_hidden, bias = False)
self.to_out = nn.Linear(dim_hidden, dim)
def forward(self, x, kv = None):
kv = x if kv is None else kv
q, k, v = (self.to_q(x), *self.to_kv(kv).chunk(2, dim=-1))
b, t, d, h, e = *q.shape, self.heads, self.dim_heads
merge_heads = lambda x: x.reshape(b, -1, h, e).transpose(1, 2).reshape(b * h, -1, e)
q, k, v = map(merge_heads, (q, k, v))
dots = torch.einsum('bie,bje->bij', q, k) * (e ** -0.5)
dots = dots.softmax(dim=-1)
out = torch.einsum('bij,bje->bie', dots, v)
out = out.reshape(b, h, -1, e).transpose(1, 2).reshape(b, -1, d)
out = self.to_out(out)
return out
# axial attention class
class AxialAttention(nn.Module):
def __init__(self, dim, num_dimensions = 2, heads = 8, dim_heads = None, dim_index = -1, sum_axial_out = True):
assert (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'
super().__init__()
self.dim = dim
self.total_dimensions = num_dimensions + 2
self.dim_index = dim_index if dim_index > 0 else (dim_index + self.total_dimensions)
attentions = []
for permutation in calculate_permutations(num_dimensions, dim_index):
attentions.append(PermuteToFrom(permutation, SelfAttention(dim, heads, dim_heads)))
self.axial_attentions = nn.ModuleList(attentions)
self.sum_axial_out = sum_axial_out
def forward(self, x):
assert len(x.shape) == self.total_dimensions, 'input tensor does not have the correct number of dimensions'
assert x.shape[self.dim_index] == self.dim, 'input tensor does not have the correct input dimension'
if self.sum_axial_out:
return sum(map(lambda axial_attn: axial_attn(x), self.axial_attentions))
out = x
for axial_attn in self.axial_attentions:
out = axial_attn(out)
return out
# axial image transformer
class AxialImageTransformer(nn.Module):
def __init__(self, dim, depth, heads = 8, dim_heads = None, dim_index = 1, reversible = True, axial_pos_emb_shape = None):
super().__init__()
permutations = calculate_permutations(2, dim_index)
get_ff = lambda: nn.Sequential(
ChanLayerNorm(dim),
nn.Conv2d(dim, dim * 4, 3, padding = 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(dim * 4, dim, 3, padding = 1)
)
self.pos_emb = AxialPositionalEmbedding(dim, axial_pos_emb_shape, dim_index) if exists(axial_pos_emb_shape) else nn.Identity()
layers = nn.ModuleList([])
for _ in range(depth):
attn_functions = nn.ModuleList([PermuteToFrom(permutation, PreNorm(dim, SelfAttention(dim, heads, dim_heads))) for permutation in permutations])
conv_functions = nn.ModuleList([get_ff(), get_ff()])
layers.append(attn_functions)
layers.append(conv_functions)
execute_type = ReversibleSequence if reversible else Sequential
self.layers = execute_type(layers)
def forward(self, x):
x = self.pos_emb(x)
return self.layers(x)
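# A minimal usage sketch of the two classes above. AxialAttention here runs on a channels-last
# tensor (dim_index = -1), while AxialImageTransformer expects channels-first images
# (dim_index = 1); the sizes are illustrative.
def _example_axial():
    attn = AxialAttention(dim = 32, num_dimensions = 2, heads = 4, dim_index = -1)
    img_last = torch.randn(1, 64, 64, 32)
    assert attn(img_last).shape == img_last.shape
    transformer = AxialImageTransformer(dim = 32, depth = 1, axial_pos_emb_shape = (64, 64))
    img_first = torch.randn(1, 32, 64, 64)
    assert transformer(img_first).shape == img_first.shape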
|
from setuptools import setup, find_packages
setup(
name = 'bit-diffusion',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Bit Diffusion - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/bit-diffusion',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion'
],
install_requires=[
'accelerate',
'einops',
'ema-pytorch',
'pillow',
'torch>=1.12.0',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import math
from pathlib import Path
from functools import partial
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from PIL import Image
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
# constants
BITS = 8
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to(pil_img_type, image):
if image.mode != pil_img_type:
return image.convert(pil_img_type)
return image
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with learned sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
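# A quick shape check for LearnedSinusoidalPosEmb: the output concatenates the raw time with the
# sin/cos features, so its width is dim + 1 (which is why fourier_dim in the Unet below is
# learned_sinusoidal_dim + 1).
def _example_learned_sinusoidal_pos_emb():
    emb = LearnedSinusoidalPosEmb(16)
    times = torch.rand(4)
    assert emb(times).shape == (4, 17)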
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
LayerNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
v = v / (h * w)
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
bits = BITS,
resnet_block_groups = 8,
learned_sinusoidal_dim = 16
):
super().__init__()
# determine dimensions
channels *= bits
self.channels = channels
input_channels = channels * 2
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
fourier_dim = learned_sinusoidal_dim + 1
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, channels, 1)
def forward(self, x, time, x_self_cond = None):
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
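# A minimal shape check of the Unet above (illustrative sizes): with the default 3 image channels
# and 8 bits, the network consumes and produces 24-channel bit tensors, and the self-conditioning
# input defaults to zeros when not supplied.
def _example_unet():
    unet = Unet(dim = 16, dim_mults = (1, 2))
    bit_imgs = torch.randn(1, 24, 32, 32)   # 3 channels * 8 bits
    times = torch.rand(1)
    out = unet(bit_imgs, times)
    assert out.shape == bit_imgs.shape
    return out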
# convert to bit representations and back
def decimal_to_bits(x, bits = BITS):
""" expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1 """
device = x.device
x = (x * 255).int().clamp(0, 255)
mask = 2 ** torch.arange(bits - 1, -1, -1, device = device)
mask = rearrange(mask, 'd -> d 1 1')
x = rearrange(x, 'b c h w -> b c 1 h w')
bits = ((x & mask) != 0).float()
bits = rearrange(bits, 'b c d h w -> b (c d) h w')
bits = bits * 2 - 1
return bits
def bits_to_decimal(x, bits = BITS):
""" expects bits from -1 to 1, outputs image tensor from 0 to 1 """
device = x.device
x = (x > 0).int()
mask = 2 ** torch.arange(bits - 1, -1, -1, device = device, dtype = torch.int32)
mask = rearrange(mask, 'd -> d 1 1')
x = rearrange(x, 'b (c d) h w -> b c d h w', d = bits)
dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
return (dec / 255).clamp(0., 1.)
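# A round-trip sanity check for the two converters above: encoding to bits and back recovers the
# image up to the 1/255 quantization introduced by the 8-bit representation.
def _example_bit_round_trip():
    img = torch.rand(1, 3, 4, 4)
    bits = decimal_to_bits(img)     # (1, 24, 4, 4) with values in {-1, 1}
    recon = bits_to_decimal(bits)   # (1, 3, 4, 4) in [0, 1]
    assert bits.shape == (1, 24, 4, 4)
    assert (img - recon).abs().max() < 1 / 255 + 1e-6
    return recon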
# bit diffusion class
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
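# A small numerical check of the helpers above: since sigmoid(x) + sigmoid(-x) = 1 for any
# log-SNR value, the returned (alpha, sigma) pair always satisfies alpha^2 + sigma^2 = 1,
# i.e. the schedule is variance preserving.
def _example_alpha_sigma():
    t = torch.linspace(0., 1., 5)
    alpha, sigma = log_snr_to_alpha_sigma(alpha_cosine_log_snr(t))
    assert torch.allclose(alpha ** 2 + sigma ** 2, torch.ones_like(t), atol = 1e-5)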
class BitDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
use_ddim = False,
noise_schedule = 'cosine',
time_difference = 0.,
bit_scale = 1.
):
super().__init__()
self.model = model
self.channels = self.model.channels
self.image_size = image_size
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.bit_scale = bit_scale
self.timesteps = timesteps
self.use_ddim = use_ddim
        # proposed in the paper, added to time_next
        # as a way to fix a deficiency in self-conditioning and to lower FID when the number of sampling timesteps is < 400
self.time_difference = time_difference
@property
def device(self):
return next(self.model.parameters()).device
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
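    # Added note: with timesteps = T this yields T (time, time_next) pairs per batch element,
    # walking from t = 1 (pure noise) down to t = 0 (data). For example, timesteps = 4 gives
    # the pairs (1.00, 0.75), (0.75, 0.50), (0.50, 0.25), (0.25, 0.00).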
@torch.no_grad()
def ddpm_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device=device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
# add the time delay
            time_next = (time_next - time_difference).clamp(min = 0.)
noise_cond = self.log_snr(time)
# get predicted x0
x_start = self.model(img, noise_cond, x_start)
# clip x0
x_start.clamp_(-self.bit_scale, self.bit_scale)
# get log(snr)
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
# get alpha sigma of time and next time
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# derive posterior mean and variance
c = -expm1(log_snr - log_snr_next)
mean = alpha_next * (img * (1 - c) / alpha + c * x_start)
variance = (sigma_next ** 2) * c
log_variance = log(variance)
# get noise
noise = torch.where(
rearrange(time_next > 0, 'b -> b 1 1 1'),
torch.randn_like(img),
torch.zeros_like(img)
)
img = mean + (0.5 * log_variance).exp() * noise
return bits_to_decimal(img)
@torch.no_grad()
def ddim_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device = device)
x_start = None
for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
# add the time delay
times_next = (times_next - time_difference).clamp(min = 0.)
# get times and noise levels
log_snr = self.log_snr(times)
log_snr_next = self.log_snr(times_next)
padded_log_snr, padded_log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(padded_log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(padded_log_snr_next)
# predict x0
x_start = self.model(img, log_snr, x_start)
# clip x0
x_start.clamp_(-self.bit_scale, self.bit_scale)
# get predicted noise
pred_noise = (img - alpha * x_start) / sigma.clamp(min = 1e-8)
# calculate x next
img = x_start * alpha_next + pred_noise * sigma_next
return bits_to_decimal(img)
@torch.no_grad()
def sample(self, batch_size = 16):
image_size, channels = self.image_size, self.channels
sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size))
def forward(self, img, *args, **kwargs):
batch, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
# sample random times
times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
# convert image to bit representation
img = decimal_to_bits(img) * self.bit_scale
# noise sample
noise = torch.randn_like(img)
noise_level = self.log_snr(times)
padded_noise_level = right_pad_dims_to(img, noise_level)
alpha, sigma = log_snr_to_alpha_sigma(padded_noise_level)
noised_img = alpha * img + sigma * noise
        # if doing self-conditioning, 50% of the time, predict x_start from the current set of times
        # and condition the unet on that prediction
        # this technique will slow down training by 25%, but seems to lower FID significantly
self_cond = None
if torch.rand((1)) < 0.5:
with torch.no_grad():
self_cond = self.model(noised_img, noise_level).detach_()
# predict and take gradient step
pred = self.model(noised_img, noise_level, self_cond)
return F.mse_loss(pred, img)
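# A hedged end-to-end sketch (added; the dim, dim_mults, image_size and timesteps values are
# illustrative assumptions, not prescribed settings): build the Unet, wrap it in BitDiffusion,
# compute a training loss on a random batch, and sample.
#
# model = Unet(dim = 32, channels = 3, dim_mults = (1, 2, 4, 8))
# bit_diffusion = BitDiffusion(model, image_size = 128, timesteps = 100, time_difference = 0.1)
# loss = bit_diffusion(torch.rand(4, 3, 128, 128))   # images are expected in [0, 1]
# loss.backward()
# sampled = bit_diffusion.sample(batch_size = 4)     # (4, 3, 128, 128) tensor in [0, 1]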
# dataset classes
class Dataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
augment_horizontal_flip = False,
pil_img_type = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
maybe_convert_fn = partial(convert_image_to, pil_img_type) if exists(pil_img_type) else nn.Identity()
self.transform = T.Compose([
T.Lambda(maybe_convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
mixed_precision_type = 'fp16',
split_batches = True,
pil_img_type = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = mixed_precision_type if amp else 'no'
)
self.model = diffusion_model
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
# dataset and dataloader
        self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, pil_img_type = pil_img_type)
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
self.ema.load_state_dict(data['ema'])
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
self.ema.to(device)
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
self.save(milestone)
self.step += 1
pbar.update(1)
accelerator.print('training complete')
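# A hedged Trainer usage sketch (added; the './images' folder and the hyperparameters below
# are assumptions for illustration):
#
# trainer = Trainer(
#     bit_diffusion,                       # a BitDiffusion instance as constructed above
#     './images',                          # folder of training images
#     train_batch_size = 4,
#     num_samples = 16,
#     train_lr = 1e-4,
#     save_and_sample_every = 1000,
#     results_folder = './results'
# )
# trainer.train()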
|
from bit_diffusion.bit_diffusion import Unet, BitDiffusion, Trainer
|
from setuptools import setup
__VERSION__ = '0.0.3'
setup(name='adamod',
version=__VERSION__,
      description='AdaMod optimization algorithm, built on PyTorch.',
long_description=open("README.md", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords=['machine learning', 'deep learning'],
classifiers=[
'Intended Audience :: Science/Research',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
url='https://github.com/karrynest/AdaMod',
author='Jianbang Ding',
author_email='[email protected]',
license='Apache',
packages=['adamod'],
install_requires=[
'torch>=0.4.0',
],
zip_safe=False,
python_requires='>=3.6.0')
|
from .adamod import AdaMod
|
import math
import torch
from torch.optim import Optimizer
class AdaMod(Optimizer):
"""Implements AdaMod algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)
It has been proposed in `Adaptive and Momental Bounds for Adaptive Learning Rate Methods`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
        beta3 (float, optional): smoothing coefficient for adaptive learning rates (default: 0.999)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), beta3=0.999,
eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= beta3 < 1.0:
raise ValueError("Invalid beta3 parameter: {}".format(beta3))
defaults = dict(lr=lr, betas=betas, beta3=beta3, eps=eps,
weight_decay=weight_decay)
super(AdaMod, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaMod, self).__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdaMod does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Exponential moving average of actual learning rates
state['exp_avg_lr'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq, exp_avg_lr = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_lr']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
                    p.data.add_(p.data, alpha=-group['weight_decay'] * group['lr'])
# Applies momental bounds on actual learning rates
step_size = torch.full_like(denom, step_size)
step_size.div_(denom)
                exp_avg_lr.mul_(group['beta3']).add_(step_size, alpha=1 - group['beta3'])
step_size = torch.min(step_size, exp_avg_lr)
step_size.mul_(exp_avg)
p.data.add_(-step_size)
return loss
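# Minimal usage sketch (added, not part of the original module): AdaMod drops into a
# standard training loop in place of Adam. The tiny model and random data below are
# placeholders chosen only to keep the example self-contained and runnable.
if __name__ == '__main__':
    import torch.nn as nn
    import torch.nn.functional as F

    model = nn.Linear(10, 1)
    optimizer = AdaMod(model.parameters(), lr=1e-3, betas=(0.9, 0.999), beta3=0.999)
    data, target = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(5):
        optimizer.zero_grad()
        loss = F.mse_loss(model(data), target)
        loss.backward()
        optimizer.step()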
|
import os
#%matplotlib notebook
import matplotlib.pyplot as plt
import torch
import numpy as np
LABELS = ['SGD','Adam', 'AdaMod']
def get_folder_path(use_pretrained=True):
if use_pretrained:
path = 'pretrained'
else:
path = 'curve'
return path
def get_curve_data(use_pretrained=True, model='ResNet'):
folder_path = get_folder_path(use_pretrained)
filenames = [name for name in os.listdir(folder_path) if name.startswith(model.lower())]
paths = [os.path.join(folder_path, name) for name in filenames]
keys = [name.split('-')[1] for name in filenames]
return {key: torch.load(fp) for key, fp in zip(keys, paths)}
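# Added note: the saved curve files are expected to be named '<model>-<optimizer>-...', so the
# returned dict maps optimizer names (e.g. 'sgd', 'adam', 'adamod') to their saved accuracy curves.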
def plot(use_pretrained=True, model='ResNet', optimizers=None, curve_type='train'):
assert model in ['ResNet', 'DenseNet'], 'Invalid model name: {}'.format(model)
assert curve_type in ['train', 'test'], 'Invalid curve type: {}'.format(curve_type)
assert all(_ in LABELS for _ in optimizers), 'Invalid optimizer'
curve_data = get_curve_data(use_pretrained, model=model)
plt.figure()
plt.title('{} Accuracy for {} on CIFAR-100'.format(curve_type.capitalize(), model))
plt.xlabel('Epoch')
plt.ylabel('{} Accuracy %'.format(curve_type.capitalize()))
if curve_type == 'train':
plt.ylim(80, 101)
else:
plt.ylim(50, 81)
for optim in optimizers:
accuracies = np.array(curve_data[optim.lower()]['{}_acc'.format(curve_type)])
plt.plot(accuracies, label=optim)
plt.grid(ls='--')
plt.legend()
    plt.savefig('cifar100-{}-{}.png'.format(model, curve_type.capitalize()))
    plt.show()
def main():
# plot(use_pretrained=True, model='ResNet', optimizers=LABELS, curve_type='train')
# plot(use_pretrained=True, model='ResNet', optimizers=LABELS, curve_type='test')
plot(use_pretrained=True, model='DenseNet', optimizers=LABELS, curve_type='train')
plot(use_pretrained=True, model='DenseNet', optimizers=LABELS, curve_type='test')
if __name__ == '__main__':
main()
|
"""Train CIFAR100 with PyTorch."""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from adamod import AdaMod
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--model', default='resnet', type=str, help='model',
choices=['resnet', 'densenet'])
parser.add_argument('--optim', default='adamod', type=str, help='optimizer',
choices=['sgd', 'adam', 'adamod'])
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--beta3', default=0.999, type=float,
                        help='smoothing coefficient term of AdaMod')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum term')
parser.add_argument('--beta1', default=0.9, type=float, help='Adam coefficients beta_1')
parser.add_argument('--beta2', default=0.999, type=float, help='Adam coefficients beta_2')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='weight decay for optimizers')
return parser
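# Example invocation (added; the script filename is an assumption about how this file is run):
#   python main.py --model resnet --optim adamod --lr 0.001 --beta3 0.999 --weight_decay 5e-4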
def build_dataset():
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True,
num_workers=2)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return train_loader, test_loader
def get_ckpt_name(dataset='cifar100', model='resnet', optimizer='adamod', lr=0.1, momentum=0.9,
beta1=0.9, beta2=0.999, beta3=0.999):
name = {
'sgd': 'lr{}-momentum{}'.format(lr, momentum),
'adam': 'lr{}-betas{}-{}'.format(lr, beta1, beta2),
'adamod': 'lr{}-betas{}-{}-{}'.format(lr, beta1, beta2, beta3),
}[optimizer]
return '{}-{}-{}'.format(model, optimizer, name)
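# Added example: with the defaults above, get_ckpt_name(model='resnet', optimizer='adamod')
# returns 'resnet-adamod-lr0.1-betas0.9-0.999-0.999'.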
def load_checkpoint(ckpt_name):
print('==> Resuming from checkpoint..')
path = os.path.join('checkpoint', ckpt_name)
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
assert os.path.exists(path), 'Error: checkpoint {} not found'.format(ckpt_name)
    return torch.load(path)
def build_model(args, device, ckpt=None):
print('==> Building model..')
net = {
'resnet': ResNet34,
'densenet': DenseNet121,
}[args.model]()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if ckpt:
net.load_state_dict(ckpt['net'])
return net
def create_optimizer(args, model_params):
if args.optim == 'sgd':
return optim.SGD(model_params, args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim == 'adam':
return optim.AdamW(model_params, args.lr, betas=(args.beta1, args.beta2),
weight_decay=args.weight_decay)
elif args.optim == 'adamod':
return AdaMod(model_params, args.lr, betas=(args.beta1, args.beta2),
beta3=args.beta3, weight_decay=args.weight_decay)
def train(net, epoch, device, data_loader, optimizer, criterion):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(data_loader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100. * correct / total
print('train acc %.3f' % accuracy)
return accuracy
def test(net, device, data_loader, criterion):
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(data_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100. * correct / total
print(' test acc %.3f' % accuracy)
return accuracy
def main():
parser = get_parser()
args = parser.parse_args()
train_loader, test_loader = build_dataset()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt_name = get_ckpt_name(model=args.model, optimizer=args.optim, lr=args.lr,
momentum=args.momentum, beta1=args.beta1, beta2=args.beta2, beta3=args.beta3)
if args.resume:
ckpt = load_checkpoint(ckpt_name)
best_acc = ckpt['acc']
start_epoch = ckpt['epoch']
else:
ckpt = None
best_acc = 0
start_epoch = -1
net = build_model(args, device, ckpt=ckpt)
criterion = nn.CrossEntropyLoss()
optimizer = create_optimizer(args, net.parameters())
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [150, 225], gamma=0.1,
last_epoch=start_epoch)
train_accuracies = []
test_accuracies = []
    for epoch in range(start_epoch + 1, 300):
        train_acc = train(net, epoch, device, train_loader, optimizer, criterion)
        test_acc = test(net, device, test_loader, criterion)
        scheduler.step()
# Save checkpoint.
if test_acc > best_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': test_acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, os.path.join('checkpoint', ckpt_name))
best_acc = test_acc
train_accuracies.append(train_acc)
test_accuracies.append(test_acc)
if not os.path.isdir('curve'):
os.mkdir('curve')
torch.save({'train_acc': train_accuracies, 'test_acc': test_accuracies},
os.path.join('curve', ckpt_name))
if __name__ == '__main__':
main()
|