python_code | repo_name | file_path
---|---|---|
""" Datasets for core experimental results """
import os
import pickle
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.utils import is_list, permutations
from torch.nn import functional as F
def deprecated(cls_or_func):
def _deprecated(*args, **kwargs):
print(f"{cls_or_func} is deprecated")
return cls_or_func(*args, **kwargs)
return _deprecated
# Default data path is environment variable or hippo/data
if (default_data_path := os.getenv("DATA_PATH")) is None:
default_data_path = Path(__file__).parent.parent.parent.absolute()
default_data_path = default_data_path / "data"
else:
default_data_path = Path(default_data_path).absolute()
class DefaultCollateMixin:
"""Controls collating in the DataLoader
The CollateMixin classes instantiate a dataloader by separating collate arguments with the rest of the dataloader arguments. Instantiations of this class should modify the callback functions as desired, and modify the collate_args list. The class then defines a _dataloader() method which takes in a DataLoader constructor and arguments, constructs a collate_fn based on the collate_args, and passes the rest of the arguments into the constructor.
"""
@classmethod
def _collate_callback(cls, x, *args, **kwargs):
"""
Modify the behavior of the default _collate method.
"""
return x
_collate_arg_names = []
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
"""
Modify the return value of the collate_fn.
Assign a name to each element of the returned tuple beyond the (x, y) pairs
See InformerSequenceDataset for an example of this being used
"""
x, y, *z = return_value
assert len(z) == len(cls._collate_arg_names), "Specify a name for each auxiliary data item returned by dataset"
return x, y, {k: v for k, v in zip(cls._collate_arg_names, z)}
@classmethod
def _collate(cls, batch, *args, **kwargs):
        # From https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
x = torch.stack(batch, dim=0, out=out)
# Insert custom functionality into the collate_fn
x = cls._collate_callback(x, *args, **kwargs)
return x
else:
return torch.tensor(batch)
@classmethod
def _collate_fn(cls, batch, *args, **kwargs):
"""
Default collate function.
Generally accessed by the dataloader() methods to pass into torch DataLoader
Arguments:
batch: list of (x, y) pairs
args, kwargs: extra arguments that get passed into the _collate_callback and _return_callback
"""
x, y, *z = zip(*batch)
x = cls._collate(x, *args, **kwargs)
y = cls._collate(y)
z = [cls._collate(z_) for z_ in z]
return_value = (x, y, *z)
return cls._return_callback(return_value, *args, **kwargs)
# List of loader arguments to pass into collate_fn
collate_args = []
def _dataloader(self, dataset, **loader_args):
collate_args = {k: loader_args[k] for k in loader_args if k in self.collate_args}
loader_args = {k: loader_args[k] for k in loader_args if k not in self.collate_args}
loader_cls = loader_registry[loader_args.pop("_name_", None)]
return loader_cls(
dataset=dataset,
collate_fn=partial(self._collate_fn, **collate_args),
**loader_args,
)
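# Illustrative sketch (not part of the original repo): a hypothetical mixin showing how the pieces
# above fit together. A dataloader kwarg named `pad_length` (an assumed name chosen for this example)
# is routed into the collate_fn via `collate_args`, and `_collate_callback` then right-pads the
# stacked batch along the length dimension.
class PadCollateMixin(DefaultCollateMixin):
    @classmethod
    def _collate_callback(cls, x, pad_length=None):
        if pad_length is not None and x.ndim >= 2:
            # F.pad lists trailing dims first: leave any channel dims alone, right-pad dim 1 (length)
            pad = (0, 0) * (x.ndim - 2) + (0, max(0, pad_length - x.shape[1]))
            x = F.pad(x, pad)
        return x

    collate_args = ['pad_length']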
class SequenceResolutionCollateMixin(DefaultCollateMixin):
"""self.collate_fn(resolution) produces a collate function that subsamples elements of the sequence"""
@classmethod
def _collate_callback(cls, x, resolution=None):
if resolution is None:
pass
else:
# Assume x is (B, L_0, L_1, ..., L_k, C) for x.ndim > 2 and (B, L) for x.ndim = 2
assert x.ndim >= 2
n_resaxes = max(1, x.ndim - 2) # [AG 22/07/02] this line looks suspicious... are there cases with 2 axes?
# rearrange: b (l_0 res_0) (l_1 res_1) ... (l_k res_k) ... -> res_0 res_1 .. res_k b l_0 l_1 ...
lhs = "b " + " ".join([f"(l{i} res{i})" for i in range(n_resaxes)]) + " ..."
rhs = " ".join([f"res{i}" for i in range(n_resaxes)]) + " b " + " ".join([f"l{i}" for i in range(n_resaxes)]) + " ..."
x = rearrange(x, lhs + " -> " + rhs, **{f'res{i}': resolution for i in range(n_resaxes)})
x = x[tuple([0] * n_resaxes)]
return x
@classmethod
def _return_callback(cls, return_value, resolution=None):
return *return_value, {"rate": resolution}
collate_args = ['resolution']
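# Usage sketch (illustrative, not from the original repo): with resolution=2 a collated batch of
# shape (B, L, C) keeps every other timestep, i.e. (B, L//2, C), and the extra dict reports the rate.
#
#   collate = partial(SequenceResolutionCollateMixin._collate_fn, resolution=2)
#   batch = [(torch.randn(16, 1), torch.tensor(0)) for _ in range(4)]
#   x, y, extra = collate(batch)   # x.shape == (4, 8, 1); extra == {"rate": 2}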
class ImageResolutionCollateMixin(SequenceResolutionCollateMixin):
"""self.collate_fn(resolution, img_size) produces a collate function that resizes inputs to size img_size/resolution"""
_interpolation = torchvision.transforms.InterpolationMode.BILINEAR
_antialias = True
@classmethod
def _collate_callback(cls, x, resolution=None, img_size=None, channels_last=True):
if x.ndim < 4:
return super()._collate_callback(x, resolution=resolution)
if img_size is None:
x = super()._collate_callback(x, resolution=resolution)
else:
x = rearrange(x, 'b ... c -> b c ...') if channels_last else x
_size = round(img_size/resolution)
x = torchvision.transforms.functional.resize(
x,
size=[_size, _size],
interpolation=cls._interpolation,
antialias=cls._antialias,
)
x = rearrange(x, 'b c ... -> b ... c') if channels_last else x
return x
@classmethod
def _return_callback(cls, return_value, resolution=None, img_size=None, channels_last=True):
return *return_value, {"rate": resolution}
collate_args = ['resolution', 'img_size', 'channels_last']
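# Example (illustrative): with img_size=32 and resolution=2, a channels-last image batch of shape
# (B, 32, 32, 3) is resized to (B, 16, 16, 3); for x.ndim < 4 the sequence subsampling above is used instead.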
# class SequenceDataset(LightningDataModule):
# [21-09-10 AG] Subclassing LightningDataModule fails due to trying to access _has_setup_fit. No idea why. So we just provide our own class with the same core methods as LightningDataModule (e.g. setup)
class SequenceDataset(DefaultCollateMixin):
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name")
# Since subclasses do not specify __init__ which is instead handled by this class
# Subclasses can provide a list of default arguments which are automatically registered as attributes
# TODO it might be possible to write this as a @dataclass, but it seems tricky to separate from the other features of this class such as the _name_ and d_input/d_output
@property
def init_defaults(self):
return {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
def __init__(self, _name_, data_dir=None, **dataset_cfg):
assert _name_ == self._name_
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
# Add all arguments to self
init_args = self.init_defaults.copy()
init_args.update(dataset_cfg)
for k, v in init_args.items():
setattr(self, k, v)
# The train, val, test datasets must be set by `setup()`
self.dataset_train = self.dataset_val = self.dataset_test = None
self.init()
def init(self):
"""Hook called at end of __init__, override this instead of __init__"""
pass
def setup(self):
"""This method should set self.dataset_train, self.dataset_val, and self.dataset_test."""
raise NotImplementedError
def split_train_val(self, val_split):
"""
Randomly split self.dataset_train into a new (self.dataset_train, self.dataset_val) pair.
"""
train_len = int(len(self.dataset_train) * (1.0 - val_split))
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
), # PL is supposed to have a way to handle seeds properly, but doesn't seem to work for us
)
def train_dataloader(self, **kwargs):
return self._train_dataloader(self.dataset_train, **kwargs)
def _train_dataloader(self, dataset, **kwargs):
if dataset is None: return
        kwargs['shuffle'] = 'sampler' not in kwargs  # shuffle can't be True if a custom sampler is provided
return self._dataloader(dataset, **kwargs)
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_val, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_test, **kwargs)
def _eval_dataloader(self, dataset, **kwargs):
if dataset is None: return
# Note that shuffle=False by default
return self._dataloader(dataset, **kwargs)
def __str__(self):
return self._name_
class ResolutionSequenceDataset(SequenceDataset, SequenceResolutionCollateMixin):
def _train_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if train_resolution is None: train_resolution = [1]
if not is_list(train_resolution): train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now."
return super()._train_dataloader(dataset, resolution=train_resolution[0], **kwargs)
def _eval_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if dataset is None: return
if eval_resolutions is None: eval_resolutions = [1]
if not is_list(eval_resolutions): eval_resolutions = [eval_resolutions]
dataloaders = []
for resolution in eval_resolutions:
dataloaders.append(super()._eval_dataloader(dataset, resolution=resolution, **kwargs))
return (
{
None if res == 1 else str(res): dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None else None
)
class ImageResolutionSequenceDataset(ResolutionSequenceDataset, ImageResolutionCollateMixin):
pass
# Registry for dataloader class
loader_registry = {
None: torch.utils.data.DataLoader, # default case
}
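# Hypothetical end-to-end sketch (assumed names, not part of the original repo): a new dataset only
# needs a `_name_`, optional `init_defaults`, and a `setup()` that fills the three splits. Defining
# the subclass is enough to register it in SequenceDataset.registry via __init_subclass__ above.
class ToySequenceDataset(SequenceDataset):
    _name_ = "toy"
    init_defaults = {"n_samples": 256, "seq_len": 64, "val_split": 0.1}

    def setup(self):
        x = torch.randn(self.n_samples, self.seq_len, 1)
        y = torch.randint(0, 2, (self.n_samples,))
        self.dataset_train = torch.utils.data.TensorDataset(x, y)
        self.dataset_test = torch.utils.data.TensorDataset(x[: self.n_samples // 10], y[: self.n_samples // 10])
        self.split_train_val(self.val_split)

# Assumed usage:
#   ds = SequenceDataset.registry["toy"](_name_="toy")
#   ds.setup()
#   train_loader = ds.train_dataloader(batch_size=32, num_workers=0)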
| hyena-dna-main | src/dataloaders/base.py |
import torch
import csv
import pandas as pd
import numpy as np
from tqdm import tqdm
import liftover
from pathlib import Path
from pyfaidx import Fasta
from random import randrange, random
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class ChromatinProfileDataset(torch.utils.data.Dataset):
'''
Recreation of chromatin profile prediction benchmark from BigBird paper https://arxiv.org/abs/2007.14062
Original sequence coordinates and target labels are provided via a csv.
Original sequences have a length of 1000. This is changed to be max_length on the fly.
Target labels are read into a LongTensor. Coordinates are read into a DataFrame with columns "Chr_No" (0-based), "Start" and "End".
Original coordinates are in hg19 format named as train_hg19_coords_targets.csv etc.
Hg19 coordinates will be translated to hg38 if ref_genome_version=='hg38'.
    The translated coordinates can be saved to a new file, e.g. train_hg38_coords_targets.csv, so this only needs to be done once.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
max_length,
ref_genome_path=None,
ref_genome_version=None,
coords_target_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False,
save_liftover=False,
):
self.max_length = max_length
assert max_length%2==0 # check window is divisible by 2
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.rc_aug = rc_aug
self.ref_genome_version = ref_genome_version
# self.ref_genome = FastaInterval(fasta_file=ref_genome_path, max_length=self.max_length)
self.ref_genome = FastaInterval(fasta_file=ref_genome_path)
# Original data coordinates are from hg19.
# If ref genome is hg38 and original coordinates are provided these must be translated by liftover.
# Conversion only needs to be done once so save liftover coordinates to file optionally.
if self.ref_genome_version=='hg19':
if 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
else:
raise ValueError('Make sure data coordinates are in hg19 format (and put "hg19" in filename)')
elif self.ref_genome_version=='hg38':
if 'hg38' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
elif 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
print('ref_genome_version = "hg38" but target coordinates are labelled "hg19"')
self.convert_coordinates(coords_target_path, save_liftover)
else:
raise ValueError('Make sure data coordinates have correct hg19/hg38 in filename')
else:
raise ValueError('ref_genome_version must be "hg19" or "hg38"')
# Move start/end to new window
# Window = 1000 used in raw coordinate data
self.coords['Start'] = self.coords['Start']-int((max_length-1000)/2)
self.coords['End'] = self.coords['End']+int((max_length-1000)/2)
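        # Worked example (illustrative): with max_length=2000 each original 1000 bp window is widened
        # by (2000-1000)/2 = 500 bp on both sides, so (Start, End) = (5000, 6000) becomes (4500, 6500).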
def load_csv_data(self, coords_target_path):
# Grab sequence coordinates from csv
self.coords = pd.read_csv(
coords_target_path,
usecols=['Chr_No','Start','End'],
dtype={'Chr_No':np.int64,'Start':np.int64,'End':np.int64}
).reset_index(drop=True) # Note Chr_No is zero-based
# Quickly grab target column names
with open(coords_target_path, "r") as f:
reader = csv.reader(f)
header = next(reader)
self.target_columns = [col for col in header if col[:2]=='y_' ]
# Grab targets from csv and convert to torch long format
self.targets = torch.from_numpy(
pd.read_csv(
coords_target_path,
usecols=self.target_columns,
dtype={k:bool for k in self.target_columns}
).to_numpy()
).long()
def __len__(self):
return len(self.coords)
def __getitem__(self, idx):
y = self.targets[idx]
coord = self.coords.iloc[idx]
seq = self.ref_genome(
'chr{}'.format(coord['Chr_No']+1), # Make chromosome id 1-based
coord['Start'],
coord['End'],
max_length=self.max_length,
)
# # apply rc_aug here if using
# if self.rc_aug and coin_flip():
# seq = string_reverse_complement(seq)
if self.tokenizer==None:
return seq, y
        x = self.tokenizer(seq.upper())  # apply upper() in case the ref genome is soft-masked
        x = torch.LongTensor(x["input_ids"])  # grab input ids and convert to LongTensor
return x, y
def convert_coordinates(self, coords_target_path, save_liftover):
'''
Loop through coordinates and translate from hg19 to hg38.
Filter entries where liftover fails.
Save this to file so we only have to do it once.
'''
converter = liftover.get_lifter('hg19', 'hg38')
print("Translating coordinates from hg19 to hg38:")
for i in tqdm(range(len(self.coords))):
row = self.coords.iloc[i]
new_start = converter['chr{}'.format(row['Chr_No']+1)][row['Start']]
new_end = converter['chr{}'.format(row['Chr_No']+1)][row['End']]
            if (len(new_start) == 0) or (len(new_end) == 0):
                # If liftover fails set -999 for filtering
                # (use .at for in-place assignment; chained .iloc[i][col] = ... writes to a copy and is silently lost)
                self.coords.at[i, 'Start'] = -999
            else:
                self.coords.at[i, 'Start'] = new_start[0][1]
                self.coords.at[i, 'End'] = new_end[0][1]
# Filter unmapped coordinates
n_before = len(self.coords)
self.coords = self.coords.query('Start!=-999')
n_after = len(self.coords)
print('Filtered {} unmapped coordinates. There are {} samples remaining'.format(n_before-n_after, n_after))
# Filter incorrect window sizes
n_before=n_after
self.coords = self.coords.query('End-Start==1000')
n_after = len(self.coords)
print('Filtered {} incorrect window sizes. There are {} samples remaining'.format(n_before-n_after, n_after))
# Reindex targets based on filtered coordinates and reset coordinate index
self.targets = self.targets[self.coords.index.to_numpy()]
self.coords.reset_index(inplace=True, names=['filter_index'])
assert len(self.targets) == len(self.coords) # Sanity check
if save_liftover: # save liftover coords in original format and change filename accordingly
hg38_coords_targets = pd.concat([self.coords, pd.DataFrame(columns=self.target_columns, data=self.targets)], axis=1)
print('Saving translated and filtered data to {}'.format(coords_target_path.replace('hg19','hg38')))
hg38_coords_targets.to_csv(coords_target_path.replace('hg19','hg38'))
            del hg38_coords_targets
| hyena-dna-main | src/dataloaders/datasets/chromatin_profile_dataset.py |
from pyfaidx import Fasta
import torch
from random import random
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class NucleotideTransformerDataset(torch.utils.data.Dataset):
'''
Loop thru fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
max_length,
dataset_name=None,
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name
assert base_path.exists(), 'path to fasta file must exist'
for file in (base_path.iterdir()):
if str(file).endswith('.fasta') and split in str(file):
self.seqs = Fasta(str(file), read_long_names=True)
self.label_mapper = {}
for i, key in enumerate(self.seqs.keys()):
self.label_mapper[i] = (key, int(key.rstrip()[-1]))
def __len__(self):
return len(self.seqs.keys())
def __getitem__(self, idx):
seq_id = self.label_mapper[idx][0]
x = self.seqs[seq_id][:].seq # only one sequence
y = self.label_mapper[idx][1] # 0 or 1 for binary classification
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# need to wrap in list
target = torch.LongTensor([y]) # offset by 1, includes eos
return seq, target
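if __name__ == '__main__':
    """Quick smoke test (illustrative sketch, mirroring the genomic-benchmark example elsewhere in
    this repo; the dataset_name and dest_path below are assumptions, not verified paths)."""
    max_length = 512
    tokenizer = CharacterTokenizer(
        characters=['A', 'C', 'G', 'T', 'N'],
        model_max_length=max_length + 2,  # +2 leaves room for special tokens, cropped later
        add_special_tokens=False,
        padding_side='left',
    )
    ds = NucleotideTransformerDataset(
        split='train',
        max_length=max_length,
        dataset_name='enhancers',                   # assumed dataset folder name under dest_path
        dest_path='data/nucleotide_transformer/',   # assumed local path
        tokenizer=tokenizer,
        tokenizer_name='char',
        use_padding=True,
    )
    seq, target = ds[0]
    print(seq.shape, target)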
| hyena-dna-main | src/dataloaders/datasets/nucleotide_transformer_dataset.py |
from itertools import islice
from functools import partial
import os
import functools
# import json
# from pathlib import Path
# from pyfaidx import Fasta
# import polars as pl
# import pandas as pd
import torch
import torch.nn.functional as F  # needed by seq_indices_to_one_hot below
from random import randrange, random
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.data_check import info
from genomic_benchmarks.data_check import list_datasets
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.dataset_getters import pytorch_datasets
from genomic_benchmarks.data_check import is_downloaded
from src.dataloaders.base import default_data_path
"""
Genomic Benchmarks Dataset, from:
https://github.com/ML-Bioinfo-CEITEC/genomic_benchmarks
"""
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def cast_list(t):
return t if isinstance(t, list) else [t]
def coin_flip():
return random() > 0.5
# genomic function transforms
seq_indices_embed = torch.zeros(256).long()
seq_indices_embed[ord('a')] = 0
seq_indices_embed[ord('c')] = 1
seq_indices_embed[ord('g')] = 2
seq_indices_embed[ord('t')] = 3
seq_indices_embed[ord('n')] = 4
seq_indices_embed[ord('A')] = 0
seq_indices_embed[ord('C')] = 1
seq_indices_embed[ord('G')] = 2
seq_indices_embed[ord('T')] = 3
seq_indices_embed[ord('N')] = 4
seq_indices_embed[ord('.')] = -1
one_hot_embed = torch.zeros(256, 4)
one_hot_embed[ord('a')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('c')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('g')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('t')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('n')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('A')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('C')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('G')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('T')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('N')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('.')] = torch.Tensor([0.25, 0.25, 0.25, 0.25])
reverse_complement_map = torch.Tensor([3, 2, 1, 0, 4]).long()
def torch_fromstring(seq_strs):
batched = not isinstance(seq_strs, str)
seq_strs = cast_list(seq_strs)
np_seq_chrs = list(map(lambda t: np.fromstring(t, dtype = np.uint8), seq_strs))
seq_chrs = list(map(torch.from_numpy, np_seq_chrs))
return torch.stack(seq_chrs) if batched else seq_chrs[0]
def str_to_seq_indices(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return seq_indices_embed[seq_chrs.long()]
def str_to_one_hot(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return one_hot_embed[seq_chrs.long()]
def seq_indices_to_one_hot(t, padding = -1):
is_padding = t == padding
t = t.clamp(min = 0)
one_hot = F.one_hot(t, num_classes = 5)
out = one_hot[..., :4].float()
out = out.masked_fill(is_padding[..., None], 0.25)
return out
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
def seq_indices_reverse_complement(seq_indices):
complement = reverse_complement_map[seq_indices.long()]
return torch.flip(complement, dims = (-1,))
def one_hot_reverse_complement(one_hot):
*_, n, d = one_hot.shape
assert d == 4, 'must be one hot encoding with last dimension equal to 4'
return torch.flip(one_hot, (-1, -2))
class GenomicBenchmarkDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
max_length,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
def __len__(self):
return len(self.all_paths)
def __getitem__(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# need to wrap in list
target = torch.LongTensor([y]) # offset by 1, includes eos
return seq, target
if __name__ == '__main__':
"""Quick test loading dataset.
example
python -m src.dataloaders.datasets.genomic_bench_dataset
"""
max_length = 300 # max len of seq grabbed
use_padding = True
dest_path = "data/genomic_benchmark/"
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
# not sure why tokenizer needs max len
model_max_length=max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
padding_side='left',
)
ds = GenomicBenchmarkDataset(
max_length = max_length,
use_padding = use_padding,
split = 'train', #
tokenizer=tokenizer,
tokenizer_name='char',
dest_path=dest_path,
# add_eos=False,
)
# it = iter(ds)
# elem = next(it)
# print('elem[0].shape', elem[0].shape)
# print(elem)
    # breakpoint()
| hyena-dna-main | src/dataloaders/datasets/genomic_bench_dataset.py |
"""
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenzier for Hugging Face Transformers.
This is heavily inspired from CanineTokenizer in transformers package.
"""
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
[UNK] with id=6. Following are list of all of the special tokens with
their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
an id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
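    # Encoding example (illustrative; assumes the Hugging Face default add_special_tokens=True):
    #   tok = CharacterTokenizer(characters=['A', 'C', 'G', 'T', 'N'], model_max_length=512)
    #   tok("ACGTN")["input_ids"]                            ->  [0, 7, 8, 9, 10, 11, 1]   # [CLS] A C G T N [SEP]
    #   tok("ACGTN", add_special_tokens=False)["input_ids"]  ->  [7, 8, 9, 10, 11]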
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
        return cls.from_config(cfg)
| hyena-dna-main | src/dataloaders/datasets/hg38_char_tokenizer.py |
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random
import numpy as np
"""
Dataset for sampling arbitrary intervals from the human genome.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False,
pad_interval = False,
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
self.pad_interval = pad_interval
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
if self.pad_interval:
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False,
replace_N_token=False, # replace N token with pad token
pad_interval = False, # options for different padding
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.replace_N_token = replace_N_token
self.pad_interval = pad_interval
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
# max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug,
pad_interval = pad_interval,
)
def __len__(self):
return len(self.df)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, max_length=self.max_length, return_augs=self.return_augs)
if self.tokenizer_name == 'char':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
add_special_tokens=False) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
# add_special_tokens=False,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
if self.replace_N_token:
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, self.tokenizer._vocab_str_to_int['N'], self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target
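# Worked example of the FastaInterval logic above (illustrative): for a bed row spanning 1,000 bp
# with max_length=1,024, the interval is widened by 12 bp on each side; if the widened window runs
# off either end of the chromosome, the overhang is clipped and, when pad_interval=True, returned
# as '.' padding. __getitem__ then tokenizes the string and returns a next-token pair:
# data = seq[:-1] is the input, target = seq[1:] holds the shifted-by-one labels.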
| hyena-dna-main | src/dataloaders/datasets/hg38_dataset.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
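# Worked example (illustrative): with 10,241 tokens and seq_len=1024, drop_last=True keeps
# ntokens = ((10241 - 1) // 1024) * 1024 + 1 = 10241, giving ceil((10241 - 1) / 1024) = 10 full
# sequences; each item returns (data, target) of length 1024, where target is data shifted by one.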
class LMDataset(torch.utils.data.Dataset):
def __init__(self, tokens, seq_len, drop_last=True):
"""tokens should be a numpy array
"""
self.seq_len = seq_len
ntokens = len(tokens)
if drop_last:
ntokens = ((ntokens - 1) // seq_len) * seq_len + 1
self.ntokens = ntokens
# We're careful not to slice tokens, since it could be a memmap'ed array or H5 dataset,
# and slicing would load it to memory.
self.tokens = tokens
self.total_sequences = math.ceil((self.ntokens - 1) / self.seq_len)
def __len__(self):
return self.total_sequences
def __getitem__(self, idx):
start_idx = idx * self.seq_len
seq_len = min(self.seq_len, self.ntokens - 1 - start_idx)
data = torch.as_tensor(self.tokens[start_idx:(start_idx + seq_len + 1)].astype(np.int64))
        return data[:-1], data[1:].clone()
| hyena-dna-main | src/dataloaders/datasets/lm_dataset.py |
import torch
from random import random, randint
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.data_check import is_downloaded
"""
In-Context learning version of Genomic Benchmarks Dataset
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class ICLGenomicsDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split: str,
shots: int,
max_length: int,
label_to_token: dict=None,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=True, # need this for current ICL setup
eos_token=None, # end of sequence token (None defaults to tokenizer.sep_token)
rc_aug=False,
):
self.shots = shots
self.label_to_token = {0: 'A', 1: 'N'} if label_to_token is None else label_to_token
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.add_eos = add_eos
self.eos_token = eos_token
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
self.unique_labels = label_mapper.values()
self.n_samples = len(self.all_paths)
def __len__(self):
return self.n_samples
def get_sample_from_idx(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
if len(self.label_to_token[y])>1:
# to get cls token, we can't use the normal self.tokenizer, which will split into separate chars,
# we need to lookup the vocab dict directly, while using UNK by default if not found
            # use the mapped label string as the class token
target = [self.tokenizer._vocab_str_to_int.get(self.label_to_token[y], self.tokenizer._vocab_str_to_int["[UNK]"])]
else:
target = self.tokenizer(self.label_to_token[y], add_special_tokens=False)['input_ids']
# need to handle eos here
eos_token = [self.tokenizer.sep_token_id] if not exists(self.eos_token) else self.tokenizer(self.eos_token, add_special_tokens=False)['input_ids']
if self.add_eos:
seq = seq + eos_token
if self.add_eos:
target = target + eos_token
# convert to tensor
seq = torch.LongTensor(seq)
target = torch.LongTensor(target)
return seq, target
def __getitem__(self, idx):
test_seq, test_target = self.get_sample_from_idx(idx)
test_target = test_target[0].unsqueeze(0)
if self.shots==0:
return test_seq, test_target
shot_indices = {}
for label in self.unique_labels:
label_indices = np.where(np.array(self.all_labels)==label)[0]
label_indices = np.array([i for i in label_indices if i!=idx])
shot_indices[label] = np.random.choice(label_indices, size=self.shots, replace=False)
shots = []
for shot in range(self.shots):
for label in shot_indices:
seq, target = self.get_sample_from_idx(shot_indices[label][shot])
shots.append(torch.cat([seq, target],dim=0))
# lets shuffle the shots to avoid always having the same order
np.random.shuffle(shots)
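        # Final sample layout: each shot is [sequence tokens (+ eos if add_eos), label token (+ eos)],
        # shots are shuffled, and the unlabeled test sequence is appended last, i.e.
        #   [seq_a, label_a, seq_b, label_b, ..., test_seq]  -> the model predicts test_target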
shots = torch.cat([torch.cat(shots, dim=0), test_seq], dim=0)
        return shots, test_target
| hyena-dna-main | src/dataloaders/datasets/icl_genomics_dataset.py |
from itertools import islice
from functools import partial
# import tensorflow as tf
import os
import functools
import json
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random, randint
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
"""
Modifying the hg38 pretraining dataset to include the chromosome token as a class token at the end. This
will help introduce the concept of class appending for ICL in downstream tasks.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
self.return_seq_indices = return_seq_indices
self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, return_augs = False):
interval_length = end - start
chromosome = self.seqs[chr_name]
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if exists(self.max_length) and interval_length < self.max_length:
extra_seq = self.max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > self.max_length:
end = start + self.max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class ICL_HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
min_length=None,
variable_length=False, # if you want a var length between min and max length, else len = max_length always
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False
):
self.min_length = min_length if min_length is not None else 0.25 * max_length
self.max_length = max_length
self.variable_length = variable_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug,
)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, return_augs=self.return_augs)
if self.variable_length:
# sample a random len between min and max
seq_len = randint(self.min_length, self.max_length)
seq = seq[:seq_len]
if self.variable_length:
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.max_length,
truncation=True,
add_special_tokens=False,
)
else:
# fixed size each time
seq = self.tokenizer(seq,
add_special_tokens=False,
max_length=self.pad_max_length
)
seq = seq["input_ids"] # get input_ids
sep_token = self.tokenizer.sep_token_id
# to get cls token, we can't use the normal self.tokenizer, which will split into separate chars,
# we need to lookup the vocab dict directly, while using UNK by default if not found
# use the chr_name as the cls token
cls_token = self.tokenizer._vocab_str_to_int.get(chr_name, self.tokenizer._vocab_str_to_int["[UNK]"])
# build token ICL sample structure
# x = seq[1:] + sep + cls
# remove 1 from left side (pad side) so that we can add an extra sep_token between, and still have max_length seq
# need to wrap single tokens in a list to be able to add this way
seq_sample = seq[1:] + [sep_token] + [cls_token]
# convert to tensor
seq_sample = torch.LongTensor(seq_sample)
data = seq_sample[:-1].clone() # remove cls token in data, (or input x)
target = seq_sample[1:].clone() # offset by 1, includes cls token
        return data, target
| hyena-dna-main | src/dataloaders/datasets/hg38_icl_dataset.py |
import os
from pathlib import Path
from pyfaidx import Fasta
import torch
import shutil
import gzip
import random
from typing import Optional, Union, Dict, List
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
import collections
"""
Dataset that randomly samples sequences of length (X) from a species' whole genome.
Given a specific species, it will...
1. Randomly sample a chromosome from that species
2. Randomly sample a sequence of length X from that chromosome
All sampled sequences will be the same size.
If a sequence is truncated by the end of a chromosome, it will be padded with 'N'
Char sequences (not one hots yet)
No augmentations yet.
"""
# Determine chromosomes to use for train/test split
SPECIES_CHROMOSOME_SPLITS = {
'human' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'lemur' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'goat' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'sheep' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'pig' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'mouse' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', '18', '19', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'gorilla' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'orangutan' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'chimpanzee' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'hippo' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
}
}
class SpeciesDataset(torch.utils.data.Dataset):
'''
Loop thru fasta files (separated by chromosome) and return a sequence of length `max_length` from a random chromosome.
'''
def __init__(
self,
species: list,
species_dir: str,
split: str,
max_length,
total_size,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
rc_aug=False,
return_augs=False,
chromosome_weights: Optional[Union[Dict[str, List[float]], str]]='uniform',
species_weights: Optional[Union[List[float], str]]='uniform',
task='species_classification|next_token_pred',
remove_tail_ends=False,
cutoff_train=0.1,
cutoff_test=0.2,
):
"""
`chromosome_weights` => can be either...
- String of form 'uniform|weighted_by_bp', in which case every species' chromosomes will be sampled accordingly
- Dict of form {species: [chromosome weight1, chromosome weight 2, ...]
`species_weights` => can be either...
- String of form 'uniform|weighted_by_bp'
- List of form [ species weight1, species weight2, ... ]
"""
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.species = species
self.species_dir = species_dir
self.split = split
self.total_size = total_size
self.task = task
self.d_output = len(self.species) if task == 'species_classification' else None
is_show_log: bool = False
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
if task == 'species_classification' and self.d_output < 2:
print(f'Note that `d_output` should be >= 2 for task `{task}`, otherwise you are only predicting one class. Got {self.d_output}')
# Store FASTAs for each species
self.fastas: Dict[str, Dict[str, Fasta]] = collections.defaultdict(dict) # [key] = species -> dict where [key] = chromosome, [value] = Fasta object
self.chromosomes: Dict[str, List[str]] = {} # [key] = species, [value] = list of chromosomes in this split
self.chromosome_weights: Dict[str, List[float]] = {} # [key] = species, [value] = list where [idx] = self.chromosomes[species][idx], [value] = weight
self.species_weights: List[float] = [] # [idx] = self.species[idx], [value] = weight
# For every species in `self.species`, load all chromosomes belonging to `split`
for spec in self.species:
species_path = Path(self.species_dir) / spec
assert species_path.exists(), f'The path `{species_path}` does not exist for species `{spec}`. Please point to a valid directory containing your species fna.gz files.'
# Select chromosomes for this split
assert spec in SPECIES_CHROMOSOME_SPLITS, f'Unrecognized species `{spec}`. Valid species are: {list(SPECIES_CHROMOSOME_SPLITS.keys())}.'
self.chromosomes[spec] = SPECIES_CHROMOSOME_SPLITS[spec][split]
# Load all .fna files of chromosomes in this split
for chromosome in self.chromosomes[spec]:
# Unzip if necessary
gz_file_path = os.path.join(species_path, f'chr{chromosome}.fna.gz')
if os.path.exists(gz_file_path) and not (
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fna')) or
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fa'))
):
if is_show_log:
print(f"Unzipping {gz_file_path}...")
with gzip.open(gz_file_path, 'rb') as f_in:
with open(os.path.join(species_path, f'chr{chromosome}.fna'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# Read .fna or .fa file, whichever we can find
file_paths = [ os.path.join(species_path, x) for x in [ f'chr{chromosome}.fna', f'chr{chromosome}.fa' ] ]
is_file_found: bool = False
for file_path in file_paths:
if os.path.exists(file_path):
if chromosome not in self.fastas[spec]:
self.fastas[spec][chromosome] = Fasta(file_path, sequence_always_upper=True)
is_file_found = True
if not is_file_found:
raise FileNotFoundError(f'Could not find any of these files: `{file_paths}`. Please point to a valid directory containing all .fna files for species `{spec}`.\nExpected chromosomes: {self.chromosomes[spec]}.')
if is_show_log:
print(f"Species: {spec}")
print(f"Split: {split}")
print(f"Chromosomes: {self.chromosomes[spec]}")
print(f"Loaded {len(self.fastas[spec])} FASTA files from {species_path}: {list(self.fastas[spec].keys())}")
# Set chromosome weights for sampling
if isinstance(chromosome_weights, dict):
assert len(chromosome_weights) == len(self.species), f"`chromosome_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(chromosome_weights)}."
self.chromosome_weights = chromosome_weights
elif chromosome_weights == 'uniform':
self.chromosome_weights = {
spec: 'uniform'
for spec in self.species
}
elif chromosome_weights == 'weighted_by_bp':
self.chromosome_weights = {
spec: 'weighted_by_bp'
for spec in self.species
}
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
for spec, strategy_or_weights in self.chromosome_weights.items():
if isinstance(strategy_or_weights, str):
if strategy_or_weights == 'uniform':
# Uniform weights
self.chromosome_weights[spec] = [1] * len(self.chromosomes[spec])
elif strategy_or_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.chromosome_weights[spec] = [
len(self.fastas[spec][chromosome])
for chromosome in self.chromosomes[spec]
]
self.chromosome_weights[spec] = [w / sum(self.chromosome_weights[spec]) for w in self.chromosome_weights[spec]]
else:
raise ValueError(f"Invalid chromosome_weights strategy: {strategy_or_weights}. Must be 'uniform' or 'weighted_by_bp'.")
            elif isinstance(strategy_or_weights, list):
                # Check that every chromosome in this split has a weight
                assert len(strategy_or_weights) == len(self.chromosomes[spec]), f"`chromosome_weights` must have a weight for each chromosome. Expected {len(self.chromosomes[spec])} weights for chromosomes {self.chromosomes[spec]}, instead got {len(strategy_or_weights)}."
                self.chromosome_weights[spec] = strategy_or_weights
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
# Set species weights for sampling
if isinstance(species_weights, list):
assert len(species_weights) == len(self.species), f"`species_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(species_weights)}."
self.species_weights = species_weights
elif species_weights == 'uniform':
# Uniform weights
self.species_weights = [1] * len(self.species)
elif species_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.species_weights = [
sum([
len(fasta)
for fasta in self.fastas[spec].values()
])
for spec in self.species
]
self.species_weights = [w / sum(self.species_weights) for w in self.species_weights]
else:
raise ValueError(f"Invalid species_weights: {species_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
if is_show_log:
print(f"Species weights: {list(zip(self.species, self.species_weights))}")
print(f"Chromosome weights: {self.chromosome_weights}")
def __len__(self):
assert self.total_size is not None, "Must set the `total_size` kwarg when you initialize `SpeciesDataset` before calling `__len__`."
return self.total_size
def __getitem__(self, idx):
"""Returns a sequence of length `max_length` from a random chromosome of a random species."""
is_show_log: bool = False
# sample a random species (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx)
spec: str = random.choices(self.species, weights=self.species_weights, k=1)[0]
# sample a random chromosome (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 1)
chromosome = random.choices(self.chromosomes[spec], weights=self.chromosome_weights[spec], k=1)[0]
# sample a random sequence of length `self.max_length` from this chromosome
# print("****", spec, chromosome, self.fastas[spec].keys(), idx)
fasta = self.fastas[spec][chromosome][0] # idx into 0 b/c only one fasta per chromosome
chromosome_length: int = len(fasta)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 2)
if self.remove_tail_ends:
if self.split == 'train':
cutoff = self.cutoff_train
else:
cutoff = self.cutoff_test
            # trim the leading `cutoff` fraction of the chromosome to avoid repeat-heavy ends
            left = int(chromosome_length * cutoff)
            # trim the trailing `cutoff` fraction of the chromosome to avoid repeat-heavy ends
            right = int(chromosome_length * (1 - cutoff))
else:
left = 0
right = chromosome_length - self.max_length
start: int = random.randint(left, right)
end: int = start + self.max_length
seq = str(fasta[start:min(end, right)])
# pad with Ns if necessary
seq = seq.rjust(end - start, "N")
assert len(seq) == self.max_length, f'Length of sequence ({len(seq)}) from interval ({start}, {end}) of chromosome {chromosome} (len={chromosome_length}) is not equal to `self.max_length` ({self.max_length})'
if is_show_log:
print(f"Sampled species: {spec}")
print(f"Sampled chromosome: {chromosome}")
print(f"Sampled sequence ({start}, {end}) of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
assert self.tokenizer is not None, f"Tokenizer cannot be `None`."
if self.tokenizer_name == 'char':
            seq = self.tokenizer(seq, add_special_tokens=False)  # tokenize without special tokens; eos is handled below
            seq = seq["input_ids"]  # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
) # add cls and eos token (+2)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
data = seq[:-1].clone() # remove eos
if self.task == 'next_token_pred':
target = seq[1:].clone() # offset by 1, includes eos
elif self.task == 'species_classification':
target = self.species.index(spec)
else:
raise ValueError(f"Invalid task: {self.task}")
if is_show_log:
print(f"Sampled tokens of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
print(f"Sampled target: {target}")
return data, target
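# Illustrative sketch (not part of the dataset above): `__getitem__` samples a species and then
# a chromosome with `random.choices`, using the normalized weights built in `__init__`.
# The species names and weights below are made-up placeholders.
def _example_two_level_sampling():
    import random
    species_weights = {'human': 0.6, 'mouse': 0.4}
    chromosome_weights = {'human': {'1': 0.7, '2': 0.3}, 'mouse': {'1': 0.5, '2': 0.5}}
    spec = random.choices(list(species_weights), weights=list(species_weights.values()), k=1)[0]
    chromosome = random.choices(list(chromosome_weights[spec]), weights=list(chromosome_weights[spec].values()), k=1)[0]
    return spec, chromosome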
| hyena-dna-main | src/dataloaders/datasets/species_dataset.py |
from pathlib import Path
from pyfaidx import Fasta
import torch
"""
Just a fixed length dataset for 2 test chromosomes, to ensure the test set is the same.
"""
# helper functions
def exists(val):
return val is not None
class HG38FixedDataset(torch.utils.data.Dataset):
    '''
    Build fixed, non-overlapping (chr, start, end) intervals from `chr_ranges`, then query the
    fasta file for each sequence, so that the test set is identical every epoch.
    '''
def __init__(
self,
fasta_file,
chr_ranges, # a dict of chr: (start, end) to use for test set
max_length,
pad_max_length=None,
tokenizer=None,
add_eos=False,
rc_aug=False, # not yet implemented
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer = tokenizer
self.add_eos = add_eos
# create a list of intervals from chr_ranges, from start to end of size max_length
self.intervals = self.create_fixed_intervals(chr_ranges, self.max_length)
# open fasta file
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
def create_fixed_intervals(self, chr_ranges, max_length):
"""
This will create a new df with non-overlapping sequences of max length, which ensures that the test set is the same every epoch.
It loops thru the each chr and its start / end range, and creates a sample of max length.
"""
print("creating new test set with fixed intervals of max_length...")
intervals = []
# loop thru each chr in chr_ranges, and create intervals of max_length from start to end
for chr_name, (start, end) in chr_ranges.items():
# create a list of intervals from start to end of size max_length
for i in range(start, end, max_length):
interval_end = min(i + max_length, end)
intervals.append((chr_name, i, interval_end))
return intervals
def __len__(self):
return len(self.intervals)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
row = self.intervals[idx]
chr_name, start, end = (row[0], row[1], row[2])
seq = str(self.seqs[chr_name][start:end])
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
            add_special_tokens=False)  # special tokens are handled manually below
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# # remove first token
# seq = seq[1:]
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, 11, self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target | hyena-dna-main | src/dataloaders/datasets/hg38_fixed_dataset.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import Counter
from collections import OrderedDict
import torch
import src.utils as utils
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose:
print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert '<eos>' not in sym
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
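# Minimal usage sketch of `Vocab` (illustrative only; the sentences below are made up):
# count tokenized sentences, build the index, then convert symbols to tensors.
def _example_vocab_usage():
    vocab = Vocab(special=['<eos>'])
    sents = [vocab.tokenize('Hello world', add_eos=True), vocab.tokenize('Hello again', add_eos=True)]
    vocab.count_sents(sents)
    vocab.build_vocab()
    return [vocab.convert_to_tensor(symbols) for symbols in sents]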
# Class OpenAIVocab has been adapted from
# https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py
class OpenAIVocab(Vocab):
def __init__(self, max_size=None, vocab_file=None):
from transformers import GPT2Tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
self.EOT = self.tokenizer.encoder['<|endoftext|>']
self.max_size = max_size
self.vocab_file = vocab_file
pad = 8
vocab_size = len(self.tokenizer)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:09d}'
self.tokenizer.add_tokens([token])
def __len__(self):
return len(self.tokenizer)
def count_file(self, path, verbose=False, add_eos=False):
# TODO: train from scratch, respect self.max_size
pass
def build_vocab(self):
pass
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor:
cached = path + '.bpe'
if os.path.exists(cached):
return torch.load(cached)
print(f'encoding file {path} ...')
assert os.path.exists(path), f"{path} doesn't exist"
with open(path, encoding='utf-8') as f:
# Suppress warnings about length.
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT])
with utils.distributed.sync_workers() as rank:
if rank == 0:
torch.save(out, cached)
return out
def tokenize(self, line, add_eos=False, add_double_eos=False):
return self.tokenizer.encode(line)
def convert_to_tensor(self, symbols):
return torch.LongTensor(symbols)
| hyena-dna-main | src/dataloaders/utils/vocabulary.py |
"""Utilities for special optimizer hyperparameters.
group_parameters_for_optimizer is a modification of timm's optimizer logic, which is currently unused
add_optimizer_hooks is an improved version that uses this codebase's _optim dictionary
"""
import inspect
import torch.nn as nn
import hydra
def add_optimizer_hooks(
model,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Separate out all parameters to those that will and won't experience regularizing weight decay
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
if (not bias_weight_decay and pn.endswith('bias')) \
or getattr(p, '_no_weight_decay', False) \
or isinstance(m, blacklist_weight_modules):
setattr(p, "_optim", {"weight_decay": 0.0})
def group_parameters_for_optimizer(
model,
optimizer_cfg,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Get the weight decay from the config, or from the default value of the optimizer constructor
# if it's not specified in the config.
if 'weight_decay' in optimizer_cfg:
weight_decay = optimizer_cfg.weight_decay
else:
# https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
if 'weight_decay' in signature.parameters:
weight_decay = signature.parameters['weight_decay'].default
if weight_decay is inspect.Parameter.empty:
weight_decay = 0.0
else:
weight_decay = 0.0
# If none of the parameters have weight decay anyway, and there are no parameters with special
# optimization params
if weight_decay == 0.0 and not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set()
skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords')
else set())
# Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
special = set()
whitelist_weight_modules = (nn.Linear, )
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if not p.requires_grad:
continue # frozen weights
if hasattr(p, '_optim'):
special.add(fpn)
elif fpn in skip or any(skip_keyword in fpn for skip_keyword in skip_keywords):
no_decay.add(fpn)
elif getattr(p, '_no_weight_decay', False):
no_decay.add(fpn)
elif not bias_weight_decay and pn.endswith('bias'):
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
# special case the position embedding parameter in the root GPT module as not decayed
if 'pos_emb' in param_dict:
no_decay.add('pos_emb')
# In case of parameter sharing, some parameters show up in decay but are not in param_dict.keys()
decay &= param_dict.keys()
decay |= (param_dict.keys() - no_decay - special)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, f"Parameters {str(inter_params)} made it into both decay/no_decay sets!"
assert len(param_dict.keys() - special - union_params) == 0, f"parameters {str(param_dict.keys() - union_params)} were not separated into either decay/no_decay set!"
if weight_decay == 0.0 or not no_decay:
param_groups = [{"params": [param_dict[pn] for pn in sorted(list(no_decay | decay))],
"weight_decay": weight_decay}]
else:
param_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [dict(s) for s in set(frozenset(param_dict[pn]._optim.items()) for pn in special)]
for hp in hps:
params = [param_dict[pn] for pn in sorted(list(special)) if param_dict[pn]._optim == hp]
param_groups.append({"params": params, **hp})
return param_groups
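# Illustrative sketch (not part of the original module): `group_parameters_for_optimizer` splits a
# model into decay / no-decay parameter groups (biases and normalization weights get no decay).
# The toy model and hyperparameters below are made up.
def _example_param_groups():
    import torch
    from omegaconf import DictConfig
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    param_groups = group_parameters_for_optimizer(model, DictConfig({"weight_decay": 0.01}))
    return torch.optim.AdamW(param_groups, lr=1e-3)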
| hyena-dna-main | src/utils/optim_groups.py |
""" Utilities for dealing with collection objects (lists, dicts) and configs """
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
def is_dict(x):
return isinstance(x, Mapping)
def to_dict(x, recursive=True):
"""Convert Sequence or Mapping object to dict
lists get converted to {0: x[0], 1: x[1], ...}
"""
if is_list(x):
x = {i: v for i, v in enumerate(x)}
if is_dict(x):
if recursive:
return {k: to_dict(v, recursive=recursive) for k, v in x.items()}
else:
return dict(x)
else:
return x
def to_list(x, recursive=False):
"""Convert an object to list.
    If Sequence (e.g. list, tuple, ListConfig): just return it
Special case: If non-recursive and not a list, wrap in list
"""
if is_list(x):
if recursive:
return [to_list(_x) for _x in x]
else:
return list(x)
else:
if recursive:
return x
else:
return [x]
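# Quick illustrative checks of the conversion helpers above (values are made up):
def _example_conversions():
    assert to_dict([10, 20]) == {0: 10, 1: 20}
    assert to_list(5) == [5]
    assert to_list((1, 2)) == [1, 2]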
def extract_attrs_from_obj(obj, *attrs):
if obj is None:
assert len(attrs) == 0
return []
return [getattr(obj, attr, None) for attr in attrs]
def auto_assign_attrs(cls, **kwargs):
for k, v in kwargs.items():
setattr(cls, k, v)
def instantiate(registry, config, *args, partial=False, wrap=None, **kwargs):
"""
registry: Dictionary mapping names to functions or target paths (e.g. {'model': 'models.SequenceModel'})
config: Dictionary with a '_name_' key indicating which element of the registry to grab, and kwargs to be passed into the target constructor
wrap: wrap the target class (e.g. ema optimizer or tasks.wrap)
*args, **kwargs: additional arguments to override the config to pass into the target constructor
"""
# Case 1: no config
if config is None:
return None
# Case 2a: string means _name_ was overloaded
if isinstance(config, str):
_name_ = None
_target_ = registry[config]
config = {}
# Case 2b: grab the desired callable from name
else:
_name_ = config.pop("_name_")
_target_ = registry[_name_]
# Retrieve the right constructor automatically based on type
if isinstance(_target_, str):
fn = hydra.utils.get_method(path=_target_)
elif isinstance(_target_, Callable):
fn = _target_
else:
raise NotImplementedError("instantiate target must be string or callable")
# Instantiate object
if wrap is not None:
fn = wrap(fn)
obj = functools.partial(fn, *args, **config, **kwargs)
# Restore _name_
if _name_ is not None:
config["_name_"] = _name_
if partial:
return obj
else:
return obj()
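# Minimal usage sketch of `instantiate` (illustrative; the registry and config below are made up):
# `_name_` selects the target from the registry and the remaining keys become constructor kwargs.
def _example_instantiate():
    registry = {"linear": "torch.nn.Linear"}
    config = {"_name_": "linear", "in_features": 4, "out_features": 2}
    return instantiate(registry, config, bias=False)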
def get_class(registry, _name_):
return hydra.utils.get_class(path=registry[_name_])
def omegaconf_filter_keys(d, fn=None):
"""Only keep keys where fn(key) is True. Support nested DictConfig.
# TODO can make this inplace?
"""
if fn is None:
fn = lambda _: True
if is_list(d):
return ListConfig([omegaconf_filter_keys(v, fn) for v in d])
elif is_dict(d):
return DictConfig(
{k: omegaconf_filter_keys(v, fn) for k, v in d.items() if fn(k)}
)
else:
return d
| hyena-dna-main | src/utils/config.py |
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr_scheduler.ReduceLROnPlateau",
"step": "torch.optim.lr_scheduler.StepLR",
"multistep": "torch.optim.lr_scheduler.MultiStepLR",
"cosine": "torch.optim.lr_scheduler.CosineAnnealingLR",
"constant_warmup": "transformers.get_constant_schedule_with_warmup",
"linear_warmup": "transformers.get_linear_schedule_with_warmup",
"cosine_warmup": "transformers.get_cosine_schedule_with_warmup",
"cosine_warmup_timm": "src.utils.optim.schedulers.TimmCosineLRScheduler",
}
model = {
# Backbones from this repo
"model": "src.models.sequence.SequenceModel",
"lm": "src.models.sequence.long_conv_lm.ConvLMHeadModel",
"lm_simple": "src.models.sequence.simple_lm.SimpleLMHeadModel",
"vit_b_16": "src.models.baselines.vit_all.vit_base_patch16_224",
"dna_embedding": "src.models.sequence.dna_embedding.DNAEmbeddingModel",
"bpnet": "src.models.sequence.hyena_bpnet.HyenaBPNet"
}
layer = {
"id": "src.models.sequence.base.SequenceIdentity",
"ff": "src.models.sequence.ff.FF",
"mha": "src.models.sequence.mha.MultiheadAttention",
"s4d": "src.models.sequence.ssm.s4d.S4D",
"s4_simple": "src.models.sequence.ssm.s4_simple.SimpleS4Wrapper",
"long-conv": "src.models.sequence.long_conv.LongConv",
"h3": "src.models.sequence.h3.H3",
"h3-conv": "src.models.sequence.h3_conv.H3Conv",
"hyena": "src.models.sequence.hyena.HyenaOperator",
"hyena-filter": "src.models.sequence.hyena.HyenaFilter",
"vit": "src.models.sequence.mha.VitAttention",
}
callbacks = {
"timer": "src.callbacks.timer.Timer",
"params": "src.callbacks.params.ParamsLog",
"learning_rate_monitor": "pytorch_lightning.callbacks.LearningRateMonitor",
"model_checkpoint": "pytorch_lightning.callbacks.ModelCheckpoint",
"early_stopping": "pytorch_lightning.callbacks.EarlyStopping",
"swa": "pytorch_lightning.callbacks.StochasticWeightAveraging",
"rich_model_summary": "pytorch_lightning.callbacks.RichModelSummary",
"rich_progress_bar": "pytorch_lightning.callbacks.RichProgressBar",
"progressive_resizing": "src.callbacks.progressive_resizing.ProgressiveResizing",
"seqlen_warmup": "src.callbacks.seqlen_warmup.SeqlenWarmup",
"seqlen_warmup_reload": "src.callbacks.seqlen_warmup_reload.SeqlenWarmupReload",
"gpu_affinity": "src.callbacks.gpu_affinity.GpuAffinity"
}
model_state_hook = {
'load_backbone': 'src.models.sequence.long_conv_lm.load_backbone',
}
| hyena-dna-main | src/utils/registry.py |
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
| hyena-dna-main | src/utils/__init__.py |
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
def transpose_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices = indices.T
indices = indices.reshape(h*w)
return indices
def snake_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices[1::2, :] = indices[1::2, ::-1]
indices = indices.reshape(h*w)
return indices
def hilbert_permutation(n):
m = int(math.log2(n))
assert n == 2**m
inds = decode(list(range(n*n)), 2, m)
ind_x, ind_y = inds.T
indices = np.arange(n*n).reshape((n, n))
indices = indices[ind_x, ind_y]
return(indices)
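# Quick illustrative check (not used elsewhere): the permutations above are index reorderings.
# For example, bitreversal_permutation(8) gives [0 4 2 6 1 5 3 7].
def _example_permutations():
    return {
        'bitreversal_8': bitreversal_permutation(8),
        'transpose_4x4': transpose_permutation(4, 4),
        'snake_4x4': snake_permutation(4, 4),
    }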
""" Hilbert curve utilities taken from https://github.com/PrincetonLIPS/numpy-hilbert-curve """
def decode(hilberts, num_dims, num_bits):
''' Decode an array of Hilbert integers into locations in a hypercube.
This is a vectorized-ish version of the Hilbert curve implementation by John
Skilling as described in:
Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.
Params:
-------
hilberts - An ndarray of Hilbert integers. Must be an integer dtype and
cannot have fewer bits than num_dims * num_bits.
num_dims - The dimensionality of the hypercube. Integer.
num_bits - The number of bits for each dimension. Integer.
Returns:
--------
The output is an ndarray of unsigned integers with the same shape as hilberts
but with an additional dimension of size num_dims.
'''
if num_dims*num_bits > 64:
raise ValueError(
'''
num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
into a uint64. Are you sure you need that many points on your Hilbert
curve?
        ''' % (num_dims, num_bits, num_dims*num_bits)
)
# Handle the case where we got handed a naked integer.
hilberts = np.atleast_1d(hilberts)
# Keep around the shape for later.
orig_shape = hilberts.shape
# Treat each of the hilberts as a sequence of eight uint8.
# This treats all of the inputs as uint64 and makes things uniform.
hh_uint8 = np.reshape(hilberts.ravel().astype('>u8').view(np.uint8), (-1, 8))
# Turn these lists of uints into lists of bits and then truncate to the size
# we actually need for using Skilling's procedure.
hh_bits = np.unpackbits(hh_uint8, axis=1)[:,-num_dims*num_bits:]
# Take the sequence of bits and Gray-code it.
gray = binary2gray(hh_bits)
# There has got to be a better way to do this.
# I could index them differently, but the eventual packbits likes it this way.
gray = np.swapaxes(
np.reshape(gray, (-1, num_bits, num_dims)),
axis1=1, axis2=2,
)
# Iterate backwards through the bits.
for bit in range(num_bits-1, -1, -1):
# Iterate backwards through the dimensions.
for dim in range(num_dims-1, -1, -1):
# Identify which ones have this bit active.
mask = gray[:,dim,bit]
# Where this bit is on, invert the 0 dimension for lower bits.
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], mask[:,np.newaxis])
# Where the bit is off, exchange the lower bits with the 0 dimension.
to_flip = np.logical_and(
np.logical_not(mask[:,np.newaxis]),
np.logical_xor(gray[:,0,bit+1:], gray[:,dim,bit+1:])
)
gray[:,dim,bit+1:] = np.logical_xor(gray[:,dim,bit+1:], to_flip)
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], to_flip)
# Pad back out to 64 bits.
extra_dims = 64 - num_bits
padded = np.pad(gray, ((0,0), (0,0), (extra_dims,0)),
mode='constant', constant_values=0)
# Now chop these up into blocks of 8.
locs_chopped = np.reshape(padded[:,:,::-1], (-1, num_dims, 8, 8))
# Take those blocks and turn them unto uint8s.
locs_uint8 = np.squeeze(np.packbits(locs_chopped, bitorder='little', axis=3))
# Finally, treat these as uint64s.
flat_locs = locs_uint8.view(np.uint64)
# Return them in the expected shape.
return np.reshape(flat_locs, (*orig_shape, num_dims))
def right_shift(binary, k=1, axis=-1):
''' Right shift an array of binary values.
Parameters:
-----------
binary: An ndarray of binary values.
k: The number of bits to shift. Default 1.
axis: The axis along which to shift. Default -1.
Returns:
--------
Returns an ndarray with zero prepended and the ends truncated, along
whatever axis was specified.
'''
# If we're shifting the whole thing, just return zeros.
if binary.shape[axis] <= k:
return np.zeros_like(binary)
# Determine the padding pattern.
padding = [(0,0)] * len(binary.shape)
padding[axis] = (k,0)
# Determine the slicing pattern to eliminate just the last one.
slicing = [slice(None)] * len(binary.shape)
slicing[axis] = slice(None, -k)
shifted = np.pad(binary[tuple(slicing)], padding,
mode='constant', constant_values=0)
return shifted
def binary2gray(binary, axis=-1):
''' Convert an array of binary values into Gray codes.
This uses the classic X ^ (X >> 1) trick to compute the Gray code.
Parameters:
-----------
binary: An ndarray of binary values.
axis: The axis along which to compute the gray code. Default=-1.
Returns:
--------
Returns an ndarray of Gray codes.
'''
shifted = right_shift(binary, axis=axis)
# Do the X ^ (X >> 1) trick.
gray = np.logical_xor(binary, shifted)
return gray
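# Tiny illustrative check of the X ^ (X >> 1) trick: binary 011 maps to Gray code 010.
def _example_gray_code():
    bits = np.array([[0, 1, 1]], dtype=np.uint8)
    return binary2gray(bits)  # expected [[0, 1, 0]]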
| hyena-dna-main | src/utils/permutations.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
import torch
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
    Call torch.distributed.barrier() if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def all_reduce_item(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
ret = value
return ret
def all_reduce_tensor(value, op='sum'):
"""
    All-reduces a tensor if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = value
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
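# Illustrative usage of `sync_workers` (safe in a single process): only rank 0 writes the file,
# and every worker synchronizes at the barrier on exit. The filename below is made up.
def _example_rank_zero_save(obj, path="checkpoint.pt"):
    with sync_workers() as rank:
        if rank == 0:
            torch.save(obj, path)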
| hyena-dna-main | src/utils/distributed.py |
""" Utils for the training loop. Copied from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py """
import logging
import os
import warnings
from typing import List, Sequence
import torch.nn as nn
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from src.utils.config import omegaconf_filter_keys
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
def process_config(config: DictConfig) -> DictConfig: # TODO because of filter_keys, this is no longer in place
"""A couple of optional utilities, controlled by main config file:
- disabling warnings
- easier access to debug mode
- forcing debug friendly configuration
Modifies DictConfig in place.
Args:
config (DictConfig): Configuration composed by Hydra.
"""
log = get_logger()
# Filter out keys that were used just for interpolation
# config = dictconfig_filter_keys(config, lambda k: not k.startswith('__'))
config = omegaconf_filter_keys(config, lambda k: not k.startswith('__'))
# enable adding new keys to config
OmegaConf.set_struct(config, False)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
if config.get("debug"):
log.info("Running in debug mode! <config.debug=True>")
config.trainer.fast_dev_run = True
# force debugger friendly configuration
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
# Debuggers don't like GPUs or multiprocessing
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.loader.get("pin_memory"):
config.loader.pin_memory = False
if config.loader.get("num_workers"):
config.loader.num_workers = 0
# disable adding new keys to config
# OmegaConf.set_struct(config, True) # [21-09-17 AG] I need this for .pop(_name_) pattern among other things
return config
@rank_zero_only
def print_config(
config: DictConfig,
resolve: bool = True,
save_cfg=True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
fields (Sequence[str], optional): Determines which main fields from config will
be printed and in what order.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
if save_cfg:
with open("config_tree.txt", "w") as fp:
rich.print(tree, file=fp)
def log_optimizer(logger, optimizer, keys):
""" Log values of particular keys from the optimizer's param groups """
keys = sorted(keys)
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
logger.info(' | '.join([
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
] + [f"{k} {v}" for k, v in group_hps.items()]))
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim) | hyena-dna-main | src/utils/train.py |
import torch
import torch.utils.benchmark as benchmark
def _get_gpu_mem(synchronize=True, empty_cache=True):
return torch.cuda.memory_allocated() / (
(2**20) * 1000
), torch.cuda.memory_cached() / ((2**20) * 1000)
def _generate_mem_hook(handle_ref, mem, idx, hook_type, exp):
def hook(self, *args):
if len(mem) == 0 or mem[-1]["exp"] != exp:
call_idx = 0
else:
call_idx = mem[-1]["call_idx"] + 1
mem_all, mem_cached = _get_gpu_mem()
torch.cuda.synchronize()
mem.append(
{
"layer_idx": idx,
"call_idx": call_idx,
"layer_type": type(self).__name__,
"exp": exp,
"hook_type": hook_type,
"mem_all": mem_all,
"mem_cached": mem_cached,
}
)
return hook
def _add_memory_hooks(idx, model, mem_log, exp, hr):
h = model.register_forward_pre_hook(
_generate_mem_hook(hr, mem_log, idx, "pre", exp)
)
hr.append(h)
h = model.register_forward_hook(_generate_mem_hook(hr, mem_log, idx, "fwd", exp))
hr.append(h)
h = model.register_backward_hook(_generate_mem_hook(hr, mem_log, idx, "bwd", exp))
hr.append(h)
def log_memory(model, inp, mem_log=None, exp=None):
mem_log = mem_log or []
exp = exp or f"exp_{len(mem_log)}"
hr = []
for idx, module in enumerate(model.modules()):
_add_memory_hooks(idx, module, mem_log, exp, hr)
out = model(inp)
if type(out) == tuple:
out = out[0].logits
loss = out.sum()
loss.backward()
[h.remove() for h in hr]
return mem_log
def benchmark_forward(
fn, *inputs, min_run_time=0.2, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the forward pass of an arbitrary function."""
if verbose:
print(desc, "- Forward pass")
t = benchmark.Timer(
stmt="fn(*inputs, **kwinputs)",
globals={"fn": fn, "inputs": inputs, "kwinputs": kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
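# Illustrative usage of `benchmark_forward` on a made-up CPU module; `m.mean` is the average
# runtime in seconds reported by torch.utils.benchmark.
def _example_benchmark_forward():
    model = torch.nn.Linear(128, 128)
    x = torch.randn(16, 128)
    t, m = benchmark_forward(model, x, repeats=3, desc="linear", verbose=False)
    return m.mean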
def benchmark_memory(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
fn(*inputs, **kwinputs)
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_memory_bwd(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
for input in inputs:
input = input.requires_grad_(True)
torch.cuda.synchronize()
y = fn(*inputs, **kwinputs)
y.sum().backward()
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_backward(
fn, *inputs, grad=None, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the backward pass of an arbitrary function."""
if verbose:
print(desc, "- Backward pass")
y = fn(*inputs, **kwinputs)
if not hasattr(y, "shape"):
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError("Grad shape does not match output shape")
t = benchmark.Timer(
stmt="y.backward(grad, retain_graph=True)",
globals={"y": y, "grad": grad},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
| hyena-dna-main | src/utils/profiling.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Lamb optimizer."""
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group['eps'])
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
@torch.jit.script
def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float,
beta2: float, step_size: float, eps: float, weight_decay: float):
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
step_size = group['lr']
param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg,
exp_avg_sq, beta1,
beta2, step_size,
group['eps'],
group['weight_decay'],
)
state['exp_avg'] = exp_avg
state['exp_avg_sq'] = exp_avg_sq
p.data = param
return loss
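# Minimal usage sketch (illustrative): Lamb/JITLamb follow the standard torch optimizer API.
# The toy model, data, and hyperparameters below are made up.
def _example_lamb_step():
    import torch.nn as nn
    model = nn.Linear(10, 1)
    opt = JITLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()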
| hyena-dna-main | src/utils/optim/lamb.py |
"""Custom learning rate schedulers"""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, warmup_step=0, **kwargs):
self.warmup_step = warmup_step
        super().__init__(optimizer, T_max - warmup_step, eta_min, **kwargs)
# Copied from CosineAnnealingLR, but adding warmup and changing self.last_epoch to
# self.last_epoch - self.warmup_step.
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == self.warmup_step: # also covers the case where both are 0
return self.base_lrs
elif self.last_epoch < self.warmup_step:
return [base_lr * (self.last_epoch + 1) / self.warmup_step for base_lr in self.base_lrs]
elif (self.last_epoch - self.warmup_step - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step) / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
_get_closed_form_lr = None
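# Illustrative usage of `CosineWarmup` (made-up optimizer and step counts): the learning rate
# ramps up linearly for `warmup_step` steps and then follows the cosine schedule.
def _example_cosine_warmup():
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    scheduler = CosineWarmup(opt, T_max=100, warmup_step=10)
    lrs = []
    for _ in range(20):
        opt.step()
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    return lrs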
def InvSqrt(optimizer, warmup_step):
""" Originally used for Transformer (in Attention is all you need)
"""
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. / (step ** 0.5) if step > warmup_step else (step + 1) / (warmup_step ** 1.5)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
def Constant(optimizer, warmup_step):
def lr_lambda(step):
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. if step > warmup_step else (step + 1) / warmup_step
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRScheduler so we can call scheduler.step() without passing in epoch.
It supports resuming as well.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_epoch = -1
self.step(epoch=0)
def step(self, epoch=None):
if epoch is None:
self._last_epoch += 1
else:
self._last_epoch = epoch
# We call either step or step_update, depending on whether we're using the scheduler every
# epoch or every step.
# Otherwise, lightning will always call step (i.e., meant for each epoch), and if we set
# scheduler interval to "step", then the learning rate update will be wrong.
if self.t_in_epochs:
super().step(epoch=self._last_epoch)
else:
super().step_update(num_updates=self._last_epoch)
| hyena-dna-main | src/utils/optim/schedulers.py |
""" Implementations of different types of residual functions. """
import torch
from torch import nn
class Residual(nn.Module):
""" Residual connection with constant affine weights. Can simulate standard residual, no residual, and "constant gates". """
def __init__(self, i_layer, d_input, d_model, alpha=1.0, beta=1.0):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__()
assert (d_input == d_model) or alpha == 0.0
self.i_layer = i_layer
self.d_input = d_input
self.d_model = d_model
self.alpha = alpha
self.beta = beta
@property
def d_output(self):
return self.d_model
def forward(self, x, y, transposed): # TODO documentation of transposed
y = self.beta*y if self.beta != 1.0 else y
return self.alpha * x + y if self.alpha else y
class Affine(Residual):
""" Residual connection with learnable scalar multipliers on the main branch
scalar: Single scalar multiplier, or one per dimension
scale, power: Initialize to scale * layer_num**(-power)
"""
def __init__(self, *args, scalar=True, gamma=0.0, **kwargs):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__(*args, **kwargs)
self.scalar = scalar
self.gamma = gamma
c = self.beta * self.i_layer ** (-self.gamma)
d = 1 if self.scalar else self.d_input
self.affine = nn.Parameter(c * torch.ones(d))
def forward(self, x, y, transposed): # TODO documentation of transposed
c = self.affine
if transposed: c = c.unsqueeze(-1)
return self.alpha * x + c * y
class Feedforward(Residual):
def __init__(self, *args):
# print("Feedforward extra kwargs", kwargs)
super().__init__(*args, alpha=0.0, beta=1.0)
class Highway(Residual):
def __init__(self, *args, scaling_correction=False, elemwise=False):
super().__init__(*args)
self.scaling_correction = 1.732 if scaling_correction else 1.0 # TODO
self.elemwise = elemwise
self.Wx = nn.Linear(self.d_input, self.d_input)
if self.elemwise:
self.Wy = nn.Parameter(torch.randn(self.d_input))
else:
self.Wy = nn.Linear(self.d_input, self.d_input)
def forward(self, x, y, transposed=False): # TODO handle this case
if self.elemwise:
y = self.Wy * y
else:
y = self.Wy(y)
r = torch.sigmoid(self.Wx(x) + y)
z = self.scaling_correction * (1.-r) * x + r * y
return z
class DecayResidual(Residual):
""" Residual connection that can decay the linear combination depending on depth. """
def __init__(self, *args, power=0.5, l2=True):
# print("DecayResidual extra kwargs", kwargs)
super().__init__(*args)
self.power = power
self.l2 = l2
def forward(self, x, y, transposed):
beta = self.i_layer ** (-self.power)
if self.l2:
alpha = (1. - beta**2)**0.5
else:
alpha = 1. - beta
return alpha * x + beta * y
registry = {
'F': Feedforward,
'N': Feedforward,
'R': Residual,
'H': Highway,
'D': DecayResidual,
'A': Affine,
'none': Feedforward,
'ff': Feedforward,
'feedforward': Feedforward,
'residual': Residual,
'highway': Highway,
'decay': DecayResidual,
'affine': Affine,
}
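# Added usage sketch (not part of the original file): instantiates two registry entries on toy
# tensors. With l2=True, DecayResidual chooses alpha so that alpha**2 + beta**2 == 1.
def _residual_demo():
    x = torch.randn(2, 10, 16)          # skip branch
    y = torch.randn(2, 10, 16)          # main branch output
    plain = registry['R'](1, 16, 16)    # i_layer, d_input, d_model
    decay = registry['D'](4, 16, 16)
    out_plain = plain(x, y, transposed=False)   # x + y
    out_decay = decay(x, y, transposed=False)   # sqrt(1 - 1/4) * x + (1/2) * y
    return out_plain.shape, out_decay.shape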
| hyena-dna-main | src/models/nn/residual.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # First term is the log-probability of cluster i, stored at head column cutoffs[0] + i - 1
                    logprob_i = head_logprob_i[:, self.cutoffs[0] + i - 1] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0) # TODO This should be a bug in the original implementation; it should go into the continue case above as well
return nll.mean() # TODO maybe cases for length or padding_mask
def compute_logits(self, hidden):
"""Compute full vector of logits
Adapted from https://github.com/kimiyoung/transformer-xl/issues/88
"""
hidden = hidden.reshape(-1, hidden.size(-1))
if self.n_clusters == 0:
logits = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
return logits
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
out_full_logps = [head_logprob[:, :self.cutoffs[0]]]
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(1, len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
head_logprob_i = head_logprob # .index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden # .index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob_i[:, self.cutoffs[0] + i - 1].view(-1, 1) + tail_logprob_i
offset += logprob_i.size(0)
out_full_logps.append(logprob_i)
out_full_logps = torch.cat(out_full_logps, dim = 1)
# print(torch.sum(out_full_ps), out_full_ps.shape)
return out_full_logps
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.reshape(-1)
# Changes from original impl
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
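# Added usage sketch (not part of the original file): pairs AdaptiveEmbedding with
# ProjectedAdaptiveLogSoftmax on a toy vocabulary. Sizes, cutoffs, and div_val are illustrative.
def _adaptive_softmax_demo():
    n_token, d_embed, d_proj = 1000, 32, 32
    cutoffs = [200, 600]
    emb = AdaptiveEmbedding(n_token, d_embed, d_proj, cutoffs=cutoffs, div_val=2)
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj, cutoffs=cutoffs,
                                       div_val=2, tie_projs=[False] * len(cutoffs))
    tokens = torch.randint(0, n_token, (4, 16))
    hidden = emb(tokens)                 # (4, 16, d_proj)
    loss = crit(hidden, tokens)          # mean negative log-likelihood over all positions
    return loss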
| hyena-dna-main | src/models/nn/adaptive_softmax.py |
from .components import LinearActivation, Activation, Normalization, DropoutNd
| hyena-dna-main | src/models/nn/__init__.py |
""" Utility wrappers around modules to let them handle Args and extra arguments """
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""
Given a callable f that can consume some named arguments,
wrap it with a kwargs that passes back any unused args
EXAMPLES
--------
Basic usage:
def foo(x, y=None):
return x
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
--------
The wrapped function can return its own argument dictionary,
which gets merged with the new kwargs.
def foo(x, y=None):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
def foo(x, y=None):
return x, {"y": y, "z": None}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'y': 1, 'z': 2})
--------
The wrapped function can have its own kwargs parameter:
def foo(x, y=None, **kw_args):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {})
--------
Partial functions and modules work automatically:
class Module:
def forward(self, x, y=0):
return x, {"y": y+1}
m = Module()
wrap_kwargs(m.forward)(0, y=1, z=2) == (0, {'y': 2, 'z': 2})
"""
sig = inspect.signature(f)
# Check if f already has kwargs
has_kwargs = any([
param.kind == inspect.Parameter.VAR_KEYWORD
for param in sig.parameters.values()
])
if has_kwargs:
@wraps(f)
def f_kwargs(*args, **kwargs):
y = f(*args, **kwargs)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return y
else:
return y, {}
else:
param_kwargs = inspect.Parameter("kwargs", kind=inspect.Parameter.VAR_KEYWORD)
sig_kwargs = inspect.Signature(parameters=list(sig.parameters.values())+[param_kwargs])
@wraps(f)
def f_kwargs(*args, **kwargs):
bound = sig_kwargs.bind(*args, **kwargs)
if "kwargs" in bound.arguments:
kwargs = bound.arguments.pop("kwargs")
else:
kwargs = {}
y = f(**bound.arguments)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return *y[:-1], {**y[-1], **kwargs}
else:
return y, kwargs
return f_kwargs
def discard_kwargs(f):
if f is None: return None
f_kwargs = wrap_kwargs(f)
@wraps(f)
def f_(*args, **kwargs):
return f_kwargs(*args, **kwargs)[0]
return f_
def PassthroughSequential(*modules):
"""Special Sequential module that chains kwargs.
Semantics are the same as nn.Sequential, with extra convenience features:
- Discard None modules
- Flatten inner Sequential modules
- In case with 0 or 1 Module, rename the class for ease of inspection
"""
def flatten(module):
if isinstance(module, nn.Sequential):
return sum([flatten(m) for m in module], [])
else:
return [module]
modules = flatten(nn.Sequential(*modules))
    modules = [module for module in modules if module is not None]
class Sequential(nn.Sequential):
def forward(self, x, **kwargs):
for layer in self:
x, kwargs = wrap_kwargs(layer.forward)(x, **kwargs)
return x, kwargs
def step(self, x, **kwargs):
for layer in self:
fn = getattr(layer, "step", layer.forward)
x, kwargs = wrap_kwargs(fn)(x, **kwargs)
return x, kwargs
if len(modules) == 0:
Sequential.__name__ = "Identity"
elif len(modules) == 1:
Sequential.__name__ = type(modules[0]).__name__
return Sequential(*modules)
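# Added illustration (not part of the original file): PassthroughSequential threads unused
# keyword arguments through layers that do not accept them. The _AddOne layer is hypothetical.
class _AddOne(nn.Module):
    def forward(self, x):
        return x + 1

def _passthrough_demo():
    seq = PassthroughSequential(_AddOne(), None, _AddOne())   # the None module is dropped
    y, kwargs = seq(torch.zeros(3), lengths=None)
    return y, kwargs                                          # (tensor([2., 2., 2.]), {'lengths': None})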
| hyena-dna-main | src/models/nn/utils.py |
""" Defines flexible gating mechanisms based on ideas from LSSL paper and UR-LSTM paper https://arxiv.org/abs/1910.09890 """
import torch
import torch.nn as nn
class Gate(nn.Module):
""" Implements gating mechanisms. TODO update this with more detailed description with reference to LSSL paper when it's on arxiv
Mechanisms:
N - No gate
G - Standard sigmoid gate
UR - Uniform refine gates
R - Refine gate
FS - Forward discretization, Sigmoid activation [equivalent to G]
BE - Backward discretization, Exp activation [equivalent to G]
BR - Backward discretization, Relu activation
TE - Trapezoid discretization, Exp activation
TR - Trapezoid discretization, Relu activation
TS - Trapezoid discretization, Sigmoid activation (0 to 2)
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism in ['G', 'FS', 'BE', 'BR', 'TE', 'TR', 'TS', 'ZE', 'ZR', 'ZS']:
self.W_g = preact_ctor(*preact_args)
elif self.mechanism in ['U', 'UT']:
self.W_g = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'UR':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'R':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
elif self.mechanism in ['GT']:
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
        elif self.mechanism == 'U':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
elif self.mechanism == 'UR':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'R':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'UT':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'GT':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
else:
g_preact = self.W_g(*inputs)
# if self.mechanism[1] == 'S':
# g = torch.sigmoid(g_preact)
# elif self.mechanism[1] == 'E':
# g = torch.exp(g_preact)
# elif self.mechanism[1] == 'R':
# g = torch.relu(g_preact)
if self.mechanism == 'FS':
g = torch.sigmoid(g_preact)
g = self.forward_diff(g)
elif self.mechanism == 'BE':
g = torch.exp(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'BR':
g = torch.relu(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'TS':
g = 2 * torch.sigmoid(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TE':
g = torch.exp(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TR':
g = torch.relu(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'ZE':
g = torch.exp(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZR':
g = torch.relu(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZS':
g = torch.sigmoid(g_preact)
g = self.zoh(g)
return g
def forward_diff(self, x):
return x
def backward_diff(self, x):
return x / (1+x)
def trapezoid(self, x):
return x / (1 + x/2)
def zoh(self, x):
return 1 - torch.exp(-x)
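# Added usage sketch (not part of the original file): builds a uniform-refine gate whose
# preactivation is a plain nn.Linear; the sizes below are illustrative only.
def _gate_demo():
    gate = Gate(size=8, preact_ctor=nn.Linear, preact_args=(16, 8), mechanism='UR')
    x = torch.randn(4, 16)
    g = gate(x)            # gate values in (0, 1), shape (4, 8)
    return g.shape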
| hyena-dna-main | src/models/nn/gate.py |
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
""" Reductions adapted from https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft """
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = scipy.fft.dct(np.eye(N), norm=norm, type=2).T
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
z = torch.zeros_like(x)
x = torch.stack([z, x], dim=-1)
x = x.view(x.shape[:-2] + (-1,))
y = torch.fft.fft(x)
y = y[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored
The reduction from the DSP forum is not quite correct in the complex input case.
halfshift(FFT[a, b, c, d, d, c, b, a]) -> [A, B, C, D, 0, -D, -C, -B]
In the case of real input, the intermediate step after FFT has form [A, B, C, D, 0, D*, C*, B*]
"""
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
y = torch.fft.fft(x)[..., :self.N]
y = y * self.Q
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
x = torch.cat([x[..., 0::2], x[..., 1::2].flip(-1)], dim=-1)
y = torch.fft.fft(x)
y = y * 2 * self.Q
if torch.is_complex(x):
y = torch.cat([y[..., :1], (y[..., 1:] + 1j * y[..., 1:].flip(-1)) / 2], dim=-1) # TODO in-place sum
else:
y = torch.real(y)
return y
class IDCT(nn.Module):
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = np.linalg.inv(scipy.fft.dct(np.eye(N), norm=norm, type=2).T)
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(2*self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x.flip(-1), -x[..., 1:], z, x[..., 1:].flip(-1)], dim=-1)
y = torch.fft.ifft(x)
y = y[..., 1:2*self.N:2]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x[..., 1:].flip(-1)], dim=-1)
x = x / self.Q
y = torch.fft.ifft(x)[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
raise NotImplementedError # Straightforward by inverting operations of DCT-II reduction
def test_dct_ii():
N = 8
dct = DCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n, dct.forward_n]
# Real case
print("DCT-II Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-II Complex input")
x = torch.randn(N) + 1j * torch.randn(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
def test_dct_iii():
N = 8
dct = IDCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n]
# Real case
print("DCT-III Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-III Complex input")
# x = torch.randn(N) + 1j * torch.randn(N)
x = 1j * torch.ones(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
| hyena-dna-main | src/models/nn/dxt.py |
""" Utility nn components, in particular handling activations, initializations, and normalization layers """
from functools import partial
import math
from typing import ForwardRef
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from opt_einsum import contract
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = True):
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
input (Tensor[N, ...]): The input tensor or arbitrary dimensions with the first one
being its batch i.e. a batch with ``N`` rows.
p (float): probability of the input to be zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
training: apply stochastic depth if is ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if p < 0.0 or p > 1.0:
raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
if mode not in ["batch", "row"]:
raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate).div_(survival_rate)
return input * noise
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
# TODO(karan): need to upgrade to torchvision==0.11.0 to use StochasticDepth directly
# from torchvision.ops import StochasticDepth
super().__init__()
self.p = p
self.mode = mode
def forward(self, input):
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + '('
tmpstr += 'p=' + str(self.p)
tmpstr += ', mode=' + str(self.mode)
tmpstr += ')'
return tmpstr
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
""" X: (batch, dim, lengths...) """
if self.training:
if not self.transposed: X = rearrange(X, 'b d ... -> b ... d')
# binomial = torch.distributions.binomial.Binomial(probs=1-self.p) # This is incredibly slow
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
# mask = self.binomial.sample(mask_shape)
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
X = X * mask * (1.0/(1-self.p))
if not self.transposed: X = rearrange(X, 'b ... d -> b d ...')
return X
return X
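# Added illustration (not part of the original file): with tie=True the dropout mask is shared
# across the length dimension, so each channel is either fully kept (and rescaled) or fully zeroed.
def _dropout_nd_demo():
    drop = DropoutNd(p=0.5, tie=True, transposed=True)
    drop.train()
    x = torch.ones(1, 4, 10)         # (batch, dim, length)
    return drop(x)                   # rows are all 0.0 or all 2.0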
def Activation(activation=None, size=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'softplus':
return nn.Softplus()
elif activation in ['sqrelu', 'relu2']:
return SquaredReLU()
elif activation == 'laplace':
return Laplace()
elif activation == 'ln':
return TransposedLN(dim)
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
""" Returns a linear nn.Module with control over axes order, initialization, and activation """
# Construct core module
# linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
linear_cls = TransposedLinear if transposed else nn.Linear
if activation == 'glu': d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, d_output, dim=1 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
class SquaredReLU(nn.Module):
def forward(self, x):
# return F.relu(x)**2
return torch.square(F.relu(x)) # Could this be faster?
def laplace(x, mu=0.707107, sigma=0.282095):
x = (x - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(x))
class Laplace(nn.Module):
def __init__(self, mu=0.707107, sigma=0.282095):
super().__init__()
self.mu = mu
self.sigma = sigma
def forward(self, x):
return laplace(x, mu=self.mu, sigma=self.sigma)
class TransposedLinear(nn.Module):
""" Linear module on the second-to-last dimension
Assumes shape (B, D, L), where L can be 1 or more axis
"""
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # nn.Linear default init
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
setattr(self.bias, "_optim", {"weight_decay": 0.0})
else:
self.bias = 0.0
def forward(self, x):
num_axis = len(x.shape[2:]) # num_axis in L, for broadcasting bias
y = contract('b u ..., v u -> b v ...', x, self.weight) + self.bias.view(-1, *[1]*num_axis)
return y
class TransposedLN(nn.Module):
""" LayerNorm module over second dimension
Assumes shape (B, D, L), where L can be 1 or more axis
    This is slow and a dedicated CUDA/Triton implementation should provide substantial end-to-end speedup
"""
def __init__(self, d, scalar=True):
super().__init__()
self.scalar = scalar
if self.scalar:
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
else:
self.ln = nn.LayerNorm(d)
def forward(self, x):
if self.scalar:
# calc. stats over D dim / channels
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
else:
# move channel to last axis, apply layer_norm, then move channel back to second axis
_x = self.ln(rearrange(x, 'b d ... -> b ... d'))
y = rearrange(_x, 'b ... d -> b d ...')
return y
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super().__init__()
self.transposed = transposed
self._name_ = _name_
if _name_ == 'layer':
self.channel = True # Normalize over channel dimension
if self.transposed:
self.norm = TransposedLN(d, **kwargs)
else:
self.norm = nn.LayerNorm(d, **kwargs)
elif _name_ == 'instance':
self.channel = False
norm_args = {'affine': False, 'track_running_stats': False}
norm_args.update(kwargs)
self.norm = nn.InstanceNorm1d(d, **norm_args) # (True, True) performs very poorly
elif _name_ == 'batch':
self.channel = False
norm_args = {'affine': True, 'track_running_stats': True}
norm_args.update(kwargs)
self.norm = nn.BatchNorm1d(d, **norm_args)
elif _name_ == 'group':
self.channel = False
            self.norm = nn.GroupNorm(1, d, **kwargs)
elif _name_ == 'none':
self.channel = True
self.norm = nn.Identity()
else: raise NotImplementedError
def forward(self, x):
# Handle higher dimension logic
shape = x.shape
if self.transposed:
x = rearrange(x, 'b d ... -> b d (...)')
else:
            x = rearrange(x, 'b ... d -> b (...) d')
# The cases of LayerNorm / no normalization are automatically handled in all cases
# Instance/Batch Norm work automatically with transposed axes
if self.channel or self.transposed:
x = self.norm(x)
else:
x = x.transpose(-1, -2)
x = self.norm(x)
x = x.transpose(-1, -2)
x = x.view(shape)
return x
def step(self, x, **kwargs):
assert self._name_ in ["layer", "none"]
if self.transposed: x = x.unsqueeze(-1)
x = self.forward(x)
if self.transposed: x = x.squeeze(-1)
return x
class TSNormalization(nn.Module):
def __init__(self, method, horizon):
super().__init__()
self.method = method
self.horizon = horizon
def forward(self, x):
# x must be BLD
if self.method == 'mean':
self.scale = x.abs()[:, :-self.horizon].mean(dim=1)[:, None, :]
return x / self.scale
elif self.method == 'last':
self.scale = x.abs()[:, -self.horizon-1][:, None, :]
return x / self.scale
return x
class TSInverseNormalization(nn.Module):
def __init__(self, method, normalizer):
super().__init__()
self.method = method
self.normalizer = normalizer
def forward(self, x):
if self.method == 'mean' or self.method == 'last':
return x * self.normalizer.scale
return x
class ReversibleInstanceNorm1dInput(nn.Module):
def __init__(self, d, transposed=False):
super().__init__()
        # BLD if transposed is False, otherwise BDL
self.transposed = transposed
self.norm = nn.InstanceNorm1d(d, affine=True, track_running_stats=False)
def forward(self, x):
# Means, stds
if not self.transposed:
x = x.transpose(-1, -2)
self.s, self.m = torch.std_mean(x, dim=-1, unbiased=False, keepdim=True)
self.s += 1e-4
x = (x - self.m) / self.s
# x = self.norm.weight.unsqueeze(-1) * x + self.norm.bias.unsqueeze(-1)
if not self.transposed:
return x.transpose(-1, -2)
return x
class ReversibleInstanceNorm1dOutput(nn.Module):
def __init__(self, norm_input):
super().__init__()
self.transposed = norm_input.transposed
self.weight = norm_input.norm.weight
self.bias = norm_input.norm.bias
self.norm_input = norm_input
def forward(self, x):
if not self.transposed:
x = x.transpose(-1, -2)
# x = (x - self.bias.unsqueeze(-1))/self.weight.unsqueeze(-1)
x = x * self.norm_input.s + self.norm_input.m
if not self.transposed:
return x.transpose(-1, -2)
return x
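# Added usage sketch (not part of the original file): a transposed GLU projection followed by
# layer normalization, both operating on (batch, dim, length) tensors.
def _linear_activation_demo():
    x = torch.randn(2, 16, 50)
    proj = LinearActivation(16, 32, transposed=True, activation='glu', activate=True)
    norm = Normalization(32, transposed=True, _name_='layer')
    y = norm(proj(x))
    return y.shape                   # torch.Size([2, 32, 50])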
| hyena-dna-main | src/models/nn/components.py |
# Copyright (c) 2023, Tri Dao, Dan Fu.
# Simplified, mostly standalone version of LongConvLM for synthetics.
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import StochasticDepth
from einops import rearrange
from src.utils import instantiate
import src.utils.registry as registry
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
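# Added usage sketch (not part of the original file): runs causal attention on a random packed
# qkv tensor of shape (batch, seqlen, 3, num_heads, head_dim).
def _self_attention_demo():
    attn = SelfAttention(causal=True, attention_dropout=0.0)
    qkv = torch.randn(2, 8, 3, 4, 16)
    out = attn(qkv)
    return out.shape                 # torch.Size([2, 8, 4, 16])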
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
# output projection always have the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
If max_position_embeddings <= 0, there's no position embeddings
        If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension,
        then project up to embed_dim
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls(dim)
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
class SimpleLMHeadModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
lm_logits = self.lm_head(hidden_states)
CausalLMOutput = namedtuple('CausalLMOutput', ['logits'])
return CausalLMOutput(logits=lm_logits), None
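# Added usage sketch (not part of the original file): builds a tiny attention-only language
# model by listing every layer index in attn_layer_idx, which bypasses the layer registry path.
def _simple_lm_demo():
    model = SimpleLMHeadModel(
        d_model=64, n_layer=2, d_inner=128, vocab_size=32,
        attn_layer_idx=[0, 1], attn_cfg={'num_heads': 4},
        max_position_embeddings=128,
    )
    input_ids = torch.randint(0, 32, (2, 16))
    output, _ = model(input_ids)
    return output.logits.shape       # torch.Size([2, 16, 32])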
| hyena-dna-main | src/models/sequence/simple_lm.py |
""" Implementation of FFN block in the style of Transformers """
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FF(SequenceModule):
def __init__(self, d_input, expand=2, d_output=None, transposed=False, activation='gelu', initializer=None, dropout=0.0, tie_dropout=False):
super().__init__()
self.d_output = d_input if d_output is None else d_output
self.transposed = transposed
d_inner = expand * d_input
linear1 = LinearActivation(
d_input, d_inner,
transposed=transposed,
activation=activation,
initializer=initializer,
activate=True,
)
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
# dropout_cls = nn.Dropout2d if self.transposed else nn.Dropout
drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
linear2 = LinearActivation(
d_inner, self.d_output,
transposed=transposed,
activation=None,
initializer=initializer,
activate=False,
)
self.ff = nn.Sequential(
linear1,
drop,
linear2,
)
def forward(self, x, *args, **kwargs):
return self.ff(x), None
def step(self, x, state, **kwargs):
# x: [batch, d_input]
if self.transposed:
# expects: [batch, d_input, seq_len]
return self.ff(x.unsqueeze(-1)).squeeze(-1), state
else:
return self.ff(x), state
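# Added usage sketch (not part of the original file): a 2x-expansion feed-forward block applied
# to (batch, length, dim) inputs.
def _ff_demo():
    import torch
    ff = FF(d_input=32, expand=2, dropout=0.1)
    x = torch.randn(4, 100, 32)
    y, _ = ff(x)
    return y.shape                   # same shape as x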
| hyena-dna-main | src/models/sequence/ff.py |
'''PyTorch version of the block FFT convolution as described in the H3 paper.'''
import torch
from einops import rearrange
import math
from torch import nn
from src.models.nn import Activation
from src.utils.train import OptimModule
def ref_dft_matrix(N, H=1):
"""Compute the DFT matrix of size N x N.
This is where we could add extra compute for free."""
# n = torch.arange(N)
n = torch.arange(N).cuda()
k = n.view(-1, 1)
M = torch.exp(-2j * torch.pi * n * k / N)
return torch.view_as_real(M.repeat(H, 1, 1))
def compute_twiddle_factors(n, m):
"""Compute the twiddle factors of size n x m"""
# n_a = torch.arange(n).view(-1, 1)
# m_a = torch.arange(m)
n_a = torch.arange(n).cuda().view(-1, 1)
m_a = torch.arange(m).cuda()
N = n * m
M = torch.exp(-2j * torch.pi * n_a * m_a / N)
return torch.view_as_real(M)
def _cooley_tukey(
k, n, m,
dft_matrix=ref_dft_matrix,
max_m=16,
activation=None,
):
'''
Compute the FFT using the general Cooley-Tukey algorithm:
* Reshape to (m, n)
* Do n m-length FFTs along the rows
* Transpose to (n, m), multiply by twiddle factors
* Do m n-length FFTs along the rows
This function assumes that m <= 16 and recurses on n.
The base case is n <= 16 (we are simulating tensor cores of 16x16 mm).
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
assert m <= max_m
if activation is not None:
act_fn = Activation(activation)
k = rearrange(k, '... (m n) -> ... m n', m=m, n=n) # (m, n)
# do n m-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.einsum('... m o, ... o n -> ... m n', mat, k) # (..., m, n)
else:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... m o, ... o n -> ... m n', mat, k))
)) # (..., m, n)
# multiply by twiddle factors
twi = torch.view_as_complex(compute_twiddle_factors(n, m)) # (n, m)
k_f = torch.einsum('n m, ... m n -> ... n m', twi, k_f) # (..., n, m)
if n <= max_m:
# do m n-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.einsum('... n o, ... o m -> ... n m', mat, k_f) # (.., n, m)
else:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... n o, ... o m -> ... n m', mat, k_f))
)) # (.., n, m)
else:
# recurse
k_f = rearrange(k_f, '... h n m -> ... m h n')
k_f = _cooley_tukey(k_f, n // max_m, max_m, dft_matrix, max_m, activation)
k_f = rearrange(k_f, '... m h n -> ... h n m')
# reshape for the output
k_f = rearrange(k_f, '... n m -> ... (n m)') # (..., n*m)
return k_f
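# A reference sketch (not part of the original file): the same Cooley-Tukey
# factorization written with torch.fft on CPU for a single complex vector of length
# N = n * m, to make the reshape / row-FFT / twiddle / column-FFT steps above
# concrete. The helper name is hypothetical; up to float32 round-off its output
# should match torch.fft.fft(x).
def _cooley_tukey_reference(x, n, m):
    N = n * m
    X = x.reshape(m, n)                              # X[a, b] = x[a * n + b]
    Y = torch.fft.fft(X, dim=0)                      # n FFTs of length m (down the columns)
    q = torch.arange(n).view(-1, 1)
    p = torch.arange(m).view(1, -1)
    twiddle = torch.exp(-2j * torch.pi * p * q / N)  # (n, m) twiddle factors
    Z = twiddle * Y.transpose(0, 1)                  # transpose to (n, m) and modulate
    W = torch.fft.fft(Z, dim=0)                      # m FFTs of length n
    return W.reshape(-1)                             # out[r * m + p] = FFT(x)[r * m + p]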
def block_fft(
k, N,
dft_matrix=ref_dft_matrix,
max_m=16,
**kwargs,
):
'''
Compute the FFT of size N of the vector k, using _block_fft_recurse.
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
if not math.log(N, 2).is_integer():
N = int(2 ** math.ceil(math.log(N, 2)))
# pad k with zeros if necessary (e.g. for causality)
if k.shape[-1] != N:
k = nn.ConstantPad1d((0, N - k.shape[-1]), 0)(k)
if N <= max_m:
        mat = torch.view_as_complex(dft_matrix(N))
        return torch.einsum('... n o, ... o -> ... n', mat, k)  # (..., N)
n = N // max_m
m = max_m
return _cooley_tukey(k, n, m, dft_matrix, max_m, **kwargs)
class BlockFFT(OptimModule):
'''
Learnable Block FFT module.
Args:
        learn_dft_matrices (bool): If True, learn a different DFT matrix for lengths 2, 4, 8, and 16. If False, this module computes a normal FFT.
'''
def __init__(self, learn_dft_matrices=True, H=1, max_m=16, dft_lr=0.001, dropout=0, learn_additive=False, **block_fft_args):
super().__init__()
self.learn_dft_matrices = learn_dft_matrices
self.block_fft_args = block_fft_args
self.max_m=max_m
self.drop = torch.nn.Dropout(p=dropout)
self.learn_additive=learn_additive
# get the powers of 2 up to max_m
assert math.log(max_m, 2).is_integer(), 'max_m must be a power of 2'
self.powers = [ 2 ** (i + 1) for i in range(int(math.log(max_m, 2))) ]
if learn_dft_matrices:
            assert dft_lr > 0, "If learn_dft_matrices=True dft_lr must be positive"
            self.dft_matrices = nn.ParameterList()
            for n in self.powers:
                setattr(self, f"mat_{n}", nn.Parameter(
                    0.01 * torch.randn(H, n, n, 2) if self.learn_additive
                    else ref_dft_matrix(n, H=H),
                    requires_grad=True))
                self.register(f"mat_{n}", getattr(self, f"mat_{n}"), dft_lr)
                self.dft_matrices.append(getattr(self, f"mat_{n}"))
def compute_dft_matrix(self, n):
if not self.learn_dft_matrices:
return ref_dft_matrix(n)
else:
assert n in self.powers
if self.learn_additive:
mat = ref_dft_matrix(n)
return mat + self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
else:
return self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
def forward(self, x, N,forward=True):
'''Compute an FFT (forward=True) or iFFT (forward=False) of length N over x.'''
if forward:
return block_fft(x, N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args)
else:
return (1/(N))*torch.conj(block_fft(torch.conj(x), N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args))
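# A hedged usage sketch (not part of the original file): forward / inverse round trip
# through the block FFT. ref_dft_matrix above allocates on the GPU, so this assumes a
# CUDA device; the helper name is hypothetical.
def _block_fft_roundtrip(B=4, H=8, N=1024):
    blk = BlockFFT(learn_dft_matrices=False)
    x = torch.randn(B, H, N, dtype=torch.complex64).cuda()
    x_hat = blk(blk(x, N, forward=True), N, forward=False)
    return torch.max(torch.abs(x - x_hat)).item()  # should be ~0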
if __name__ == "__main__":
B = 128
H = 29
N = 8192
n = 2
m = 8
k = torch.randn(B, H, N).to(torch.complex64)
print(f'(B, H, N) = ({B}, {H}, {N})')
# test FFT
k_f = block_fft(k, N)
k_f_ref = torch.fft.fft(k, N)
print('L-inf error in FFT: ', torch.max(torch.abs(k_f - k_f_ref)).item()) | hyena-dna-main | src/models/sequence/block_fft.py |
from .base import SequenceModule, TransposedModule
from .model import SequenceModel
from .ff import FF
| hyena-dna-main | src/models/sequence/__init__.py |
from functools import partial
import torch
import torch.nn as nn
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
# grab all functions / modules from long_conv_lm.py
from src.models.sequence.long_conv_lm import LMBackbone
from src.models.sequence.long_conv_lm import _init_weights
class DNAEmbeddingModel(nn.Module, GenerationMixin):
"""DNA Embedding Model, which is the same as ConvLMHeadModel (in long_conv_lm.py), except no decoder head, we just pass back the hidden states for downstream tasks."""
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
Modifies state dict loading with custom function. This is necessary because the head of
    an LM outputs logits for the vocab, but we just need the embeddings for downstream tasks.
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
if loaded_params is None:
# This should never happen, it should be there!
print("Missing key in pretrained model!", key)
raise Exception
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict with the 'model.' prefix
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict | hyena-dna-main | src/models/sequence/dna_embedding.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import opt_einsum as oe
optimized = True
if optimized:
contract = oe.contract
else:
contract = torch.einsum
from src.models.nn import LinearActivation, Activation, DropoutNd
from src.models.sequence.block_fft import BlockFFT
from src.models.sequence.long_conv_kernel import LongConvKernel
class LongConv(nn.Module):
def __init__(
self,
d_model,
l_max=1024,
channels=1,
bidirectional=False,
# Arguments for position-wise feedforward components
activation='gelu', # activation between conv and FF
postact='glu', # activation after FF
initializer=None, # initializer on FF
weight_norm=False, # weight normalization on FF
dropout=0.0, tie_dropout=False,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
verbose=False,
block_fft_conv=False, # replace the FFT conv with Monarch blocks
block_fft_conv_args={},
# SSM Kernel arguments
**kernel_args,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum kernel length, also denoted by L
channels: can be interpreted as a number of "heads"; the SSM is a map from a 1-dim to C-dim sequence. It's not recommended to change this unless desperate for things to tune; instead, increase d_model for larger models
bidirectional: if True, convolution kernel will be two-sided
Position-wise feedforward components:
--------------------
activation: activation in between SS and FF
postact: activation after FF ('id' for no activation, None to remove FF layer)
initializer: initializer on FF
weight_norm: weight normalization on FF
dropout: standard dropout argument. tie_dropout=True ties the dropout mask across the sequence length, emulating nn.Dropout1d
Other arguments:
--------------------
transposed: choose backbone axis ordering of (B, L, H) (if False) or (B, H, L) (if True) [B=batch size, L=sequence length, H=hidden dimension]
"""
super().__init__()
if verbose:
import src.utils.train
log = src.utils.train.get_logger(__name__)
log.info(f"Constructing Long Conv (H, L) = ({d_model}, {l_max})")
self.d_model = d_model
self.H = d_model
self.L = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.block_fft_conv = block_fft_conv
self.block_fft_conv_args = block_fft_conv_args
self.D = nn.Parameter(torch.randn(channels, self.H))
if self.bidirectional:
channels *= 2
# SSM Kernel
self.kernel = LongConvKernel(self.H, L=self.L, channels=channels, verbose=verbose, **kernel_args)
if self.block_fft_conv:
self.block_fft_u = BlockFFT(**self.block_fft_conv_args)
self.block_fft_k = BlockFFT(**self.block_fft_conv_args)
# Pointwise
self.activation = Activation(activation)
# dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout # Broken in torch==1.11
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
if postact is None:
self.output_linear = nn.Identity()
else:
self.output_linear = LinearActivation(
self.d_model * self.channels,
self.d_model,
# self.H*self.channels,
# self.d_model*(1 if self.gate is None else self.gate),
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
def forward(self, u, state=None, rate=1.0, lengths=None, **kwargs): # absorbs return_output and transformer src mask
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed, remnant from state spaces repo
Returns: same shape as u
"""
if not self.transposed: u = u.transpose(-1, -2)
L = u.size(-1)
# Mask out padding tokens
# TODO handle option for mask - instead of lengths, which assumes suffix padding
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=u.device)
else:
lengths = None
if lengths is not None:
assert isinstance(lengths, torch.Tensor) and lengths.ndim == 1 and lengths.size(0) in [1, u.size(0)]
mask = torch.where(torch.arange(L, device=lengths.device) < lengths[:, None, None], 1., 0.)
u = u * mask
# Compute SS Kernel
L_kernel = L if self.L is None else min(L, round(self.L / rate))
k, _ = self.kernel(L=L_kernel, rate=rate, state=state) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
+ F.pad(k1.flip(-1), (L, 0))
if self.block_fft_conv:
k_f = self.block_fft_k(k.to(torch.complex64), N=L_kernel+L) # (C H L)
u_f = self.block_fft_u(u.to(torch.complex64), N=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
            if getattr(self, "learn_ifft", False):  # not set in __init__; defaults to the standard iFFT below
y = self.block_fft_u(y_f, N=L_kernel+L,forward=False).real[..., :L]
else:
y = torch.fft.ifft(y_f, n=L_kernel+L, dim=-1).real[..., :L] # (B C H L)
else:
k_f = torch.fft.rfft(k, n=L_kernel+L) # (C H L)
u_f = torch.fft.rfft(u, n=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
y = torch.fft.irfft(y_f, n=L_kernel+L)[..., :L] # (B C H L)
# Compute skip connection
y = y + contract('bhl,ch->bchl', u, self.D)
# Reshape to flatten channels
y = rearrange(y, '... c h l -> ... (c h) l')
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
y = self.dropout(y)
y = self.output_linear(y)
return y, None
@property
def d_state(self):
return self.H
@property
def d_output(self):
return self.d_model
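# A hedged usage sketch (not part of the original file): one forward pass through the
# long convolution layer in the default transposed (B, H, L) layout, with the dense
# LongConvKernel above. Sizes are illustrative and the helper name is hypothetical.
def _long_conv_demo(B=4, H=32, L=256):
    layer = LongConv(d_model=H, l_max=L, transposed=True)
    u = torch.randn(B, H, L)   # (batch, d_model, seq_len)
    y, _ = layer(u)            # output keeps the (B, H, L) shape
    return y.shape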
| hyena-dna-main | src/models/sequence/long_conv.py |
import copy
import math
import re
from functools import partial
from collections import namedtuple, OrderedDict
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from einops import rearrange
from flash_attn.modules.mha import MHA, ParallelMHA
from flash_attn.modules.mlp import Mlp, FusedMLP, ParallelFusedMLP
from flash_attn.modules.block import Block
from flash_attn.modules.embedding import GPT2Embeddings, ParallelGPT2Embeddings
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params, all_gather_raw
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
dropout_add_layer_norm = None
from src.utils import instantiate
import src.utils.registry as registry
class CheckpointedModule(torch.nn.Module):
def __init__(self, layer):
super().__init__()
self.layer = layer
def forward(self, x):
return checkpoint(self.layer, x)
def create_mixer_cls(
layer=None,
process_group=None,
attn_layer_idx=None,
attn_cfg=None,
layer_idx=None,
sequence_parallel=True,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop("causal", True)
fused_bias_fc = (
False if attn_cfg is None else attn_cfg.get("fused_bias_fc", False)
)
if not fused_bias_fc:
assert process_group is None, "TensorParallel MHA requires fused_bias_fc"
mha_cls = MHA if process_group is None else ParallelMHA
# ParallelMHA doesn't take 'fused_bias_fc', it is assumed that we fuse matmul + bias
if process_group is not None:
attn_cfg = copy.deepcopy(attn_cfg) # Don't modify the original cfg
attn_cfg.pop("fused_bias_fc", None)
mixer_cls = partial(
mha_cls,
causal=causal,
layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),
**parallel_kwargs,
**factory_kwargs,
)
else:
fused_bias_fc = False if layer is None else layer.get("fused_bias_fc", False)
if process_group is not None:
assert fused_bias_fc, "TensorParallel SSM requires fused_bias_fc"
mixer_cls = instantiate(
registry.layer,
layer,
partial=True,
layer_idx=layer_idx,
**factory_kwargs,
**parallel_kwargs,
)
# mixer_cls = partial(ssm_cls, layer_idx=layer_idx,
# **(ssm_cfg if ssm_cfg is not None else {}),
# **parallel_kwargs, **factory_kwargs)
return mixer_cls
def create_mlp_cls(
d_model,
d_inner=None,
process_group=None,
fused_mlp=False,
sequence_parallel=True,
identity_mlp=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
if process_group is not None:
assert fused_mlp, "Tensor Parallel is only implemented for FusedMLP"
if not fused_mlp and not identity_mlp:
mlp_cls = partial(
Mlp,
hidden_features=inner_dim,
activation=partial(F.gelu, approximate="tanh"),
**factory_kwargs,
)
elif fused_mlp:
mlp_cls = FusedMLP if process_group is None else ParallelFusedMLP
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
mlp_cls = partial(
mlp_cls, hidden_features=inner_dim, **parallel_kwargs, **factory_kwargs
)
else:
mlp_cls = nn.Identity
return mlp_cls
def create_block(
d_model,
d_inner=None,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
layer_norm_epsilon=1e-5,
resid_dropout1=0.0,
resid_dropout2=0.0,
residual_in_fp32=False,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
layer_idx=None,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
mixer_cls = create_mixer_cls(
layer=layer,
process_group=process_group,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_idx=layer_idx,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
mlp_cls = create_mlp_cls(
d_model,
d_inner=d_inner,
process_group=process_group,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(
d_model,
mixer_cls,
mlp_cls,
norm_cls=norm_cls,
prenorm=True,
resid_dropout1=resid_dropout1,
resid_dropout2=resid_dropout2,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel and process_group is not None,
mark_shared_params=process_group is not None,
)
block.layer_idx = layer_idx
if checkpoint_mlp:
block.mlp = CheckpointedModule(block.mlp)
if checkpoint_mixer:
block.mixer = CheckpointedModule(block.mixer)
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(
module,
n_layer,
initializer_range=0.02,
rescale_prenorm_residual=True,
glu_act=False,
):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(
p[: out_features // 2],
mean=0.0,
std=initializer_range / math.sqrt(2 * n_layer) * 2,
)
class LMBackbone(nn.Module):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
self.sequence_parallel = sequence_parallel
self.residual_in_fp32 = residual_in_fp32
if process_group is None:
self.embeddings = GPT2Embeddings(
d_model, vocab_size, max_position_embeddings, **factory_kwargs
)
else:
self.embeddings = ParallelGPT2Embeddings(
d_model,
vocab_size,
max_position_embeddings,
process_group=process_group,
sequence_parallel=self.sequence_parallel,
**factory_kwargs,
)
# We change the order of dropout, residual and layer norm:
# Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
# Dropout -> Add -> LN -> Attn / MLP, returning both the residual branch (output of Add) and
# the main branch (output of MLP). The model definition is unchanged, but the mapping of the
# nn.Dropout probabilities are changed.
# This is for performance reason: we can fuse dropout + add + layer_norm.
self.fused_dropout_add_ln = fused_dropout_add_ln
if self.fused_dropout_add_ln and dropout_add_layer_norm is None:
raise ImportError("dropout_add_layer_norm is not installed")
self.layers = nn.ModuleList(
[
create_block(
d_model,
d_inner=d_inner,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout,
residual_in_fp32=residual_in_fp32,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
layer_idx=i,
sequence_parallel=self.sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
)
for i in range(n_layer)
]
)
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
if process_group is not None:
for p in self.ln_f.parameters():
# Mark the norm parameters as "shared_params" so that we sync their values at init.
p._shared_params = True
# Mark the norm params as "sequence_parallel" so we run all-reduce on their grads.
if self.sequence_parallel:
p._sequence_parallel = True
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None):
# If using Tensor Parallel with sequence parallel, we combine the batch and the seqlen
# dimensions so that we can split on it easily, in case of small batch size.
# Only the attention/SSM layers need to know the seqlen.
embedding_kwargs = (
{"combine_batch_seqlen_dim": True}
if self.process_group is not None and self.sequence_parallel
else {}
)
hidden_states = self.embeddings(
input_ids, position_ids=position_ids, **embedding_kwargs
)
residual = None
mixer_kwargs = (
{"seqlen": input_ids.shape[1]}
if self.process_group is not None and self.sequence_parallel
else {}
)
if inference_params is not None:
mixer_kwargs["inference_params"] = inference_params
for layer in self.layers:
hidden_states, residual = layer(
hidden_states, residual, mixer_kwargs=mixer_kwargs
)
if not self.fused_dropout_add_ln:
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
else:
# Set prenorm=False here since we don't need the residual
hidden_states = dropout_add_layer_norm(
hidden_states,
residual,
self.ln_f.weight,
self.ln_f.bias,
self.drop_f.p if self.training else 0.0,
self.ln_f.eps,
prenorm=False,
residual_in_fp32=self.residual_in_fp32,
)
return hidden_states
class ConvLMHeadModel(nn.Module, GenerationMixin):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (
vocab_size % pad_vocab_size_multiple
)
self.backbone = LMBackbone(
d_model=d_model,
n_layer=n_layer,
d_inner=d_inner,
vocab_size=vocab_size,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout,
embed_dropout=embed_dropout,
dropout_cls=dropout_cls,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg,
fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
**kwargs,
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError("fused_dense_lib is not installed")
self.lm_head = ColumnParallelLinear(
d_model,
vocab_size,
process_group,
bias=False,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
# Initialize weights and apply final processing
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(
self, input_ids, position_ids=None, inference_params=None, state=None
): # state for the repo interface
hidden_states = self.backbone(
input_ids, position_ids=position_ids, inference_params=inference_params
)
lm_logits = self.lm_head(hidden_states)
# During inference, we want the full logit for sampling
if ColumnParallelLinear is not None and inference_params is not None:
if isinstance(self.lm_head, ColumnParallelLinear):
lm_logits, _ = all_gather_raw(lm_logits, self.lm_head.process_group)
lm_logits = rearrange(
lm_logits, "(n b) s d -> b s (n d)", b=hidden_states.shape[0]
)
CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
return CausalLMOutput(logits=lm_logits), None
class DNAEmbeddingModel(nn.Module, GenerationMixin):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
    Modifies state dict loading with custom function. Every layer in the new model is checked
    against the pretrained state dict: matching weights are loaded, while head / decoder
    weights are kept from scratch.
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
if loaded_params is None:
# This should never happen, it should be there!
print("Missing key in pretrained model!", key)
raise Exception
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict with the 'model.' prefix
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict
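# A hedged usage sketch (not part of the original file): load_backbone remaps a
# pretrained checkpoint (whose keys carry a "model." prefix) onto a scratch model,
# keeping head / decoder weights from scratch. The tiny module below is a stand-in for
# the real backbone and the helper name is hypothetical.
def _load_backbone_demo():
    scratch = nn.Sequential(nn.Embedding(8, 4), nn.Linear(4, 4))
    pretrained = {"model." + k: v.clone() for k, v in scratch.state_dict().items()}
    remapped = load_backbone(scratch, pretrained, freeze_backbone=True)
    return sorted(remapped.keys())  # keys come back with the "model." prefix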
def shard_state_dict_tp(state_dict, world_size, rank, pad_vocab_size_multiple=1):
"""Convert the state_dict of a standard SSM model to the state_dict of a SSM model
with tensor parallel.
"""
layer_idx_match = [
re.search(r"backbone\.layers\.(\d+)\.", k) for k in state_dict.keys()
]
num_hidden_layers = len(set(m.group(1) for m in layer_idx_match if m is not None))
vocab_size = state_dict["backbone.embeddings.word_embeddings.weight"].shape[0]
inner_dim, hidden_size = state_dict["backbone.layers.0.mlp.fc1.weight"].shape
vocab_size = (
math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
)
assert vocab_size % world_size == 0
assert hidden_size % world_size == 0
assert inner_dim % world_size == 0
def shard_dim(state_dict, key, dim=0):
x = state_dict[key]
dimension = x.shape[dim] // world_size
state_dict[key] = x.narrow(dim, rank * dimension, dimension)
def shard_qkv_headdim(state_dict, key):
x = rearrange(state_dict[key], "(three d) ... -> three d ...", three=3)
dim = x.shape[1] // world_size
state_dict[key] = rearrange(
x[:, rank * dim : (rank + 1) * dim], "three d ... -> (three d) ..."
)
shard_dim(state_dict, "backbone.embeddings.word_embeddings.weight", 0)
if "lm_head.weight" in state_dict:
shard_dim(state_dict, "lm_head.weight", 0)
if "backbone.embeddings.position_embeddings.weight" in state_dict:
shard_dim(state_dict, "backbone.embeddings.position_embeddings.weight", -1)
for i in range(num_hidden_layers):
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.weight")
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mixer.out_proj.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mixer.out_proj.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.weight", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.bias", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc2.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mlp.fc2.bias")
if f"backbone.layers.{i}.mixer.kernel.kernel.B" in state_dict:
for name in [
"D",
"ssm_k_D",
"kernel.kernel.B",
"kernel.kernel.inv_A_real",
"kernel.kernel.A_imag",
"ssm_k_kernel.kernel.B",
"kernel.kernel.log_dt",
]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 0)
for name in ["kernel.kernel.C", "ssm_k_kernel.kernel.C"]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 1)
return state_dict | hyena-dna-main | src/models/sequence/long_conv_lm.py |
""" Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation
"""
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from src.utils.config import to_list, to_dict
from src.models.sequence.block import SequenceResidualBlock
from src.models.sequence.base import SequenceModule
from src.models.nn.components import Normalization, DropoutNd
class SequenceModel(SequenceModule):
def __init__(
self,
d_model, # Resize input (useful for deep models with residuals)
n_layers=1, # Number of layers
transposed=False, # Transpose inputs so each layer receives (batch, dim, length)
dropout=0.0, # Dropout parameter applied on every residual and every layer
tie_dropout=False, # Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
prenorm=True, # Pre-norm vs. post-norm
n_repeat=1, # Each layer is repeated n times per stage before applying pooling
layer=None, # Layer config, must be specified
residual=None, # Residual config
norm=None, # Normalization config (e.g. layer vs batch)
pool=None, # Config for pooling layer per stage
track_norms=True, # Log norms of each layer output
dropinp=0.0, # Input dropout
):
super().__init__()
# Save arguments needed for forward pass
self.d_model = d_model
self.transposed = transposed
self.track_norms = track_norms
# Input dropout (not really used)
dropout_fn = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropinp) if dropinp > 0.0 else nn.Identity()
layer = to_list(layer, recursive=False)
# Some special arguments are passed into each layer
for _layer in layer:
# If layers don't specify dropout, add it
if _layer.get('dropout', None) is None:
_layer['dropout'] = dropout
# Ensure all layers are shaped the same way
_layer['transposed'] = transposed
# Duplicate layers
layers = layer * n_layers * n_repeat
# Instantiate layers
_layers = []
d = d_model
for l, layer in enumerate(layers):
# Pool at the end of every n_repeat blocks
pool_cfg = pool if (l+1) % n_repeat == 0 else None
block = SequenceResidualBlock(d, l+1, prenorm=prenorm, dropout=dropout, tie_dropout=tie_dropout, transposed=transposed, layer=layer, residual=residual, norm=norm, pool=pool_cfg)
_layers.append(block)
d = block.d_output
self.d_output = d
self.layers = nn.ModuleList(_layers)
if prenorm:
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(self.d_output, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(self.d_output, transposed=self.transposed, **norm)
else:
self.norm = nn.Identity()
def forward(self, inputs, *args, state=None, **kwargs):
""" Inputs assumed to be (batch, sequence, dim) """
if self.transposed: inputs = rearrange(inputs, 'b ... d -> b d ...')
inputs = self.drop(inputs)
# Track norms
if self.track_norms: output_norms = [torch.mean(inputs.detach() ** 2)]
# Apply layers
outputs = inputs
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
outputs, state = layer(outputs, *args, state=prev_state, **kwargs)
next_states.append(state)
if self.track_norms: output_norms.append(torch.mean(outputs.detach() ** 2))
if self.norm is not None: outputs = self.norm(outputs)
if self.transposed: outputs = rearrange(outputs, 'b d ... -> b ... d')
if self.track_norms:
metrics = to_dict(output_norms, recursive=False)
self.metrics = {f'norm/{i}': v for i, v in metrics.items()}
return outputs, next_states
@property
def d_state(self):
d_states = [layer.d_state for layer in self.layers]
return sum([d for d in d_states if d is not None])
@property
def state_to_tensor(self):
# Slightly hacky way to implement this in a curried manner (so that the function can be extracted from an instance)
# Somewhat more sound may be to turn this into a @staticmethod and grab subclasses using hydra.utils.get_class
def fn(state):
x = [_layer.state_to_tensor(_state) for (_layer, _state) in zip(self.layers, state)]
x = [_x for _x in x if _x is not None]
return torch.cat( x, dim=-1)
return fn
def default_state(self, *batch_shape, device=None):
return [layer.default_state(*batch_shape, device=device) for layer in self.layers]
def step(self, x, state, **kwargs):
# Apply layers
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
x, state = layer.step(x, state=prev_state, **kwargs)
next_states.append(state)
x = self.norm(x)
return x, next_states
| hyena-dna-main | src/models/sequence/model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
from src.utils.train import OptimModule
class LongConvKernel(OptimModule):
def __init__(
self,
H,
L,
channels=1,
learning_rate=None,
lam=0.1,
causal=True,
kernel_dropout=0,
weight_init="random",
use_ma_smoothing = False,
ma_window_len = 7,
smooth_freq = False,
**kwargs
):
super().__init__()
self.drop = torch.nn.Dropout(p=kernel_dropout)
self.H = H
self.weight_init = weight_init
self.causal = causal
self.L = L*2 if not causal else L
self.channels = channels
self.lam = lam
self.kernel = torch.nn.Parameter(self._parameter_initialization()) #(c,H,L)
self.register("kernel", self.kernel, learning_rate)
self.use_ma_smoothing=use_ma_smoothing
self.smooth_freq = smooth_freq
self.ma_window_len = ma_window_len
if self.use_ma_smoothing:
if smooth_freq:
weight = torch.arange(ma_window_len, dtype = self.kernel.dtype)
weight = torch.exp(-0.5 * torch.abs(weight - ma_window_len // 2) ** 2)
weight = repeat(weight, 'l -> h1 h2 l', h1 = self.H, h2 = 1)
weight = weight.type(torch.fft.rfft(self.kernel).dtype)
self.smooth_weight = weight
else:
self.ma_window_len = ma_window_len
assert self.ma_window_len%2!=0, "window size must be odd"
padding = (self.ma_window_len//2)
self.smooth = torch.nn.AvgPool1d(kernel_size=self.ma_window_len,stride=1,padding=padding)
def _parameter_initialization(self):
if self.weight_init=="random":
return torch.randn(self.channels, self.H, self.L) * 0.002
elif self.weight_init=="double_exp":
K = torch.randn(self.channels, self.H, self.L,dtype=torch.float32) * 0.02
double_exp = torch.zeros((self.H,self.L),dtype=torch.float32)
for i in range(self.H):
for j in range(self.L):
double_exp[i,j] = torch.exp(-(j/self.L)*torch.pow(torch.tensor(int(self.H/2)),torch.tensor(i/self.H)))
K = torch.einsum("c h l, h l -> c h l",K,double_exp)
return K
else: raise NotImplementedError(f"{self.weight_init} is not valid")
def forward(self, **kwargs):
k = self.kernel
if self.use_ma_smoothing:
if self.smooth_freq:
k_f = torch.fft.rfft(k, dim=-1)
k_f = F.conv1d(k_f, self.smooth_weight.to(k_f.device), padding='same', groups=self.H)
k = torch.fft.irfft(k_f, dim=-1)
else:
k = self.smooth(k)
k = F.relu(torch.abs(k)-self.lam)*torch.sign(k)
k = self.drop(k)
return k, None
@property
def d_output(self):
return self.H | hyena-dna-main | src/models/sequence/long_conv_kernel.py |
import math
import sys
from re import U
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange, repeat
try:
from src.ops.fftconv import fftconv_ref, fftconv_func, fftconv_heads_ref
except ImportError:
fftconv_func = None
try:
from flash_attn.ops.fused_dense import FusedDense
except ImportError:
FusedDense = None
import src.utils.registry as registry
from src.utils.train import OptimModule
from src.utils.config import instantiate, auto_assign_attrs
from src.models.nn import Activation
class FFTConvFuncv2(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k):
seqlen = u.shape[-1]
if len(u.shape) > 3:
k = k.unsqueeze(1)
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
ctx.save_for_backward(u_f, k_f)
return y
@staticmethod
def backward(ctx, dout):
u_f, k_f = ctx.saved_tensors
seqlen = dout.shape[-1]
fft_size = 2 * seqlen
dout_f = torch.fft.rfft(dout, n=fft_size)
du = torch.fft.irfft(dout_f * k_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
dk = torch.fft.irfft(dout_f * u_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
return du, dk.squeeze()
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3:
k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
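# A hedged sanity check (not part of the original file): with D = 0, no gelu and no
# dropout mask, fftconv_ref reduces to a causal depthwise convolution, so it should
# match an explicitly padded F.conv1d. The helper name is hypothetical.
def _fftconv_vs_direct(B=2, H=4, L=32):
    u = torch.randn(B, H, L)
    k = torch.randn(H, L)
    y_fft = fftconv_ref(u, k, D=torch.zeros(H), dropout_mask=None, gelu=False)
    w = torch.flip(k, dims=[-1]).unsqueeze(1)            # (H, 1, L) flipped kernel
    y_dir = F.conv1d(F.pad(u, (L - 1, 0)), w, groups=H)  # causal depthwise conv
    return torch.max(torch.abs(y_fft - y_dir)).item()    # should be ~1e-5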
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class Sin(nn.Module):
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = (
nn.Parameter(w * torch.ones(1, dim))
if train_freq
else w * torch.ones(1, dim)
)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float = 1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
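# A hedged shape sketch (not part of the original file): with emb_dim = 5 the filter
# MLP sees one time channel plus the real and imaginary parts of two complex
# exponential bands. The helper name is hypothetical.
def _positional_embedding_demo():
    pe = PositionalEmbedding(emb_dim=5, seq_len=1024)
    z, t = pe(256)
    return z.shape, t.shape  # (1, 256, 5), (1, 256, 1)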
class ExponentialModulation(OptimModule):
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
shift: float = 0.0,
**kwargs,
):
super().__init__()
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
linear_mixer=False,
modulate: bool = True,
normalized=False,
**kwargs,
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
auto_assign_attrs(
self, d_model=d_model, emb_dim=emb_dim, seq_len=seq_len, modulate=modulate
)
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
assert (
emb_dim % 2 != 0 and emb_dim >= 3
), "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
# uses a variable number of inner linear layers
if linear_mixer is False:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
# final linear layer
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
else:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, d_model, bias=False),
)
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
if self.modulate:
h = self.modulation(t, h)
if self.normalized:
h = h / torch.norm(h, dim=-1, p=1, keepdim=True)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None:
k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
if bias is None:
bias = self.bias
bias = bias if self.use_bias else 0 * bias
if self.fused_fft_conv:
bias = bias.to(dtype=torch.float32)
y = fftconv_func(
x,
k,
bias,
dropout_mask=None,
gelu=False,
force_fp16_output=torch.is_autocast_enabled(),
)
else:
y = fftconv_ref(x, k, bias, dropout_mask=None, gelu=False)
# y = (
# FFTConvFuncv2.apply(x, k.to(dtype=torch.float32))
# + bias.unsqueeze(-1) * x
# )
return y.to(dtype=x.dtype)
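# A hedged usage sketch (not part of the original file): materialize an implicit filter
# of a requested length from the MLP parametrization above. Sizes are illustrative and
# the helper name is hypothetical.
def _hyena_filter_demo():
    filt = HyenaFilter(d_model=16, emb_dim=3, order=16, seq_len=1024)
    k = filt.filter(256)  # (1, 256, 16): one length-256 kernel per channel
    return k.shape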
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
num_heads=1,
inner_factor=1,
num_blocks=1,
fused_bias_fc=False,
outer_mixing=False,
dropout=0.0,
filter_dropout=0.0,
filter_cls="hyena-filter",
post_order_ffn=False,
jit_filter=False,
short_filter_order=3,
activation="id",
return_state=False,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
            l_max: (int): Maximum input sequence length
order: (int): Depth of the Hyena recurrence. Defaults to 2
filter_order: (int): Width of the FFN parametrizing the implicit filter. Defaults to 64
num_heads: (int): Number of heads. Defaults to 1
inner_factor: (int): Width multiplier. Defaults to 1
num_blocks: (int): Number of blocks in sequence length. Defaults to 1
fused_bias_fc: (bool): Whether to use fused bias FC. Defaults to False
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
post_order_ffn: (bool): Apply a dense layer between steps of the recurrence. Defaults to False
jit_filter: (bool): Whether JIT the implicit filter function. Defaults to False
short_filter_order: (int): Length of the explicit input convolutional filter. Defaults to 3
activation: (str): type of act between kernel output and FF (default identity)
return_state: (bool): whether to return a state
"""
super().__init__()
assert (
d_model % num_heads == 0
), f"Model dimension {d_model} must be divisible by num heads {num_heads}"
assert (
l_max % num_blocks == 0
), f"Maximum signal length {l_max} must be divisible by block dimension {num_blocks}"
block_dim = l_max // num_blocks
head_dim = d_model // num_heads
auto_assign_attrs(
self,
d_model=d_model,
order=order,
l_max=l_max,
num_heads=num_heads,
inner_factor=inner_factor,
block_dim=block_dim,
head_dim=head_dim,
filter_order=filter_order,
post_order_ffn=post_order_ffn,
short_filter_order=short_filter_order,
num_blocks=num_blocks,
filter_dropout=filter_dropout,
jit_filter=jit_filter,
outer_mixing=outer_mixing,
activation=activation,
return_state=return_state,
)
self.activation = Activation(activation)
self.dropout = nn.Dropout(dropout)
self.setup_projections(fused_bias_fc, inner_factor)
self.setup_filters(filter_cls, filter_args)
def setup_projections(self, fused_bias_fc, inner_factor):
"Initializes input and output projections (over the width dimension)"
if fused_bias_fc and FusedDense is None:
raise ImportError("fused_dense is not installed")
linear_cls = nn.Linear if not fused_bias_fc else FusedDense
self.out_proj = linear_cls(self.d_model * inner_factor, self.d_model)
self.in_proj = linear_cls(self.d_model, (self.order + 1) * self.d_model)
if self.post_order_ffn:
self.ord_proj_w = nn.Parameter(
torch.randn(self.order, self.num_heads, self.num_heads)
/ math.sqrt(self.head_dim)
)
def setup_filters(self, filter_cls, filter_args):
"Initializes the explicit and implicit filters"
assert self.order >= 2, f"Order must be at least 2, (got {self.order})"
total_width = self.d_model * self.inner_factor * (self.order + 1)
self.short_filter = nn.Conv1d(
in_channels=total_width,
out_channels=total_width,
kernel_size=self.short_filter_order,
groups=total_width,
padding=self.short_filter_order - 1,
)
filter_cls = instantiate(registry.layer, filter_cls, partial=True)
self.filter_fn = filter_cls(
self.head_dim * self.inner_factor * (self.order - 1),
order=self.filter_order,
seq_len=self.l_max,
channels=1,
dropout=self.filter_dropout,
**filter_args,
)
if self.jit_filter:
            self.filter_fn = torch.jit.script(self.filter_fn)
def recurrence(self, u, state):
"Fast inference mode via distilled recurrence"
raise NotImplementedError("Working on it!")
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, "b l d -> b d l")
uc = self.short_filter(u)[..., :l_filter]
uc = rearrange(
uc,
"b (ho v) (z l) -> b ho v z l",
z=self.num_blocks,
ho=self.num_heads,
v=self.head_dim * (self.order + 1),
)
*x, v = uc.split(self.d_model, dim=2)
k = self.filter_fn.filter(l_filter)
# `c` is always 1 by default
k = rearrange(k, "c l (v o) -> c o v l", v=self.head_dim, o=self.order - 1)[0]
bias = rearrange(
self.filter_fn.bias, "(v o) -> o v", v=self.head_dim, o=self.order - 1
)
for o, x_i in enumerate(reversed(x[1:])):
if self.outer_mixing:
v = rearrange(v, "b h v z l -> b h 1 v z l")
v = self.dropout(v * rearrange(x_i, "b h v z l -> b h v 1 z l"))
v = v.sum(dim=2)
else:
v = self.dropout(v * x_i)
# the bias term is broadcasted. Last dimension (l) is handled by fftconv
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o, None, :, None])
if self.post_order_ffn:
w = self.ord_proj_w[o]
v = mul_sum(
rearrange(w, "h1 h2 -> 1 h1 h2 1 1 1"),
rearrange(v, "b h v z l -> b h 1 v z l"),
)
y = self.activation(
rearrange(
v * x[0],
"b h v z l -> b (z l) (h v)",
z=self.num_blocks,
h=self.num_heads,
)
)
y = self.out_proj(y)
if self.return_state:
return y, None
return y
@property
def d_output(self):
return self.d_model
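# A hedged usage sketch (not part of the original file): a forward pass through the
# Hyena operator in (batch, seq_len, d_model) layout. This assumes the default
# "hyena-filter" entry exists in the repo's layer registry; settings are illustrative
# and the helper name is hypothetical.
def _hyena_operator_demo(B=2, L=1024, D=64):
    layer = HyenaOperator(d_model=D, l_max=L, order=2, filter_order=64)
    x = torch.randn(B, L, D)
    y = layer(x)  # same (B, L, D) shape back
    return y.shape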
| hyena-dna-main | src/models/sequence/hyena.py |
""" Implements a full residual block around a black box layer
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from torch import nn
from functools import partial
import src.utils as utils
from src.models.nn.components import Normalization, StochasticDepth, DropoutNd
from src.models.sequence import SequenceModule
from src.models.sequence.pool import registry as pool_registry
from src.models.nn.residual import registry as residual_registry
import src.utils.registry as registry
class SequenceResidualBlock(SequenceModule):
def __init__(
self,
d_input,
i_layer=None, # Only needs to be passed into certain residuals like Decay
prenorm=True,
dropout=0.0,
tie_dropout=False,
transposed=False,
layer=None, # Config for black box module
residual=None, # Config for residual function
norm=None, # Config for normalization layer
pool=None,
drop_path=0.,
):
super().__init__()
self.i_layer = i_layer
self.d_input = d_input
self.layer = utils.instantiate(registry.layer, layer, d_input)
self.prenorm = prenorm
self.transposed = transposed
# Residual
# d_residual is the output dimension after residual
if residual is None:
self.residual = None
self.d_residual = self.layer.d_output
else:
self.residual = utils.instantiate(residual_registry, residual, i_layer, d_input, self.layer.d_output)
self.d_residual = self.residual.d_output
# Normalization
d_norm = d_input if self.prenorm else self.d_residual
# We don't use config to directly instantiate since Normalization has some special cases
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(d_norm, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(d_norm, transposed=self.transposed, **norm)
# Pool
self.pool = utils.instantiate(pool_registry, pool, self.d_residual, transposed=self.transposed)
# Dropout
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
# Stochastic depth
self.drop_path = StochasticDepth(drop_path, mode='row') if drop_path > 0.0 else nn.Identity()
@property
def d_output(self):
return self.pool.d_output if self.pool is not None else self.d_residual
@property
def d_state(self):
return self.layer.d_state
@property
def state_to_tensor(self):
return self.layer.state_to_tensor
def default_state(self, *args, **kwargs):
return self.layer.default_state(*args, **kwargs)
def forward(self, x, state=None, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm: y = self.norm(y)
# Black box layer
y, state = self.layer(y, state=state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, self.drop_path(self.drop(y)), self.transposed)
# Post-norm
if self.norm is not None and not self.prenorm: y = self.norm(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
def step(self, x, state, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm:
y = self.norm.step(y)
# Black box layer
y, state = self.layer.step(y, state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, y, transposed=False) # NOTE this would not work with concat residual function (catformer)
# Post-norm
if self.norm is not None and not self.prenorm:
y = self.norm.step(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
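# Illustrative usage sketch: wires a registered layer into SequenceResidualBlock and
# runs a (batch, length, d_input) tensor through it. The registry keys 'id' (an
# identity-like layer) and 'R' (a plain residual) are assumptions about this repo's
# registries, chosen only for demonstration.
def _sequence_residual_block_example():
    import torch
    block = SequenceResidualBlock(
        d_input=64,
        i_layer=1,
        prenorm=True,
        dropout=0.1,
        layer={'_name_': 'id'},   # assumed registry key for an identity layer
        residual='R',             # assumed registry key for a plain residual
        norm='layer',
    )
    x = torch.randn(2, 128, 64)   # (batch, length, d_input)
    y, state = block(x)
    return y.shape                # (2, 128, block.d_output)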
| hyena-dna-main | src/models/sequence/block.py |
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
""" Simple pooling functions that just downsample or repeat
stride: Subsample on the layer dimension
expand: Repeat on the feature dimension
"""
class DownSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.stride > 1:
assert x.ndim == 3, "Downsampling with higher-dimensional inputs is currently not supported. It is recommended to use average or spectral pooling instead."
if self.transposed:
x = x[..., 0::self.stride]
else:
x = x[..., 0::self.stride, :]
if self.expand > 1:
if self.transposed:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
else:
x = repeat(x, 'b ... d -> b ... (d e)', e=self.expand)
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class DownAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=None, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
if self.expand is not None:
self.linear = LinearActivation(
d_input,
d_input * expand,
transposed=transposed,
)
def forward(self, x):
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
if self.stride > 1:
# einops appears slower than F
if x.ndim == 3:
x = F.avg_pool1d(x, self.stride, self.stride)
elif x.ndim == 4:
x = F.avg_pool2d(x, self.stride, self.stride)
else:
# Reduction string e.g. "b d (l1 2) (l2 2) -> b d l1 l2"
reduce_str = "b d " + " ".join([f"(l{i} {self.stride})" for i in range(x.ndim-2)]) \
+ " -> b d " + " ".join([f"l{i}" for i in range(x.ndim-2)])
x = reduce(x, reduce_str, 'mean')
# if self.expand > 1:
# x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
if self.expand is not None:
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
if self.expand is None:
return self.d_input
else:
return self.d_input * self.expand
class DownSpectralPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
"""
x: (B, L..., D)
"""
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
shape = x.shape[2:]
x_f = torch.fft.ifftn(x, s=shape)
for axis, l in enumerate(shape):
assert l % self.stride == 0, 'input length must be divisible by stride'
new_l = l // self.stride
idx = torch.cat([torch.arange(0, new_l-new_l//2), l+torch.arange(-new_l//2, 0)]).to(x_f.device)
x_f = torch.index_select(x_f, 2+axis, idx)
x = torch.fft.ifftn(x_f, s=[l//self.stride for l in shape])
x = x.real
if self.expand > 1:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class UpSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.expand > 1:
if self.transposed:
x = reduce(x, '... (d e) l -> ... d l', 'mean', e=self.expand)
else:
x = reduce(x, '... (d e) -> ... d', 'mean', e=self.expand)
if self.stride > 1:
if self.transposed:
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class UpAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
assert d_input % expand == 0
self.d_input = d_input
self.stride = stride
self.expand = expand
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_input // expand,
transposed=transposed,
)
def forward(self, x):
# TODO only works for 1D right now
if x is None: return None
x = self.linear(x)
if self.stride > 1:
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class DownLinearPool(SequenceModule):
def __init__(self, d_model, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
self.d_model = d_model
self.stride = stride
self.expand = expand
self.transposed = transposed
self.linear = LinearActivation(
d_model * stride,
d_model * expand,
transposed=transposed,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
# if self.stride > 1 or self.expand > 1:
# raise NotImplementedError
# return x, state
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
@property
def d_output(self):
        return self.d_model * self.expand
class UpLinearPool(SequenceModule):
def __init__(self, d, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
# self.d_model = d * expand
# self.d_output = d
assert d % expand == 0
self.d_model = d
self.d_output = d // expand
# self._d_output = d_output
self.stride = stride
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
self.d_model,
self.d_output * stride,
transposed=transposed,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
# @property
# def d_output(self): return self._d_output
""" Pooling functions with trainable parameters """ # TODO make d_output expand instead
class DownPool2d(SequenceModule):
def __init__(self, d_input, d_output, stride=1, transposed=True, weight_norm=True):
super().__init__()
self.linear = LinearActivation(
d_input,
d_output,
transposed=transposed,
weight_norm=weight_norm,
        )
        self.transposed = transposed
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)
    def forward(self, x):
        if self.transposed:
            x = self.pool(x)
        x = self.linear(x)
        return x, None
# TODO DownPool/UpPool are currently used by unet/sashimi backbones
# DownLinearPool is used by the registry (for isotropic backbone)
# DownPool is essentially the same as DownLinearPool. These should be consolidated
class DownPool(SequenceModule):
def __init__(self, d_input, d_output=None, expand=None, stride=1, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
assert (d_output is None) + (expand is None) == 1
if d_output is None: d_output = d_input * expand
self.d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input * stride,
d_output,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
class UpPool(SequenceModule):
def __init__(self, d_input, d_output, stride, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
self.d_input = d_input
self._d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_output * stride,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
@property
def d_output(self): return self._d_output
registry = {
'sample': DownSample,
'pool': DownAvgPool,
'avg': DownAvgPool,
'linear': DownLinearPool,
'spectral': DownSpectralPool,
}
up_registry = {
# 'sample': UpSample,
'pool': UpAvgPool,
'avg': UpAvgPool,
'linear': UpLinearPool,
# 'spectral': UpSpectralPool, # Not implemented and no way to make this causal
}
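# Illustrative usage sketch: the registries above map config strings to pooling
# classes. This pairs a down-pool with the matching up-pool in the (B, L, D)
# layout (transposed=False); the sizes are assumptions for demonstration.
def _pool_registry_example():
    down = registry['avg'](d_input=32, stride=4, expand=2, transposed=False)              # DownAvgPool
    up = up_registry['avg'](d_input=down.d_output, stride=4, expand=2, transposed=False)  # UpAvgPool
    x = torch.randn(2, 64, 32)   # (batch, length, features)
    y, _ = down(x)               # (2, 16, 64): length / stride, features * expand
    z, _ = up(y)                 # (2, 64, 32): back to the original shape
    return y.shape, z.shape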
| hyena-dna-main | src/models/sequence/pool.py |
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface
A SequenceModule is generally a model that transforms an input of shape
(n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)
REQUIRED methods and attributes
forward, d_model, d_output: controls standard forward pass, a sequence-to-sequence transformation
__init__ should also satisfy the following interface; see SequenceIdentity for an example
def __init__(self, d_model, transposed=False, **kwargs)
OPTIONAL methods
default_state, step: allows stepping the model recurrently with a hidden state
state_to_tensor, d_state: allows decoding from hidden state
"""
@property
def d_model(self):
"""Model dimension (generally same as input dimension).
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, encoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_model")
return self._d_model
@d_model.setter
def d_model(self, d):
self._d_model = d
@property
def d_output(self):
"""Output dimension of model.
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, decoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_output", None) is None:
raise NotImplementedError("SequenceModule instantiation must specify d_output for decoder")
return self._d_output
@d_output.setter
def d_output(self, d):
self._d_output = d
def forward(self, x, state=None, **kwargs):
"""Forward pass of sequence model, a sequence-to-sequence transformation with an optional state.
Generally, this should map a tensor of shape (batch, length, self.d_model) to (batch, length, self.d_output)
Additionally, it returns a "state" which can be any additional information
For example, RNN and SSM layers may return their hidden state,
while some types of transformer layers (e.g. Transformer-XL) may want to pass a state as well
"""
return x, None
@property
def state_to_tensor(self):
"""Returns a function mapping a state to a single tensor.
This method should be implemented if one wants to use the hidden state instead of the output sequence for final prediction.
Currently only used with the StateDecoder.
"""
return lambda _: None
@property
def d_state(self):
""" Returns dimension of output of self.state_to_tensor """
return None
def default_state(self, *batch_shape, device=None):
"""Create initial state for a batch of inputs."""
return None
def step(self, x, state=None, **kwargs):
"""Step the model recurrently for one step of the input sequence.
For example, this should correspond to unrolling an RNN for one step.
If the forward pass has signature (B, L, H1) -> (B, L, H2),
this method should generally have signature (B, H1) -> (B, H2) with an optional recurrent state.
"""
raise NotImplementedError
def TransposedModule(module):
"""Wrap a SequenceModule class to accept transposed parameter, handle state, absorb kwargs"""
# https://stackoverflow.com/a/65470430/1980685
@functools.wraps(module, updated=())
class TransposedModule(module):
def __init__(self, *args, transposed=False, **kwargs):
super().__init__(*args, **kwargs)
self.transposed = transposed
def forward(self, x, state=None, **kwargs):
if self.transposed: x = x.transpose(-1, -2)
x, next_state = super().forward(x, state) # Don't use kwarg because nn.LSTM
next_state = None if state is None else next_state
if self.transposed: x = x.transpose(-1,-2)
return x, next_state
# https://stackoverflow.com/questions/5352781/how-to-set-class-names-dynamically
# TransposedModule.__name__ = module.__name__ # functools wraps is better solution
return TransposedModule
@TransposedModule
class SequenceIdentity(SequenceModule):
"""Simple SequenceModule for testing purposes"""
def __init__(self, d_model, dropout=0.0, **kwargs):
"""Default interface for SequenceModule
d_model: input dimension (sometimes denoted H for hidden dimension)
        transposed: if True, inputs have axis ordering (B, H, L) instead of (B, L, H)
"""
super().__init__()
self.d_model = d_model
self.d_output = d_model
def forward(self, x, state=None):
return x, state
def default_state(self, *batch_shape, device=None):
return None
def step(self, x, state=None, **kwargs):
return x, state
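# Illustrative usage sketch: SequenceIdentity exercises the SequenceModule contract,
# i.e. a (B, L, H) -> (B, L, H) forward pass plus a per-step recurrence. Sizes are
# assumptions for demonstration.
def _sequence_module_contract_example():
    import torch
    model = SequenceIdentity(d_model=16)
    x = torch.randn(4, 10, 16)
    y, state = model(x)                                                # (4, 10, 16), state is None
    y_step, state = model.step(x[:, 0], state=model.default_state())  # (4, 16)
    return y.shape, y_step.shape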
| hyena-dna-main | src/models/sequence/base.py |
""" Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface. """
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class MultiheadAttention(SequenceModule):
""" Simple wrapper for MultiheadAttention """
def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
super().__init__()
self.d_model = d_model
self.d_output = d_model
self.mha = nn.MultiheadAttention(d_model, n_heads, *args, batch_first=True, **kwargs)
self.causal = causal
def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
""" state should represent a mask and key padding mask """
if self.causal and attn_mask is None:
attn_mask = torch.triu(torch.ones(src.size(-2), src.size(-2),
dtype=torch.bool, device=src.device),
diagonal=1)
# attn_mask, key_padding_mask = state
# Note that this returns None for the second argument
y, _ = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
return y, None
def step(self, x, state):
# TODO proper cached inference
# x: (B, D)
pass
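# Illustrative usage sketch: with causal=True and no explicit attn_mask, the wrapper
# builds an upper-triangular boolean mask so position i cannot attend to positions
# greater than i. Sizes are assumptions for demonstration.
def _mha_wrapper_example():
    B, L, D, H = 2, 8, 32, 4
    mha = MultiheadAttention(d_model=D, n_heads=H, causal=True)
    x = torch.randn(B, L, D)
    y, _ = mha(x)      # (B, L, D); the second return value is always None
    return y.shape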
class VitAttention(SequenceModule):
"""Copied from implementation for ViT: only used for ViT model
This attention class makes several simplifying assumptions (commonly satisfied in vision
applications):
1. q = k = v
2. No masks: no attention mask, no key padding mask
3. Embed dimension = Input dimension, i.e. projection matrices are square.
"""
@property
def d_output(self):
return self.dim
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
# proj_drop=0.,
packed_linear=True,
linear_cfg=None,
**kwargs,
):
"""packed_linear: whether to pack all 3 q_proj, k_proj, v_proj into 2 matrix.
This option is to be compatible with T2T-ViT pretrained weights, where there's only one
projection weight matrix.
"""
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if linear_cfg is not None:
packed_linear = False
self.packed_linear = packed_linear
if packed_linear:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
else:
if linear_cfg is None:
linear_cfg = {'_target_': 'torch.nn.Linear'}
self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
# Removing this dropout because we do this in SequenceResidualBlock
# self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, state=None):
B, N, C = x.shape
if self.packed_linear:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
else:
q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
q, k, v = [rearrange(x, 'b n (h d) -> b h n d', h=self.num_heads) for x in (q, k, v)]
# attn = (q @ k.transpose(-2, -1) * self.scale)
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = q.size()
_, _, k_seq_len, _ = k.size()
q = rearrange(q, 'b h t d -> (b h) t d')
k = rearrange(k, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
'(b h) t s -> b h t s', h = self.num_heads)
attn = F.softmax(attn, dim=-1, dtype=v.dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
# x = self.proj_drop(x)
return x, None
| hyena-dna-main | src/models/sequence/mha.py |
import math
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
@torch.jit.script
def _mul_sum(y, q):
return (y * q).sum(dim=1)
# reference convolution with residual connection
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
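# Illustrative check (small sizes chosen only for demonstration): fftconv_ref is a
# causal, length-preserving convolution of u with kernel k plus the skip term
# u * D. The O(L^2) loop below computes the same thing directly.
def _fftconv_ref_check():
    B, H, L = 2, 3, 16
    u = torch.randn(B, H, L)
    k = torch.randn(H, L)
    D = torch.randn(H)
    ref = fftconv_ref(u, k, D, dropout_mask=None, gelu=False)
    direct = torch.zeros_like(u)
    for i in range(L):
        for j in range(i + 1):
            direct[..., i] += u[..., j] * k[..., i - j]
    direct = direct + u * D.unsqueeze(-1)
    return torch.allclose(ref, direct, atol=1e-4)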
# reference H3 forward pass
def fftconv_h3_ref(k, ssm_kernel, D, q, v, head_dim=1, ssm_kernel_rev=None):
seqlen = k.shape[-1]
fft_size = 2 * seqlen
kv = (rearrange(k, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
* rearrange(v, 'b (h d2) l -> b 1 d2 h l', d2=head_dim)) # b d1 d2 h l
kv_f = torch.fft.rfft(kv.to(dtype=ssm_kernel.dtype), n=fft_size) / fft_size
ssm_kernel_f = torch.fft.rfft(ssm_kernel, n=fft_size) # h L+1
if ssm_kernel_rev is not None:
ssm_kernel_rev_f = torch.fft.rfft(ssm_kernel_rev, n=fft_size) # h L+1
ssm_kernel_f = ssm_kernel_f + ssm_kernel_rev_f.conj()
y = torch.fft.irfft(kv_f * ssm_kernel_f, n=fft_size, norm='forward')[..., :seqlen] # b d1 d2 h l
out = y + kv * D.unsqueeze(-1) # b d1 d2 h l
q = rearrange(q, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
if head_dim > 1:
out = _mul_sum(out, q)
return rearrange(out, 'b d2 h l -> b (h d2) l').to(dtype=k.dtype)
else:
return rearrange(out * q, 'b 1 1 h l -> b h l').to(dtype=k.dtype)
class FFTConvFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
k_f = torch.fft.rfft(k, n=fft_size)
if k_rev is not None:
k_f = k_f + torch.fft.rfft(k_rev, n=fft_size).conj()
if u.stride(-1) != 1:
u = u.contiguous()
k_f = k_f.contiguous()
D = D.contiguous()
if v is not None and v.stride(-1) != 1:
v = v.contiguous()
if q is not None and q.stride(-1) != 1:
q = q.contiguous()
if dropout_mask is not None:
dropout_mask = dropout_mask.contiguous()
ctx.save_for_backward(u, k_f, D, dropout_mask, v, q)
ctx.output_hbl_layout = output_hbl_layout
ctx.head_dim = head_dim
ctx.gelu = gelu
ctx.fftfp16 = fftfp16
ctx.has_k_rev = k_rev is not None
out = fftconv_fwd(u, k_f, D, v, head_dim, q, dropout_mask, gelu, False, False, fft_size, force_fp16_output, output_hbl_layout, fftfp16)
return out
@staticmethod
def backward(ctx, dout):
if ctx.output_hbl_layout:
dout = rearrange(rearrange(dout, 'b h l -> h b l').contiguous(), 'h b l -> b h l')
else:
dout = dout.contiguous()
u, k_f, D, dropout_mask, v, q = ctx.saved_tensors
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
du, dk_f, dD, dv, dq = fftconv_bwd(dout, u, k_f, D, v, ctx.head_dim, q, dropout_mask, ctx.gelu, False, False, fft_size,
ctx.output_hbl_layout, ctx.fftfp16)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
dk_rev = (None if not ctx.has_k_rev
else torch.fft.irfft(dk_f.conj(), n=fft_size, norm='forward')[..., :seqlen])
if v is not None:
dv = dv.to(dtype=v.dtype) # We do atomicAdd in fp32 so might need to convert to fp16
return du, dk, dD, None, None, None, None, dv if v is not None else None, None, dq if q is not None else None, None, dk_rev
def fftconv_func(u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
return FFTConvFunc.apply(u, k, D, dropout_mask, gelu, force_fp16_output,
output_hbl_layout, v, head_dim, q, fftfp16, k_rev)
| hyena-dna-main | src/ops/fftconv.py |
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import math
import torch
from einops import rearrange, repeat
from opt_einsum import contract
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
try:
from cauchy_mult import vand_log_mult_sym_fwd, vand_log_mult_sym_bwd
except:
vand_log_mult_sym_fwd, vand_log_mult_sym_bwd = None, None
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
if conj:
x = _conj(x)
v = _conj(v)
vandermonde_matrix = x.unsqueeze(-1) ** torch.arange(L).to(x) # (... N L)
vandermonde_prod = torch.sum(v.unsqueeze(-1) * vandermonde_matrix, dim=-2) # (... L)
return vandermonde_prod
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
if conj:
return 2*vandermonde_prod.real
else:
return vandermonde_prod
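# Illustrative check (sizes are assumptions for demonstration): with conj=True,
# log_vandermonde_naive computes y[l] = 2 * Re( sum_n v[n] * exp(x[n] * l) ), i.e.
# the implicit conjugate pair contributes the factor 2 and the real part.
def _log_vandermonde_check():
    H, N, L = 2, 4, 8
    v = torch.randn(H, N, dtype=torch.cfloat)
    x = torch.randn(H, N, dtype=torch.cfloat)
    y = log_vandermonde_naive(v, x, L)                    # (H, L)
    l = torch.arange(L).to(x)
    explicit = 2 * torch.einsum('hn,hnl->hl', v, torch.exp(x.unsqueeze(-1) * l)).real
    return torch.allclose(y, explicit, atol=1e-4)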
def log_vandermonde_lazy(v, x, L, conj=True):
if conj:
v = _conj(v)
x = _conj(x)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
x_l = LazyTensor(rearrange(x, '... N -> ... N 1 1'))
l_l = LazyTensor(rearrange(l, '... L -> ... 1 L 1'))
# exp
vand = (x_l * l_l).exp()
s = (v_l*vand).sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def log_vandermonde(v, x, L, conj=True):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
if conj:
return 2*_r2c(r).real
else:
return _r2c(r)
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... L)
return vandermonde_prod
def log_vandermonde_transpose(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
V = Vandermonde(a, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
def _log_vandermonde_matmul(x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
return vandermonde_matrix
def log_vandermonde_matmul(v, K):
prod = contract('...n, ...nl -> ...l', v, K)
return 2*prod.real
class LogVandMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, x, L):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        if N not in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and x.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
ctx.save_for_backward(v, x)
return vand_log_mult_sym_fwd(v, x, L)
@staticmethod
def backward(ctx, dout):
v, x = ctx.saved_tensors
dv, dx = vand_log_mult_sym_bwd(v, x, dout)
return dv, dx, None
if vand_log_mult_sym_fwd is not None and vand_log_mult_sym_bwd is not None:
log_vandermonde_fast = LogVandMultiplySymmetric.apply
else:
log_vandermonde_fast = None | hyena-dna-main | src/ops/vandermonde.py |
""" Old utilities for parallel scan implementation of Linear RNNs. """
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
from src.utils.permutations import bitreversal_po2, bitreversal_permutation
### Utilities
def shift_up(a, s=None, drop=True, dim=0):
assert dim == 0
if s is None:
s = torch.zeros_like(a[0, ...])
s = s.unsqueeze(dim)
if drop:
a = a[:-1, ...]
return torch.cat((s, a), dim=dim)
def interleave(a, b, uneven=False, dim=0):
""" Interleave two tensors of same shape """
# assert(a.shape == b.shape)
assert dim == 0 # TODO temporary to make handling uneven case easier
if dim < 0:
        dim = a.dim() + dim
if uneven:
a_ = a[-1:, ...]
a = a[:-1, ...]
c = torch.stack((a, b), dim+1)
out_shape = list(a.shape)
out_shape[dim] *= 2
c = c.view(out_shape)
if uneven:
c = torch.cat((c, a_), dim=dim)
return c
def batch_mult(A, u, has_batch=None):
""" Matrix mult A @ u with special case to save memory if u has additional batch dim
The batch dimension is assumed to be the second dimension
A : (L, ..., N, N)
u : (L, [B], ..., N)
has_batch: True, False, or None. If None, determined automatically
Output:
x : (L, [B], ..., N)
A @ u broadcasted appropriately
"""
if has_batch is None:
has_batch = len(u.shape) >= len(A.shape)
if has_batch:
u = u.permute([0] + list(range(2, len(u.shape))) + [1])
else:
u = u.unsqueeze(-1)
v = (A @ u)
if has_batch:
v = v.permute([0] + [len(u.shape)-1] + list(range(1, len(u.shape)-1)))
else:
v = v[..., 0]
return v
### Main unrolling functions
def unroll(A, u):
"""
A : (..., N, N) # TODO I think this can't take batch dimension?
u : (L, ..., N)
output : x (..., N) # TODO a lot of these shapes are wrong
x[i, ...] = A^{i} @ u[0, ...] + ... + A @ u[i-1, ...] + u[i, ...]
"""
m = u.new_zeros(u.shape[1:])
outputs = []
for u_ in torch.unbind(u, dim=0):
m = F.linear(m, A) + u_
outputs.append(m)
output = torch.stack(outputs, dim=0)
return output
def parallel_unroll_recursive(A, u):
""" Bottom-up divide-and-conquer version of unroll. """
# Main recursive function
def parallel_unroll_recursive_(A, u):
if u.shape[0] == 1:
return u
u_evens = u[0::2, ...]
u_odds = u[1::2, ...]
# u2 = F.linear(u_evens, A) + u_odds
u2 = (A @ u_evens.unsqueeze(-1)).squeeze(-1) + u_odds
A2 = A @ A
x_odds = parallel_unroll_recursive_(A2, u2)
# x_evens = F.linear(shift_up(x_odds), A) + u_evens
x_evens = (A @ shift_up(x_odds).unsqueeze(-1)).squeeze(-1) + u_evens
x = interleave(x_evens, x_odds, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
return parallel_unroll_recursive_(A, u)[:n, ...]
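# Illustrative check (sizes are assumptions; L is deliberately not a power of 2):
# the divide-and-conquer version should reproduce the sequential recurrence
# x[i] = A x[i-1] + u[i] with x[-1] = 0.
def _unroll_equivalence_check():
    L_, N = 11, 4
    A = torch.randn(N, N) * 0.1
    u = torch.randn(L_, N)
    x_seq = unroll(A, u)
    x_par = parallel_unroll_recursive(A, u)
    return torch.allclose(x_seq, x_par, atol=1e-4)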
def parallel_unroll_recursive_br(A, u):
""" Same as parallel_unroll_recursive but uses bit reversal for locality. """
# Main recursive function
def parallel_unroll_recursive_br_(A, u):
n = u.shape[0]
if n == 1:
return u
m = n//2
u_0 = u[:m, ...]
u_1 = u[m:, ...]
u2 = F.linear(u_0, A) + u_1
A2 = A @ A
x_1 = parallel_unroll_recursive_br_(A2, u2)
x_0 = F.linear(shift_up(x_1), A) + u_0
# x = torch.cat((x_0, x_1), dim=0) # is there a way to do this with cat?
x = interleave(x_0, x_1, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
x = parallel_unroll_recursive_br_(A, u)
return x[:n, ...]
def parallel_unroll_iterative(A, u):
""" Bottom-up divide-and-conquer version of unroll, implemented iteratively """
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
# Main recursive loop, flattened
us = [] # stores the u_0 terms in the recursive version
N_ = N
As = [] # stores the A matrices
for l in range(m):
N_ = N_ // 2
As.append(A)
u_0 = u[:N_, ...]
us.append(u_0)
u = F.linear(u_0, A) + u[N_:, ...]
A = A @ A
x_0 = []
x = u # x_1
for l in range(m-1, -1, -1):
x_0 = F.linear(shift_up(x), As[l]) + us[l]
x = interleave(x_0, x, dim=0)
return x[:n, ...]
def variable_unroll_sequential(A, u, s=None, variable=True):
""" Unroll with variable (in time/length) transitions A.
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] @ s + A[i..1] @ u[0] + ... + A[i] @ u[i-1] + u[i]
"""
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
has_batch = len(u.shape) >= len(A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
# s = F.linear(s, A_) + u_
s = batch_mult(A_.unsqueeze(0), s.unsqueeze(0), has_batch)[0]
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll(A, u, s=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll. """
if u.shape[0] <= recurse_limit:
return variable_unroll_sequential(A, u, s, variable)
if s is None:
s = torch.zeros_like(u[0])
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = A_1 @ A_0_
# Recursive call
x_1 = variable_unroll(A_10, u_10, s, variable, recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
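# Illustrative check (sizes are assumptions; recurse_limit is lowered so the
# recursive path is exercised): with per-step transitions A[i], variable_unroll
# should match the sequential scan x[i] = A[i] x[i-1] + u[i].
def _variable_unroll_check():
    L_, N = 10, 4
    A = torch.randn(L_, N, N) * 0.2
    u = torch.randn(L_, N)
    x_seq = variable_unroll_sequential(A, u)
    x_par = variable_unroll(A, u, recurse_limit=2)
    return torch.allclose(x_seq, x_par, atol=1e-4)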
def variable_unroll_general_sequential(A, u, s, op, variable=True):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
if not variable:
A = A.expand((u.shape[0],) + A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
s = op(A_, s)
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll_matrix_sequential(A, u, s=None, variable=True):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
return variable_unroll_general_sequential(A, u, s, op, variable=True)
def variable_unroll_toeplitz_sequential(A, u, s=None, variable=True, pad=False):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
ret = variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply_padded, variable=True)
ret = ret[..., :n]
return ret
return variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply, variable=True)
### General parallel scan functions with generic binary composition operators
def variable_unroll_general(A, u, s, op, compose_op=None, sequential_op=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll.
compose is an optional function that defines how to compose A without multiplying by a leaf u
"""
if u.shape[0] <= recurse_limit:
if sequential_op is None:
sequential_op = op
return variable_unroll_general_sequential(A, u, s, sequential_op, variable)
if compose_op is None:
compose_op = op
uneven = u.shape[0] % 2 == 1
# has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = op(A_1, u_0_) # batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = compose_op(A_1, A_0_)
# Recursive call
x_1 = variable_unroll_general(A_10, u_10, s, op, compose_op, sequential_op, variable=variable, recurse_limit=recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = op(A_0, x_0) # batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_matrix(A, u, s=None, variable=True, recurse_limit=16):
if s is None:
s = torch.zeros_like(u[0])
has_batch = len(u.shape) >= len(A.shape)
op = lambda x, y: batch_mult(x, y, has_batch)
sequential_op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
matmul = lambda x, y: x @ y
return variable_unroll_general(A, u, s, op, compose_op=matmul, sequential_op=sequential_op, variable=variable, recurse_limit=recurse_limit)
def variable_unroll_toeplitz(A, u, s=None, variable=True, recurse_limit=8, pad=False):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (L, [B], ..., N) same shape as u
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
# Add the batch dimension to A if necessary
A_batch_dims = len(A.shape) - int(variable)
u_batch_dims = len(u.shape)-1
if u_batch_dims > A_batch_dims:
# assert u_batch_dims == A_batch_dims + 1
if variable:
while len(A.shape) < len(u.shape):
A = A.unsqueeze(1)
# else:
# A = A.unsqueeze(0)
if s is None:
s = torch.zeros_like(u[0])
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
op = triangular_toeplitz_multiply_padded
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
ret = ret[..., :n]
return ret
op = triangular_toeplitz_multiply
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
return ret
| hyena-dna-main | src/ops/unroll.py |
""" Compute a Krylov function efficiently. (S4 renames the Krylov function to a "state space kernel")
A : (N, N)
b : (N,)
c : (N,)
Return: [c^T A^i b for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from src.ops.toeplitz import causal_convolution
def krylov_sequential(L, A, b, c=None):
""" Constant matrix A
A : (..., N, N)
b : (..., N)
c : (..., N)
Returns
if c:
x : (..., L)
x[i, l] = c[i] @ A^l @ b[i]
else:
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
# Check which of dim b and c is smaller to save memory
if c is not None and c.numel() < b.numel():
return krylov_sequential(L, A.transpose(-1, -2), c, b)
b_ = b
x = []
for _ in range(L):
if c is not None:
x_ = torch.sum(c*b_, dim=-1) # (...) # could be faster with matmul or einsum?
else:
x_ = b_
x.append(x_)
b_ = (A @ b_.unsqueeze(-1)).squeeze(-1)
x = torch.stack(x, dim=-1)
return x
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
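# Illustrative check (sizes are assumptions for demonstration): the squaring-trick
# krylov should agree with the sequential definition x[l] = c^T A^l b.
def _krylov_check():
    L_, N = 9, 5
    A = torch.randn(N, N) * 0.3
    b = torch.randn(N)
    c = torch.randn(N)
    return torch.allclose(krylov(L_, A, b, c), krylov_sequential(L_, A, b, c), atol=1e-4)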
@torch.no_grad()
def power(L, A, v=None):
""" Compute A^L and the scan sum_i A^i v_i
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
if v is None:
powers = [powers[-1] @ powers[-1]]
else:
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
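# Illustrative check (sizes are assumptions for demonstration): without v,
# power(L, A) is A^L computed by repeated squaring.
def _power_check():
    N, L_ = 4, 7
    A = torch.randn(N, N) * 0.3
    return torch.allclose(power(L_, A), torch.linalg.matrix_power(A, L_), atol=1e-4)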
def krylov_toeplitz(L, A, b, c=None):
""" Specializes to lower triangular Toeplitz matrix A represented by its diagonals
A : (..., N)
b : (..., N)
c : (..., N)
Returns
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
x = b.unsqueeze(0) # (1, ..., N)
A_ = A
while x.shape[0] < L:
xx = causal_convolution(A_, x)
x = torch.cat([x, xx], dim=0) # there might be a more efficient way of ordering axes
A_ = causal_convolution(A_, A_)
x = x[:L, ...] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
def krylov_toeplitz_(L, A, b, c=None):
""" Padded version of krylov_toeplitz that saves some fft's
TODO currently not faster than original version, not sure why
"""
N = A.shape[-1]
x = b.unsqueeze(0) # (1, ..., N)
x = F.pad(x, (0, N))
A = F.pad(A, (0, N))
done = L == 1
while not done:
l = x.shape[0]
# Save memory on last iteration
if L - l <= l:
done = True
_x = x[:L-l]
else: _x = x
Af = torch.fft.rfft(A, n=2*N, dim=-1)
xf = torch.fft.rfft(_x, n=2*N, dim=-1)
xf_ = Af * xf
x_ = torch.fft.irfft(xf_, n=2*N, dim=-1)
x_[..., N:] = 0
x = torch.cat([x, x_], dim=0) # there might be a more efficient way of ordering axes
if not done:
A = torch.fft.irfft(Af*Af, n=2*N, dim=-1)
A[..., N:] = 0
x = x[:L, ..., :N] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
| hyena-dna-main | src/ops/krylov.py |
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
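# Illustrative check (sizes are assumptions for demonstration): multiplying a
# lower-triangular Toeplitz matrix by a vector (view 2 in the module docstring)
# equals the causal convolution of the two first columns (view 1);
# construct_toeplitz builds the explicit matrix for comparison.
def _toeplitz_multiply_check():
    N = 8
    u = torch.randn(N)
    v = torch.randn(N)
    fast = triangular_toeplitz_multiply_(u, v)
    explicit = construct_toeplitz(u) @ v     # (N, N) @ (N,) -> (N,)
    return torch.allclose(fast, explicit, atol=1e-5)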
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
    output[..., n//2:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
| hyena-dna-main | src/ops/toeplitz.py |
from setuptools import setup, find_packages
setup(
name = 'graph-transformer-pytorch',
packages = find_packages(),
version = '0.1.1',
license='MIT',
description = 'Graph Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/graph-transformer-pytorch',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'graphs'
],
install_requires=[
'einops>=0.3',
'rotary-embedding-torch',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| graph-transformer-pytorch-main | setup.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from rotary_embedding_torch import RotaryEmbedding, apply_rotary_emb
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
List = nn.ModuleList
# normalizations
class PreNorm(nn.Module):
def __init__(
self,
dim,
fn
):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, *args, **kwargs):
x = self.norm(x)
return self.fn(x, *args,**kwargs)
# gated residual
class Residual(nn.Module):
def forward(self, x, res):
return x + res
class GatedResidual(nn.Module):
def __init__(self, dim):
super().__init__()
self.proj = nn.Sequential(
nn.Linear(dim * 3, 1, bias = False),
nn.Sigmoid()
)
def forward(self, x, res):
gate_input = torch.cat((x, res, x - res), dim = -1)
gate = self.proj(gate_input)
return x * gate + res * (1 - gate)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
pos_emb = None,
dim_head = 64,
heads = 8,
edge_dim = None
):
super().__init__()
edge_dim = default(edge_dim, dim)
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.pos_emb = pos_emb
self.to_q = nn.Linear(dim, inner_dim)
self.to_kv = nn.Linear(dim, inner_dim * 2)
self.edges_to_kv = nn.Linear(edge_dim, inner_dim)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, nodes, edges, mask = None):
h = self.heads
q = self.to_q(nodes)
k, v = self.to_kv(nodes).chunk(2, dim = -1)
e_kv = self.edges_to_kv(edges)
q, k, v, e_kv = map(lambda t: rearrange(t, 'b ... (h d) -> (b h) ... d', h = h), (q, k, v, e_kv))
if exists(self.pos_emb):
freqs = self.pos_emb(torch.arange(nodes.shape[1], device = nodes.device))
freqs = rearrange(freqs, 'n d -> () n d')
q = apply_rotary_emb(freqs, q)
k = apply_rotary_emb(freqs, k)
ek, ev = e_kv, e_kv
k, v = map(lambda t: rearrange(t, 'b j d -> b () j d '), (k, v))
k = k + ek
v = v + ev
sim = einsum('b i d, b i j d -> b i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b i -> b i ()') & rearrange(mask, 'b j -> b () j')
mask = repeat(mask, 'b i j -> (b h) i j', h = h)
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(~mask, max_neg_value)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b i j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
return self.to_out(out)
# optional feedforward
def FeedForward(dim, ff_mult = 4):
return nn.Sequential(
nn.Linear(dim, dim * ff_mult),
nn.GELU(),
nn.Linear(dim * ff_mult, dim)
)
# classes
class GraphTransformer(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
edge_dim = None,
heads = 8,
gated_residual = True,
with_feedforwards = False,
norm_edges = False,
rel_pos_emb = False,
accept_adjacency_matrix = False
):
super().__init__()
self.layers = List([])
edge_dim = default(edge_dim, dim)
self.norm_edges = nn.LayerNorm(edge_dim) if norm_edges else nn.Identity()
self.adj_emb = nn.Embedding(2, edge_dim) if accept_adjacency_matrix else None
pos_emb = RotaryEmbedding(dim_head) if rel_pos_emb else None
for _ in range(depth):
self.layers.append(List([
List([
PreNorm(dim, Attention(dim, pos_emb = pos_emb, edge_dim = edge_dim, dim_head = dim_head, heads = heads)),
GatedResidual(dim)
]),
List([
PreNorm(dim, FeedForward(dim)),
GatedResidual(dim)
]) if with_feedforwards else None
]))
def forward(
self,
nodes,
edges = None,
adj_mat = None,
mask = None
):
batch, seq, _ = nodes.shape
if exists(edges):
edges = self.norm_edges(edges)
if exists(adj_mat):
assert adj_mat.shape == (batch, seq, seq)
assert exists(self.adj_emb), 'accept_adjacency_matrix must be set to True'
adj_mat = self.adj_emb(adj_mat.long())
all_edges = default(edges, 0) + default(adj_mat, 0)
for attn_block, ff_block in self.layers:
attn, attn_residual = attn_block
nodes = attn_residual(attn(nodes, all_edges, mask = mask), nodes)
if exists(ff_block):
ff, ff_residual = ff_block
nodes = ff_residual(ff(nodes), nodes)
return nodes, edges
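# Illustrative usage sketch (sizes are assumptions for demonstration): a single
# forward pass with node features, edge features, an adjacency matrix and a mask.
def _graph_transformer_example():
    model = GraphTransformer(
        dim=256,
        depth=2,
        edge_dim=512,
        with_feedforwards=True,
        rel_pos_emb=True,
        accept_adjacency_matrix=True,
    )
    nodes = torch.randn(1, 16, 256)
    edges = torch.randn(1, 16, 16, 512)
    adj_mat = torch.randint(0, 2, (1, 16, 16))
    mask = torch.ones(1, 16).bool()
    nodes_out, edges_out = model(nodes, edges, adj_mat=adj_mat, mask=mask)
    return nodes_out.shape   # (1, 16, 256)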
| graph-transformer-pytorch-main | graph_transformer_pytorch/graph_transformer_pytorch.py |
from graph_transformer_pytorch.graph_transformer_pytorch import GraphTransformer
| graph-transformer-pytorch-main | graph_transformer_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'axial_positional_embedding',
packages = find_packages(),
version = '0.2.1',
license='MIT',
description = 'Axial Positional Embedding',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/axial-positional-embedding',
keywords = ['transformers', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | axial-positional-embedding-master | setup.py |
import torch
from torch import nn
from operator import mul
from functools import reduce
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, axial_shape, axial_dims = None):
super().__init__()
self.dim = dim
self.shape = axial_shape
self.max_seq_len = reduce(mul, axial_shape, 1)
self.summed = axial_dims is None
axial_dims = ((dim,) * len(axial_shape)) if self.summed else axial_dims
assert len(self.shape) == len(axial_dims), 'number of axial dimensions must equal the number of dimensions in the shape'
assert self.summed or not self.summed and sum(axial_dims) == dim, f'axial dimensions must sum up to the target dimension {dim}'
self.weights = ParameterList(self, 'weights', len(axial_shape))
for ind, (shape, axial_dim) in enumerate(zip(self.shape, axial_dims)):
ax_shape = [1] * len(self.shape)
ax_shape[ind] = shape
ax_shape = (1, *ax_shape, axial_dim)
ax_emb = nn.Parameter(torch.zeros(ax_shape).normal_(0, 1))
self.weights.append(ax_emb)
def forward(self, x):
b, t, e = x.shape
assert (t <= self.max_seq_len), f'Sequence length ({t}) must be less than the maximum sequence length allowed ({self.max_seq_len})'
embs = []
for ax_emb in self.weights.to_list():
axial_dim = ax_emb.shape[-1]
expand_shape = (b, *self.shape, axial_dim)
emb = ax_emb.expand(expand_shape).reshape(b, self.max_seq_len, axial_dim)
embs.append(emb)
pos_emb = sum(embs) if self.summed else torch.cat(embs, dim=-1)
return pos_emb[:, :t].to(x)
# a mock parameter list object until below issue is resolved
# https://github.com/pytorch/pytorch/issues/36035
class ParameterList(object):
def __init__(self, kls, prefix, length):
self.ind = 0
self.kls = kls
self.prefix = prefix
self.length = length
def _keyname(self, prefix, ind):
return f'{prefix}_{ind}'
def append(self, x):
setattr(self.kls, self._keyname(self.prefix, self.ind), x)
self.ind += 1
def to_list(self):
return [getattr(self.kls, self._keyname(self.prefix, i)) for i in range(self.length)]
# Axial Positional Embedding for Images
class AxialPositionalEmbeddingImage(nn.Module):
def __init__(self, dim, axial_shape, axial_dims = None):
super().__init__()
assert len(axial_shape) == 2, 'Axial shape must have 2 dimensions for images'
self.pos_emb = AxialPositionalEmbedding(dim, axial_shape, axial_dims)
def forward(self, img):
b, c, h, w = img.shape
img = img.permute(0, 2, 3, 1).reshape(b, h * w, c)
pos_emb = self.pos_emb(img)
return pos_emb.reshape(b, h, w, c).permute(0, 3, 1, 2)
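# Minimal usage sketch: sum-mode axial embeddings for a (batch, seq, dim) token
# tensor, and the image variant for a (batch, channels, height, width) feature map.
if __name__ == '__main__':
    pos_emb = AxialPositionalEmbedding(dim = 512, axial_shape = (64, 64))  # max_seq_len = 64 * 64
    tokens = torch.randn(1, 1024, 512)
    tokens = tokens + pos_emb(tokens)

    img_pos_emb = AxialPositionalEmbeddingImage(dim = 256, axial_shape = (32, 32))
    feats = torch.randn(1, 256, 32, 32)
    feats = feats + img_pos_emb(feats)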
| axial-positional-embedding-master | axial_positional_embedding/axial_positional_embedding.py |
from axial_positional_embedding.axial_positional_embedding import AxialPositionalEmbedding, AxialPositionalEmbeddingImage
| axial-positional-embedding-master | axial_positional_embedding/__init__.py |
import copy
import json
import math
import re
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
    'relu': F.relu,
'swish': swish,
'gelu': gelu
}
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x):
a = self.attn(x)
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
return h
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, vocab=40990, n_ctx=512):
super(TransformerModel, self).__init__()
self.vocab = vocab
self.embed = nn.Embedding(vocab, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x):
x = x.view(-1, x.size(-2), x.size(-1))
e = self.embed(x)
# Add the position information to the input embeddings
h = e.sum(dim=2)
for block in self.h:
h = block(h)
return h
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
def forward(self, h):
# Truncated Language modeling logits (we remove the last token)
h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(h_trunc)
return lm_logits
class MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, clf_token, cfg):
super(MultipleChoiceHead, self).__init__()
self.n_embd = cfg.n_embd
self.clf_token = clf_token
self.dropout = nn.Dropout2d(cfg.clf_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(cfg.n_embd, 1)
nn.init.normal_(self.linear.weight, std = 0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, h, x):
# Classification logits
clf_h = h.view(-1, self.n_embd)
flat = x[..., 0].contiguous().view(-1)
clf_h = clf_h[flat == self.clf_token, :]
clf_h = clf_h.view(-1, x.size(1), self.n_embd, 1)
# This double transposition is there to replicate the behavior
# of the noise_shape argument in the tensorflow
# implementation. For more details, see
# https://github.com/huggingface/pytorch-openai-transformer-lm/issues/11
clf_h = self.dropout(clf_h.transpose(1, 2)).transpose(1, 2)
clf_h = clf_h.contiguous().view(-1, self.n_embd)
clf_logits = self.linear(clf_h)
return clf_logits.view(-1, x.size(1))
class ClfHead(nn.Module):
"""Classification Head for the transformer
TODO: test this class."""
def __init__(self, clf_token, cfg, n_class):
super(ClfHead, self).__init__()
self.n_embd = cfg.n_embd
self.clf_token = clf_token
self.dropout = nn.Dropout(cfg.clf_pdrop)
self.linear = nn.Linear(cfg.n_embd, n_class)
nn.init.normal_(self.linear.weight, std = 0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, h, x):
clf_h = h.view(-1, self.n_embd)
flat = x[..., 0].contiguous().view(-1)
clf_h = clf_h[flat == self.clf_token, :]
clf_h = self.dropout(clf_h)
clf_logits = self.linear(clf_h)
return clf_logits
class SimilarityHead(nn.Module):
""" Similarity Head for the transformer
TODO: test this class."""
def __init__(self, clf_token, cfg):
super(SimilarityHead, self).__init__()
self.n_embd = cfg.n_embd
self.clf_token = clf_token
self.dropout = nn.Dropout(cfg.clf_pdrop)
self.linear = nn.Linear(cfg.n_embd, 1)
nn.init.normal_(self.linear.weight, std = 0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, h, x):
sim_h = h.view(-1, self.n_embd)
flat = x[..., 0].contiguous().view(-1)
sim_h = sim_h[flat == self.clf_token, :]
sim_h = self.dropout(sim_h)
sim_h = sim_h.sum(dim = 1)
sim_logits = self.linear(sim_h)
return sim_logits
class DoubleHeadModel(nn.Module):
""" Transformer with language model and task specific heads """
def __init__(self, cfg, clf_token, task_head_type, vocab=40990, n_ctx=512):
super(DoubleHeadModel, self).__init__()
self.transformer = TransformerModel(cfg, vocab=vocab, n_ctx=n_ctx)
self.lm_head = LMHead(self.transformer, cfg)
if isinstance(task_head_type, str):
if task_head_type == 'multiple_choice':
self.task_head = MultipleChoiceHead(clf_token, cfg)
elif task_head_type == 'similarity':
self.task_head = SimilarityHead(clf_token, cfg)
elif task_head_type == 'inference':
# the three classes correspond to entailment, contradiction and neutral.
self.task_head = ClfHead(clf_token, cfg, 3)
else:
raise ValueError("task_head_type is expected to be 'multiple_choice' "
"'similarity', 'inference' or ('classification', n_class) "
f"got {task_head_type}.")
elif isinstance(task_head_type, collections.abc.Sequence) and len(task_head_type) == 2 and \
task_head_type[0] == 'classification':
n_class = task_head_type[1]
self.task_head = ClfHead(clf_token, cfg, n_class)
else:
raise ValueError("task_head_type is expected to be 'multiple_choice' "
"'similarity', 'inference' or ('classification', n_class) "
f"got {task_head_type}.")
def forward(self, x):
h = self.transformer(x)
lm_logits = self.lm_head(h)
task_logits = self.task_head(h, x)
return lm_logits, task_logits
def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n_embd=768, path='./model/',
path_names='./'):
# Load weights from TF model
print("Loading weights...")
names = json.load(open(path_names + 'parameters_names.json'))
shapes = json.load(open(path + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
DEFAULT_CONFIG = dotdict({
'n_embd': 768,
'n_head': 12,
'n_layer': 12,
'embd_pdrop': 0.1,
'attn_pdrop': 0.1,
'resid_pdrop': 0.1,
'afn': 'gelu',
'clf_pdrop': 0.1})
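# Minimal usage sketch: the model consumes (batch, seq, 2) inputs whose last
# dimension pairs a token id with a position id; both are looked up in the same
# embedding table (positions occupy the final n_ctx slots, as in the training
# scripts) and summed inside forward().
if __name__ == '__main__':
    n_ctx, vocab = 77, 40990
    model = TransformerModel(DEFAULT_CONFIG, vocab=vocab, n_ctx=n_ctx)
    tokens = torch.randint(0, vocab - n_ctx, (2, n_ctx))            # token ids
    positions = torch.arange(vocab - n_ctx, vocab).expand(2, n_ctx) # position ids
    x = torch.stack((tokens, positions), dim=-1)                    # (batch, n_ctx, 2)
    h = model(x)                                                    # (batch, n_ctx, n_embd)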
| tweet-stance-prediction-master | transformer-openai/model_pytorch.py |
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from datasets import stance
from model_pytorch import DoubleHeadModel, load_openai_pretrained_model
from opt import OpenAIAdam
from text_utils import TextEncoder
from utils import (encode_dataset, iter_data,
ResultLogger, make_path)
from loss import ClassificationLossCompute, MultipleChoiceLossCompute
def transform_stance(X1):
n_batch = len(X1)
xmb = np.zeros((n_batch, 1, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, 1, n_ctx), dtype=np.float32)
start = encoder['_start_']
for i, x1 in enumerate(X1):
x12 = [start] + x1[:max_len] + [clf_token]
l12 = len(x12)
xmb[i, 0, :l12, 0] = x12
mmb[i, 0, :l12] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def iter_apply(Xs, Ms, Ys):
# fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
logits = []
cost = 0
with torch.no_grad():
dh_model.eval()
for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(ymb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
clf_logits *= n
clf_losses = compute_loss_fct(XMB, YMB, MMB, clf_logits, only_return_losses=True)
clf_losses *= n
logits.append(clf_logits.to("cpu").numpy())
cost += clf_losses.sum().item()
logits = np.concatenate(logits, 0)
return logits, cost
def iter_predict(Xs, Ms):
logits = []
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
logits.append(clf_logits.to("cpu").numpy())
logits = np.concatenate(logits, 0)
return logits
def log(save_dir, desc):
global best_score
print("Logging")
tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
va_logits, va_cost = iter_apply(vaX, vaM, vaY)
tr_cost = tr_cost / len(trY[:n_valid])
va_cost = va_cost / n_valid
tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1)) * 100.
va_acc = accuracy_score(vaY, np.argmax(va_logits, 1)) * 100.
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
if submit:
score = va_acc
if score > best_score:
best_score = score
path = os.path.join(save_dir, desc, 'best_params')
torch.save(dh_model.state_dict(), make_path(path))
def predict(dataset, submission_dir):
filename = filenames[dataset]
pred_fn = pred_fns[dataset]
label_decoder = label_decoders[dataset]
predictions = pred_fn(iter_predict(teX, teM))
if label_decoder is not None:
predictions = [label_decoder[prediction] for prediction in predictions]
path = os.path.join(submission_dir, filename)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
f.write('{}\t{}\n'.format('index', 'prediction'))
for i, prediction in enumerate(predictions):
f.write('{}\t{}\n'.format(i, prediction))
def run_epoch():
for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(ymb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
compute_loss_fct(XMB, YMB, MMB, clf_logits, lm_logits)
n_updates += 1
if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:
log(save_dir, desc)
argmax = lambda x: np.argmax(x, 1)
pred_fns = {
'stance': argmax,
}
filenames = {
'stance': 'stance.tsv',
}
label_decoders = {
'stance': None,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str, help="Description")
parser.add_argument('--dataset', type=str)
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter', type=int, default=3)
parser.add_argument('--n_batch', type=int, default=8)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=12)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_valid', type=int, default=374)
parser.add_argument('--topic', type=str, default=None)
args = parser.parse_args()
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Constants
submit = args.submit
dataset = args.dataset
n_ctx = args.n_ctx
save_dir = args.save_dir
desc = args.desc
data_dir = args.data_dir
log_dir = args.log_dir
submission_dir = args.submission_dir
topic = args.topic
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
text_encoder = TextEncoder(args.encoder_path, args.bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
print("Encoding dataset...")
((trX, trY), (vaX, vaY), (teX, )) = encode_dataset(*stance(data_dir, topic=topic),
encoder=text_encoder)
encoder['_start_'] = len(encoder)
encoder['_classify_'] = len(encoder)
clf_token = encoder['_classify_']
n_special = 2
max_len = n_ctx - 2
# Define maximum context as the minimum of [512, x] where x is the max sentence length
n_ctx = min(max(
[len(x[:max_len]) for x in trX]
+ [len(x[:max_len]) for x in vaX]
+ [len(x[:max_len]) for x in teX]
) + 3, n_ctx)
vocab = n_vocab + n_special + n_ctx
trX, trM = transform_stance(trX)
vaX, vaM = transform_stance(vaX)
if submit:
teX, teM = transform_stance(teX)
n_train = len(trY)
n_valid = len(vaY)
n_batch_train = args.n_batch * max(n_gpu, 1)
n_updates_total = (n_train // n_batch_train) * args.n_iter
dh_model = DoubleHeadModel(args, clf_token, ('classification', 3), vocab, n_ctx)
    criterion = nn.CrossEntropyLoss(reduction='none')
model_opt = OpenAIAdam(dh_model.parameters(),
lr=args.lr,
schedule=args.lr_schedule,
warmup=args.lr_warmup,
t_total=n_updates_total,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
compute_loss_fct = MultipleChoiceLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special)
dh_model.to(device)
dh_model = nn.DataParallel(dh_model)
n_updates = 0
n_epochs = 0
if dataset != 'stsb':
trYt = trY
if submit:
path = os.path.join(save_dir, desc, 'best_params')
torch.save(dh_model.state_dict(), make_path(path))
best_score = 0
for i in range(args.n_iter):
print("running epoch", i)
run_epoch()
n_epochs += 1
log(save_dir, desc)
if submit:
path = os.path.join(save_dir, desc, 'best_params')
dh_model.load_state_dict(torch.load(path))
predict(dataset, args.submission_dir)
| tweet-stance-prediction-master | transformer-openai/train_stance.py |
import os
import json
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from datasets import _rocstories
def rocstories(data_dir, pred_path, log_path):
preds = pd.read_csv(pred_path, delimiter='\t')['prediction'].values.tolist()
_, _, _, labels = _rocstories(os.path.join(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))
test_accuracy = accuracy_score(labels, preds)*100.
logs = [json.loads(line) for line in open(log_path)][1:]
best_validation_index = np.argmax([log['va_acc'] for log in logs])
valid_accuracy = logs[best_validation_index]['va_acc']
print('ROCStories Valid Accuracy: %.2f'%(valid_accuracy))
print('ROCStories Test Accuracy: %.2f'%(test_accuracy))
| tweet-stance-prediction-master | transformer-openai/analysis.py |
import re
import ftfy
import json
import spacy
from tqdm import tqdm
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def text_standardize(text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
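# For example (illustrative): text_standardize('Hello… world—ok') returns
# 'Hello... world - ok' (unicode ellipses/dashes normalized, dashes padded with spaces).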
class TextEncoder(object):
"""
mostly a wrapper for a public python bpe tokenizer
"""
def __init__(self, encoder_path, bpe_path):
self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
self.encoder = json.load(open(encoder_path))
self.decoder = {v:k for k,v in self.encoder.items()}
merges = open(bpe_path, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
def bpe(self, token):
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
def encode(self, texts, verbose=True):
texts_tokens = []
if verbose:
for text in tqdm(texts, ncols=80, leave=False):
text = self.nlp(text_standardize(ftfy.fix_text(text)))
text_tokens = []
for token in text:
text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')])
texts_tokens.append(text_tokens)
else:
for text in texts:
text = self.nlp(text_standardize(ftfy.fix_text(text)))
text_tokens = []
for token in text:
text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')])
texts_tokens.append(text_tokens)
return texts_tokens
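# Minimal usage sketch (requires the released BPE files and the spacy 'en' model;
# paths as used in the training scripts):
#   text_encoder = TextEncoder('model/encoder_bpe_40000.json', 'model/vocab_40000.bpe')
#   token_ids = text_encoder.encode(['hello world'], verbose = False)  # list of lists of ids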
| tweet-stance-prediction-master | transformer-openai/text_utils.py |
import os
import csv
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
seed = 3535999445
def _rocstories(path):
with open(path, encoding='utf_8') as f:
f = csv.reader(f)
st = []
ct1 = []
ct2 = []
y = []
for i, line in enumerate(tqdm(list(f), ncols=80, leave=False)):
if i > 0:
s = ' '.join(line[1:5])
c1 = line[5]
c2 = line[6]
st.append(s)
ct1.append(c1)
ct2.append(c2)
y.append(int(line[-1])-1)
return st, ct1, ct2, y
def rocstories(data_dir, n_train=1497, n_valid=374):
storys, comps1, comps2, ys = _rocstories(os.path.join(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv'))
teX1, teX2, teX3, _ = _rocstories(os.path.join(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))
tr_storys, va_storys, tr_comps1, va_comps1, tr_comps2, va_comps2, tr_ys, va_ys = train_test_split(storys, comps1, comps2, ys, test_size=n_valid, random_state=seed)
trX1, trX2, trX3 = [], [], []
trY = []
for s, c1, c2, y in zip(tr_storys, tr_comps1, tr_comps2, tr_ys):
trX1.append(s)
trX2.append(c1)
trX3.append(c2)
trY.append(y)
vaX1, vaX2, vaX3 = [], [], []
vaY = []
for s, c1, c2, y in zip(va_storys, va_comps1, va_comps2, va_ys):
vaX1.append(s)
vaX2.append(c1)
vaX3.append(c2)
vaY.append(y)
trY = np.asarray(trY, dtype=np.int32)
vaY = np.asarray(vaY, dtype=np.int32)
return (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3)
def _stance(path, topic=None):
def clean_ascii(text):
# function to remove non-ASCII chars from data
return ''.join(i for i in text if ord(i) < 128)
orig = pd.read_csv(path, delimiter='\t', header=0, encoding = "latin-1")
orig['Tweet'] = orig['Tweet'].apply(clean_ascii)
df = orig
# Get only those tweets that pertain to a single topic in the training data
if topic is not None:
df = df.loc[df['Target'] == topic]
X = df.Tweet.values
stances = ["AGAINST", "FAVOR", "NONE", "UNKNOWN"]
class_nums = {s: i for i, s in enumerate(stances)}
Y = np.array([class_nums[s] for s in df.Stance])
return X, Y
def stance(data_dir, topic=None):
path = Path(data_dir)
trainfile = 'semeval2016-task6-trainingdata.txt'
testfile = 'SemEval2016-Task6-subtaskA-testdata.txt'
X, Y = _stance(path/trainfile, topic=topic)
teX, _ = _stance(path/testfile, topic=topic)
tr_text, va_text, tr_sent, va_sent = train_test_split(X, Y, test_size=0.2, random_state=seed)
trX = []
trY = []
for t, s in zip(tr_text, tr_sent):
trX.append(t)
trY.append(s)
vaX = []
vaY = []
for t, s in zip(va_text, va_sent):
vaX.append(t)
vaY.append(s)
trY = np.asarray(trY, dtype=np.int32)
vaY = np.asarray(vaY, dtype=np.int32)
return (trX, trY), (vaX, vaY), (teX, )
if __name__ == "__main__":
## Test
data_dir = "./data"
(trX, trY), (vaX, vaY), teX = stance(data_dir)
print(trX[:5], trY[:5])
print(len(trX))
print(len(teX))
| tweet-stance-prediction-master | transformer-openai/datasets.py |
import pandas as pd
import sys
def output_predictions(test_path, pred_path, out_path, topic):
test = pd.read_csv(test_path, delimiter='\t', header=0, encoding = "latin-1")
if topic is not None:
test = test.loc[test["Target"] == topic].reset_index()
def clean_ascii(text):
# function to remove non-ASCII chars from data
return ''.join(i for i in text if ord(i) < 128)
test['Tweet'] = test['Tweet'].apply(clean_ascii)
#print(test)
pred = pd.read_csv(pred_path, header=0, delimiter='\t')
#print(pred)
pred['prediction'] = pred['prediction'].astype('int64')
df = test.join(pred)
#print(df)
stances = ["AGAINST", "FAVOR", "NONE", "UNKNOWN"]
df["Stance"] = df["prediction"].apply(lambda i: stances[i])
df = df[["index", "Target", "Tweet", "Stance"]]
class_nums = {s: i for i, s in enumerate(stances)}
df.to_csv(out_path, sep='\t', index=False, header=['ID', 'Target', 'Tweet', 'Stance'])
if __name__ == "__main__":
test_path, pred_path, out_path = sys.argv[1:4]
topic = None
if len(sys.argv) > 4:
topic = sys.argv[4]
output_predictions(test_path, pred_path, out_path, topic)
| tweet-stance-prediction-master | transformer-openai/parse_output.py |
import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
s = 1 if x <= warmup else 0
    return s*(x/warmup) + (1-s)*(0.5 * (1 + math.cos(math.pi * x)))
def warmup_constant(x, warmup=0.002):
s = 1 if x <= warmup else 0
return s*(x/warmup) + (1-s)*1
def warmup_linear(x, warmup=0.002):
s = 1 if x <= warmup else 0
return (s*(x/warmup) + (1-s))*(1-x)
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
}
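# Illustrative values for the default warmup fraction of 0.002: warmup_linear
# ramps the multiplier up during warmup and then decays it linearly to zero, e.g.
#   warmup_linear(0.001) == (0.001 / 0.002) * (1 - 0.001) == 0.4995
#   warmup_linear(0.5)   == 1 - 0.5                       == 0.5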
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr, schedule, warmup, t_total,
b1=0.9, b2=0.999, e=1e-8, l2=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0 <= warmup:
raise ValueError("Invalid warmup: {}".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {}".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {}".format(b2))
if not 0.0 <= e:
raise ValueError("Invalid epsilon value: {}".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, l2=l2, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['e'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['l2'] > 0:
                    p.data.add_(p.data, alpha=-lr_scheduled * group['l2'])
return loss
| tweet-stance-prediction-master | transformer-openai/opt.py |
import torch
class MultipleChoiceLossCompute:
"A Loss compute and train function for multiple choice tasks."
def __init__(self, lm_criterion, clf_criterion, lm_coef, opt=None):
self.lm_criterion = lm_criterion
self.clf_criterion = clf_criterion
self.lm_coef = lm_coef
self.opt = opt
def __call__(self, X, Y, M, clf_logits, lm_logits=None, only_return_losses=False):
# Language modeling loss
if lm_logits is not None:
x_shifted = X[:, :, 1:, 0].contiguous().view(-1) # Shape: 252
M = M.view(-1, M.size(2))
lm_losses = self.lm_criterion(lm_logits, x_shifted)
lm_losses = lm_losses.view(X.size(0) * X.size(1), X.size(2) - 1)
lm_losses = lm_losses * M[:, 1:]
lm_losses = lm_losses.sum(1) / torch.sum(M[:, 1:], 1)
# Classification loss
clf_losses = self.clf_criterion(clf_logits, Y)
if only_return_losses:
return (clf_losses, lm_losses) if lm_logits is not None else clf_losses
if self.lm_coef > 0 and lm_logits is not None:
train_loss = clf_losses.sum() + self.lm_coef * lm_losses.sum()
else:
train_loss = clf_losses.sum()
train_loss.backward()
if self.opt is not None:
self.opt.step()
self.opt.zero_grad()
return train_loss.item()
class ClassificationLossCompute:
"A Loss compute and train function for classification tasks."
def __init__(self, lm_criterion, clf_criterion, lm_coef, opt=None):
self.lm_criterion = lm_criterion
self.clf_criterion = clf_criterion
self.lm_coef = lm_coef
self.opt = opt
def __call__(self, X, Y, M, clf_logits, lm_logits=None, only_return_losses=False):
# Language modeling loss
if lm_logits is not None:
x_shifted = X[:, 1:, 0].contiguous().view(-1)
M = M.view(-1, M.size(-1))
lm_losses = self.lm_criterion(lm_logits, x_shifted)
lm_losses = lm_losses.view(X.size(0), X.size(-2) - 1)
lm_losses = lm_losses * M[:, 1:]
lm_losses = lm_losses.sum(1) / torch.sum(M[:, 1:], 1)
# Classification loss
clf_losses = self.clf_criterion(clf_logits, Y)
if only_return_losses:
return (clf_losses, lm_losses) if lm_logits is not None else clf_losses
if self.lm_coef > 0 and lm_logits is not None:
train_loss = clf_losses.sum() + self.lm_coef * lm_losses.sum()
else:
train_loss = clf_losses.sum()
train_loss.backward()
if self.opt is not None:
self.opt.step()
self.opt.zero_grad()
return train_loss.item()
# TODO Implement a LossCompute class for similarity tasks.
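# Minimal usage sketch for the classification variant: with lm_logits omitted only
# the classification loss is used; passing an optimizer would also step it.
if __name__ == '__main__':
    import torch.nn as nn
    criterion = nn.CrossEntropyLoss(reduction='none')
    compute_loss = ClassificationLossCompute(criterion, criterion, lm_coef=0.5, opt=None)
    clf_logits = torch.randn(4, 3, requires_grad=True)   # (batch, n_class)
    Y = torch.randint(0, 3, (4,))                        # class labels
    X = torch.zeros(4, 16, 2, dtype=torch.long)          # (batch, seq, 2) token / position ids
    M = torch.ones(4, 16)                                # sequence mask
    loss_value = compute_loss(X, Y, M, clf_logits)       # runs backward, returns a float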
| tweet-stance-prediction-master | transformer-openai/loss.py |
import os
import sys
import json
import time
from functools import partial
import numpy as np
# import tensorflow as tf
# from tensorflow.python.framework import function
from tqdm import tqdm
def encode_dataset(*splits, encoder):
encoded_splits = []
for split in splits:
fields = []
for field in split:
if isinstance(field[0], str):
field = encoder.encode(field)
fields.append(field)
encoded_splits.append(fields)
return encoded_splits
def stsb_label_encoding(labels, nclass=6):
"""
Label encoding from Tree LSTM paper (Tai, Socher, Manning)
"""
Y = np.zeros((len(labels), nclass)).astype(np.float32)
for j, y in enumerate(labels):
for i in range(nclass):
if i == np.floor(y) + 1:
Y[j,i] = y - np.floor(y)
if i == np.floor(y):
Y[j,i] = np.floor(y) - y + 1
return Y
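# e.g. a similarity label of 2.6 is split across its two neighbouring bins:
#   stsb_label_encoding([2.6]) -> approximately [[0., 0., 0.4, 0.6, 0., 0.]]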
def np_softmax(x, t=1):
x = x/t
x = x - np.max(x, axis=-1, keepdims=True)
ex = np.exp(x)
return ex/np.sum(ex, axis=-1, keepdims=True)
def make_path(f):
d = os.path.dirname(f)
if d and not os.path.exists(d):
os.makedirs(d)
return f
def _identity_init(shape, dtype, partition_info, scale):
n = shape[-1]
w = np.eye(n)*scale
if len([s for s in shape if s != 1]) == 2:
w = w.reshape(shape)
return w.astype(np.float32)
def identity_init(scale=1.0):
return partial(_identity_init, scale=scale)
def _np_init(shape, dtype, partition_info, w):
return w
def np_init(w):
return partial(_np_init, w=w)
class ResultLogger(object):
def __init__(self, path, *args, **kwargs):
if 'time' not in kwargs:
kwargs['time'] = time.time()
self.f_log = open(make_path(path), 'w')
self.f_log.write(json.dumps(kwargs)+'\n')
def log(self, **kwargs):
if 'time' not in kwargs:
kwargs['time'] = time.time()
self.f_log.write(json.dumps(kwargs)+'\n')
self.f_log.flush()
def close(self):
self.f_log.close()
def flatten(outer):
return [el for inner in outer for el in inner]
def remove_none(l):
return [e for e in l if e is not None]
def iter_data(*datas, n_batch=128, truncate=False, verbose=False, max_batches=float("inf")):
n = len(datas[0])
if truncate:
n = (n//n_batch)*n_batch
n = min(n, max_batches*n_batch)
n_batches = 0
if verbose:
f = sys.stderr
else:
f = open(os.devnull, 'w')
for i in tqdm(range(0, n, n_batch), total=n//n_batch, file=f, ncols=80, leave=False):
        if n_batches >= max_batches: return
if len(datas) == 1:
yield datas[0][i:i+n_batch]
else:
yield (d[i:i+n_batch] for d in datas)
n_batches += 1
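# Minimal usage sketch: iter_data slices several aligned arrays into batches,
# yielding the single array directly or a generator of per-array slices, e.g.
#   for xb, yb in iter_data(X, Y, n_batch = 32, verbose = False):
#       ...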
| tweet-stance-prediction-master | transformer-openai/utils.py |
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from analysis import rocstories as rocstories_analysis
from datasets import rocstories
from model_pytorch import DoubleHeadModel, load_openai_pretrained_model
from opt import OpenAIAdam
from text_utils import TextEncoder
from utils import (encode_dataset, iter_data,
ResultLogger, make_path)
from loss import MultipleChoiceLossCompute
def transform_roc(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]
x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
l12 = len(x12)
l13 = len(x13)
xmb[i, 0, :l12, 0] = x12
xmb[i, 1, :l13, 0] = x13
mmb[i, 0, :l12] = 1
mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def iter_apply(Xs, Ms, Ys):
# fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
logits = []
cost = 0
with torch.no_grad():
dh_model.eval()
for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(ymb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
clf_logits *= n
clf_losses = compute_loss_fct(XMB, YMB, MMB, clf_logits, only_return_losses=True)
clf_losses *= n
logits.append(clf_logits.to("cpu").numpy())
cost += clf_losses.sum().item()
logits = np.concatenate(logits, 0)
return logits, cost
def iter_predict(Xs, Ms):
logits = []
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
logits.append(clf_logits.to("cpu").numpy())
logits = np.concatenate(logits, 0)
return logits
def log(save_dir, desc):
global best_score
print("Logging")
tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
va_logits, va_cost = iter_apply(vaX, vaM, vaY)
tr_cost = tr_cost / len(trY[:n_valid])
va_cost = va_cost / n_valid
tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1)) * 100.
va_acc = accuracy_score(vaY, np.argmax(va_logits, 1)) * 100.
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
if submit:
score = va_acc
if score > best_score:
best_score = score
path = os.path.join(save_dir, desc, 'best_params')
torch.save(dh_model.state_dict(), make_path(path))
def predict(dataset, submission_dir):
filename = filenames[dataset]
pred_fn = pred_fns[dataset]
label_decoder = label_decoders[dataset]
predictions = pred_fn(iter_predict(teX, teM))
if label_decoder is not None:
predictions = [label_decoder[prediction] for prediction in predictions]
path = os.path.join(submission_dir, filename)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
f.write('{}\t{}\n'.format('index', 'prediction'))
for i, prediction in enumerate(predictions):
f.write('{}\t{}\n'.format(i, prediction))
def run_epoch():
for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(ymb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
compute_loss_fct(XMB, YMB, MMB, clf_logits, lm_logits)
n_updates += 1
if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:
log(save_dir, desc)
argmax = lambda x: np.argmax(x, 1)
pred_fns = {
'rocstories': argmax,
}
filenames = {
'rocstories': 'ROCStories.tsv',
}
label_decoders = {
'rocstories': None,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str, help="Description")
parser.add_argument('--dataset', type=str)
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter', type=int, default=3)
parser.add_argument('--n_batch', type=int, default=8)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=12)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_valid', type=int, default=374)
args = parser.parse_args()
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Constants
submit = args.submit
dataset = args.dataset
n_ctx = args.n_ctx
save_dir = args.save_dir
desc = args.desc
data_dir = args.data_dir
log_dir = args.log_dir
submission_dir = args.submission_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
text_encoder = TextEncoder(args.encoder_path, args.bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
print("Encoding dataset...")
((trX1, trX2, trX3, trY),
(vaX1, vaX2, vaX3, vaY),
(teX1, teX2, teX3)) = encode_dataset(*rocstories(data_dir, n_valid=args.n_valid),
encoder=text_encoder)
encoder['_start_'] = len(encoder)
encoder['_delimiter_'] = len(encoder)
encoder['_classify_'] = len(encoder)
clf_token = encoder['_classify_']
n_special = 3
max_len = n_ctx // 2 - 2
n_ctx = min(max(
[len(x1[:max_len]) + max(len(x2[:max_len]),
len(x3[:max_len])) for x1, x2, x3 in zip(trX1, trX2, trX3)]
+ [len(x1[:max_len]) + max(len(x2[:max_len]),
len(x3[:max_len])) for x1, x2, x3 in zip(vaX1, vaX2, vaX3)]
+ [len(x1[:max_len]) + max(len(x2[:max_len]),
len(x3[:max_len])) for x1, x2, x3 in zip(teX1, teX2, teX3)]
) + 3, n_ctx)
vocab = n_vocab + n_special + n_ctx
trX, trM = transform_roc(trX1, trX2, trX3)
vaX, vaM = transform_roc(vaX1, vaX2, vaX3)
if submit:
teX, teM = transform_roc(teX1, teX2, teX3)
n_train = len(trY)
n_valid = len(vaY)
n_batch_train = args.n_batch * max(n_gpu, 1)
n_updates_total = (n_train // n_batch_train) * args.n_iter
dh_model = DoubleHeadModel(args, clf_token, 'multiple_choice', vocab, n_ctx)
    criterion = nn.CrossEntropyLoss(reduction='none')
model_opt = OpenAIAdam(dh_model.parameters(),
lr=args.lr,
schedule=args.lr_schedule,
warmup=args.lr_warmup,
t_total=n_updates_total,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
compute_loss_fct = MultipleChoiceLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special)
dh_model.to(device)
dh_model = nn.DataParallel(dh_model)
n_updates = 0
n_epochs = 0
if dataset != 'stsb':
trYt = trY
if submit:
path = os.path.join(save_dir, desc, 'best_params')
torch.save(dh_model.state_dict(), make_path(path))
best_score = 0
for i in range(args.n_iter):
print("running epoch", i)
run_epoch()
n_epochs += 1
log(save_dir, desc)
if submit:
path = os.path.join(save_dir, desc, 'best_params')
dh_model.load_state_dict(torch.load(path))
predict(dataset, args.submission_dir)
if args.analysis:
rocstories_analysis(data_dir, os.path.join(args.submission_dir, 'ROCStories.tsv'),
os.path.join(log_dir, 'rocstories.jsonl'))
| tweet-stance-prediction-master | transformer-openai/train.py |
from setuptools import setup, find_packages
setup(
name = 'remixer-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.3',
license='MIT',
description = 'Remixer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/remixer-pytorch',
keywords = [
'artificial intelligence',
'transformer',
'feedforward',
'mlp-mixer'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| remixer-pytorch-main | setup.py |
from remixer_pytorch.remixer_pytorch import RemixerBlock
| remixer-pytorch-main | remixer_pytorch/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
class RemixerBlock(nn.Module):
def __init__(
self,
dim,
seq_len,
causal = False,
bias = False
):
super().__init__()
self.causal = causal
self.proj_in = nn.Linear(dim, 2 * dim, bias = bias)
self.mixer = nn.Parameter(torch.randn(seq_len, seq_len))
self.alpha = nn.Parameter(torch.tensor(0.))
self.proj_out = nn.Linear(dim, dim, bias = bias)
def forward(self, x):
mixer, causal, device = self.mixer, self.causal, x.device
x, gate = self.proj_in(x).chunk(2, dim = -1)
x = F.gelu(gate) * x
if self.causal:
seq = x.shape[1]
mask_value = -torch.finfo(x.dtype).max
mask = torch.ones((seq, seq), device = device, dtype=torch.bool).triu(1)
mixer = mixer[:seq, :seq]
mixer = mixer.masked_fill(mask, mask_value)
mixer = mixer.softmax(dim = -1)
mixed = einsum('b n d, m n -> b m d', x, mixer)
alpha = self.alpha.sigmoid()
out = (x * mixed) * alpha + (x - mixed) * (1 - alpha)
return self.proj_out(out)
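# Minimal usage sketch
if __name__ == '__main__':
    block = RemixerBlock(dim = 512, seq_len = 1024, causal = True)
    x = torch.randn(1, 1024, 512)
    out = block(x)   # (1, 1024, 512)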
| remixer-pytorch-main | remixer_pytorch/remixer_pytorch.py |
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
import torch.nn.functional as F
from einops import rearrange
import sidechainnet as scn
from alphafold2_pytorch import Alphafold2
import alphafold2_pytorch.constants as constants
from alphafold2_pytorch.utils import get_bucketed_distance_matrix
# constants
DEVICE = None # defaults to cuda if available, else cpu
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100
THRESHOLD_LENGTH = 250
# set device
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
DEVICE = constants.DEVICE
# helpers
def cycle(loader, cond = lambda x: True):
while True:
for data in loader:
if not cond(data):
continue
yield data
# get data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = 1,
dynamic_batching = False
)
data = iter(data['train'])
data_cond = lambda t: t[1].shape[1] < THRESHOLD_LENGTH
dl = cycle(data, data_cond)
# model
model = Alphafold2(
dim = 256,
depth = 1,
heads = 8,
dim_head = 64
).to(DEVICE)
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training loop
for _ in range(NUM_BATCHES):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seq, coords, mask = batch.seqs, batch.crds, batch.msks
b, l, _ = seq.shape
# prepare mask, labels
seq, coords, mask = seq.argmax(dim = -1).to(DEVICE), coords.to(DEVICE), mask.to(DEVICE).bool()
coords = rearrange(coords, 'b (l c) d -> b l c d', l = l)
discretized_distances = get_bucketed_distance_matrix(coords[:, :, 1], mask, DISTOGRAM_BUCKETS, IGNORE_INDEX)
# predict
distogram = model(seq, mask = mask)
distogram = rearrange(distogram, 'b i j c -> b c i j')
# loss
loss = F.cross_entropy(
distogram,
discretized_distances,
ignore_index = IGNORE_INDEX
)
loss.backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| alphafold2-main | train_pre.py |
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
# data
import sidechainnet as scn
from sidechainnet.sequence.utils import VOCAB
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES
# models
from alphafold2_pytorch import Alphafold2
import alphafold2_pytorch.constants as constants
from se3_transformer_pytorch import SE3Transformer
from alphafold2_pytorch.utils import *
# constants
FEATURES = "esm" # one of ["esm", "msa", "msa_transformer", None]
DEVICE = None # defaults to cuda if available, else cpu
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100
THRESHOLD_LENGTH = 250
TO_PDB = False
SAVE_DIR = ""
# set device
DEVICE = constants.DEVICE
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
# set embedder model from esm if appropriate - Load ESM-1b model
if FEATURES == "esm":
# from pytorch hub (almost 30gb)
embedd_model, alphabet = torch.hub.load("facebookresearch/esm", "esm1b_t33_650M_UR50S")
batch_converter = alphabet.get_batch_converter()
## alternatively do
# import esm # after installing esm
# model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
batch_converter = alphabet.get_batch_converter()
# helpers
def cycle(loader, cond = lambda x: True):
while True:
for data in loader:
if not cond(data):
continue
yield data
# get data
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = 1,
dynamic_batching = False
)
data = iter(data['train'])
data_cond = lambda t: t[1].shape[1] < THRESHOLD_LENGTH
dl = cycle(data, data_cond)
# model
model = Alphafold2(
dim = 256,
depth = 1,
heads = 8,
dim_head = 64,
predict_coords = True,
structure_module_dim = 8,
structure_module_depth = 2,
structure_module_heads = 4,
structure_module_dim_head = 16,
structure_module_refinement_iters = 2
).to(DEVICE)
# optimizer
dispersion_weight = 0.1
criterion = nn.MSELoss()
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training loop
for _ in range(NUM_BATCHES):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seq, coords, mask = batch.seqs, batch.crds, batch.msks
b, l, _ = seq.shape
# prepare data and mask labels
seq, coords, mask = seq.argmax(dim = -1).to(DEVICE), coords.to(DEVICE), mask.to(DEVICE)
# coords = rearrange(coords, 'b (l c) d -> b l c d', l = l) # no need to rearrange for now
# mask the atoms and backbone positions for each residue
# sequence embedding (msa / esm / attn / or nothing)
        msa, embedds = None, None
# get embedds
if FEATURES == "esm":
embedds = get_esm_embedd(seq, embedd_model, batch_converter)
# get msa here
elif FEATURES == "msa":
pass
# no embeddings
else:
pass
# predict - out is (batch, L * 3, 3)
refined = model(
seq,
msa = msa,
embedds = embedds,
mask = mask
)
        # atom mask (computed first, since it is needed for the sidechain container and alignment below)
        cloud_mask = scn_cloud_mask(seq, boolean = False)
        flat_cloud_mask = rearrange(cloud_mask, 'b l c -> b (l c)')
        # chain_mask is all atoms that will be backpropped thru -> existing + trainable
        chain_mask = (mask * cloud_mask)[cloud_mask]
        flat_chain_mask = rearrange(chain_mask, 'b l c -> b (l c)')
        # build SC container from the predicted coords. set SC points to CA and optionally place carbonyl O
        proto_sidechain = sidechain_container(refined, n_aa=batch,
                                              cloud_mask=cloud_mask, place_oxygen=False)
        # rotate / align
        coords_aligned, labels_aligned = Kabsch(refined, coords[flat_cloud_mask])
# save pdb files for visualization
if TO_PDB:
# idx from batch to save prot and label
idx = 0
coords2pdb(seq[idx, :, 0], coords_aligned[idx], cloud_mask, prefix=SAVE_DIR, name="pred.pdb")
coords2pdb(seq[idx, :, 0], labels_aligned[idx], cloud_mask, prefix=SAVE_DIR, name="label.pdb")
# loss - RMSE + distogram_dispersion
loss = torch.sqrt(criterion(coords_aligned[flat_chain_mask], labels_aligned[flat_chain_mask])) + \
dispersion_weight * torch.norm( (1/weights)-1 )
loss.backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| alphafold2-main | train_end2end.py |
from setuptools import setup, find_packages
setup(
name = 'alphafold2-pytorch',
packages = find_packages(),
version = '0.4.32',
license='MIT',
description = 'AlphaFold2 - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang, Eric Alcaide',
author_email = '[email protected], [email protected]',
url = 'https://github.com/lucidrains/alphafold2',
keywords = [
'artificial intelligence',
'attention mechanism',
'protein folding'
],
install_requires=[
'einops>=0.3',
'En-transformer>=0.2.3',
'invariant-point-attention',
'mdtraj>=1.8',
'numpy',
'proDy',
'pytorch3d',
'requests',
'sidechainnet',
'torch>=1.6',
'transformers',
'tqdm',
'biopython',
'mp-nerf>=0.1.5'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
)
| alphafold2-main | setup.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from contextlib import contextmanager
from einops import reduce
# helpers
def exists(val):
return val is not None
@contextmanager
def null_context():
yield
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
# function wrapper for determinism on backwards
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# reversible self attention block
class ReversibleSelfAttnBlock(nn.Module):
def __init__(self, f, g, j, k):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.j = Deterministic(j)
self.k = Deterministic(k)
def forward(self, x, m, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_pos_emb = None, msa_pos_emb = None, _reverse = True, **kwargs):
x1, x2 = torch.chunk(x, 2, dim = 2)
m1, m2 = torch.chunk(m, 2, dim = 2)
y1, y2, n1, n2 = None, None, None, None
context = torch.no_grad if _reverse else null_context
record_rng = self.training and _reverse
with context():
y1 = x1 + self.f(x2, shape = seq_shape, record_rng = record_rng, mask = mask, rotary_emb = seq_pos_emb)
y2 = x2 + self.g(y1, shape = seq_shape, record_rng = record_rng)
n1 = m1 + self.j(m2, shape = msa_shape, record_rng = record_rng, mask = msa_mask, rotary_emb = msa_pos_emb)
n2 = m2 + self.k(n1, record_rng = record_rng)
return torch.cat((y1, y2), dim = 2), torch.cat((n1, n2), dim = 2)
def backward_pass(self, y, n, dy, dn, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_pos_emb = None, msa_pos_emb = None, **kwargs):
y1, y2 = torch.chunk(y, 2, dim = 2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, shape = seq_shape, set_rng = True)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, shape = seq_shape, set_rng = True, mask = mask, rotary_emb = seq_pos_emb)
torch.autograd.backward(fx2, dx1, retain_graph = True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim = 2)
dx = torch.cat([dx1, dx2], dim = 2)
n1, n2 = torch.chunk(n, 2, dim = 2)
del n
dn1, dn2 = torch.chunk(dn, 2, dim = 2)
del dn
with torch.enable_grad():
n1.requires_grad = True
gn1 = self.k(n1, set_rng = True)
torch.autograd.backward(gn1, dn2)
with torch.no_grad():
m2 = n2 - gn1
del n2, gn1
dm1 = dn1 + n1.grad
del dn1
n1.grad = None
with torch.enable_grad():
m2.requires_grad = True
fm2 = self.j(m2, shape = msa_shape, set_rng = True, mask = msa_mask, rotary_emb = msa_pos_emb)
torch.autograd.backward(fm2, dm1, retain_graph=True)
with torch.no_grad():
m1 = n1 - fm2
del n1, fm2
dm2 = dn2 + m2.grad
del dn2
m2.grad = None
m = torch.cat([m1, m2.detach()], dim = 2)
dm = torch.cat([dm1, dm2], dim = 2)
return x, m, dx, dm
# reversible cross attention block
class ReversibleCrossAttnBlock(nn.Module):
def __init__(self, f, g, j, k):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.j = Deterministic(j)
self.k = Deterministic(k)
def forward(self, x, m, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_to_msa_pos_emb = None, msa_to_seq_pos_emb = None, _reverse = True, **kwargs):
x1, x2 = torch.chunk(x, 2, dim = 2)
m1, m2 = torch.chunk(m, 2, dim = 2)
y1, y2, n1, n2 = None, None, None, None
context = torch.no_grad if _reverse else null_context
record_rng = self.training and _reverse
with context():
y1 = x1 + self.f(x2, m2, record_rng = record_rng, mask = mask, context_mask = msa_mask, shape = seq_shape, context_shape = msa_shape, rotary_emb = seq_to_msa_pos_emb)
y2 = x2 + self.k(y1, shape = seq_shape, record_rng = record_rng)
n1 = m1 + self.j(m2, y2, record_rng = record_rng, mask = msa_mask, context_mask = mask, shape = msa_shape, context_shape = seq_shape, rotary_emb = msa_to_seq_pos_emb)
n2 = m2 + self.g(n1, record_rng = record_rng)
return torch.cat((y1, y2), dim = 2), torch.cat((n1, n2), dim = 2)
def backward_pass(self, y, n, dy, dn, mask = None, msa_mask = None, seq_shape = None, msa_shape = None, seq_to_msa_pos_emb = None, msa_to_seq_pos_emb = None, **kwargs):
n1, n2 = torch.chunk(n, 2, dim = 2)
del n
dn1, dn2 = torch.chunk(dn, 2, dim = 2)
del dn
y1, y2 = torch.chunk(y, 2, dim = 2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 2)
del dy
with torch.enable_grad():
n1.requires_grad = True
gn1 = self.g(n1, set_rng = True)
torch.autograd.backward(gn1, dn2)
with torch.no_grad():
m2 = n2 - gn1
del n2, gn1
dm1 = dn1 + n1.grad
del dn1
n1.grad = None
with torch.enable_grad():
m2.requires_grad = True
y2.requires_grad = True
fm2 = self.j(m2, y2, set_rng=True, mask = msa_mask, context_mask = mask, shape = msa_shape, context_shape = seq_shape, rotary_emb = msa_to_seq_pos_emb)
torch.autograd.backward(fm2, dm1)
with torch.no_grad():
m1 = n1 - fm2
del n1, fm2
dm2 = dn2 + m2.grad
dx2 = dy2 + y2.grad
del dn2
del dy2
m2.grad = None
y2.grad = None
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.k(y1, shape = seq_shape, set_rng = True)
torch.autograd.backward(gy1, dx2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
m2.requires_grad = True
fx2 = self.f(x2, m2, set_rng = True, mask = mask, context_mask = msa_mask, shape = seq_shape, context_shape = msa_shape, rotary_emb = seq_to_msa_pos_emb)
torch.autograd.backward(fx2, dx1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dx2 + x2.grad
dm2 = dm2 + m2.grad
x2.grad = None
m2.grad = None
with torch.no_grad():
m = torch.cat([m1, m2.detach()], dim = 2)
dm = torch.cat([dm1, dm2], dim = 2)
x = torch.cat([x1, x2.detach()], dim = 2)
dx = torch.cat([dx1, dx2], dim = 2)
return x, m, dx, dm
# reverse and non reverse functions
class ReversibleFunction(Function):
@staticmethod
def forward(ctx, inp, ind, blocks, kwargs):
x, m = split_at_index(1, ind, inp)
for block in blocks:
x, m = block(x, m, _reverse = True, **kwargs)
ctx.blocks = blocks
ctx.kwargs = kwargs
ctx.ind = ind
ctx.save_for_backward(x.detach(), m.detach())
return torch.cat((x, m), dim = 1)
@staticmethod
def backward(ctx, d):
ind = ctx.ind
blocks = ctx.blocks
kwargs = ctx.kwargs
dy, dn = split_at_index(1, ind, d)
y, n = ctx.saved_tensors
for block in blocks[::-1]:
y, n, dy, dn = block.backward_pass(y, n, dy, dn, **kwargs)
d = torch.cat((dy, dn), dim = 1)
return d, None, None, None
reversible_apply = ReversibleFunction.apply
def irreversible_apply(inputs, ind, blocks, kwargs):
x, m = split_at_index(1, ind, inputs)
for block in blocks:
x, m = block(x, m, _reverse = False, **kwargs)
return torch.cat((x, m), dim = 1)
# main reversible sequence class
class ReversibleSequence(nn.Module):
def __init__(self, input_blocks, block_types):
super().__init__()
self.block_types = block_types
blocks = nn.ModuleList([])
for block, block_type in zip(input_blocks, block_types):
if block_type == 'self':
reversible_klass = ReversibleSelfAttnBlock
elif block_type == 'cross':
reversible_klass = ReversibleCrossAttnBlock
elif block_type == 'conv':
reversible_klass = ReversibleSelfAttnBlock
blocks.append(reversible_klass(*block))
self.blocks = blocks
def forward(
self,
seq,
msa,
seq_shape = None,
msa_shape = None,
mask = None,
msa_mask = None,
seq_pos_emb = None,
msa_pos_emb = None,
seq_to_msa_pos_emb = None,
msa_to_seq_pos_emb = None,
reverse = True
):
assert exists(msa), 'reversibility does not work with no MSA sequences yet'
blocks = self.blocks
seq, msa = list(map(lambda t: torch.cat((t, t), dim = -1), (seq, msa)))
kwargs = {'mask': mask, 'msa_mask': msa_mask, 'seq_shape': seq_shape, 'msa_shape': msa_shape, 'seq_pos_emb': seq_pos_emb, 'msa_pos_emb': msa_pos_emb, 'seq_to_msa_pos_emb': seq_to_msa_pos_emb, 'msa_to_seq_pos_emb': msa_to_seq_pos_emb}
fn = reversible_apply if reverse else irreversible_apply
ind = seq.shape[1]
inp = torch.cat((seq, msa), dim = 1)
out = fn(inp, ind, blocks, kwargs)
seq, msa = split_at_index(1, ind, out)
return list(map(lambda t: reduce(t, 'b n (c d) -> b n d', 'mean', c = 2), (seq, msa)))
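if __name__ == "__main__":
    # Hypothetical self-check sketch, not part of the original file: the additive
    # coupling above (y1 = x1 + f(x2), y2 = x2 + g(y1)) is exactly invertible, so
    # backward_pass should recover the inputs from the outputs. The toy `Sub`
    # module stands in for the real attention / feedforward blocks and simply
    # ignores the extra kwargs (shape, mask, rotary_emb) they receive.
    class Sub(nn.Module):
        def __init__(self, dim):
            super().__init__()
            self.proj = nn.Linear(dim, dim)

        def forward(self, x, *args, **kwargs):
            return self.proj(x)

    dim = 8
    block = ReversibleSelfAttnBlock(Sub(dim), Sub(dim), Sub(dim), Sub(dim))
    x = torch.randn(1, 4, 2 * dim)  # sequence stream, channels doubled into two halves
    m = torch.randn(1, 6, 2 * dim)  # MSA stream
    y, n = block(x, m, _reverse = True)
    x_rec, m_rec, _, _ = block.backward_pass(y.detach(), n.detach(), torch.zeros_like(y), torch.zeros_like(n))
    print(torch.allclose(x, x_rec, atol = 1e-5), torch.allclose(m, m_rec, atol = 1e-5))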
| alphafold2-main | alphafold2_pytorch/reversible.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from alphafold2_pytorch import constants
from einops import rearrange
# MSA MLM
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
class MLM(nn.Module):
def __init__(
self,
dim,
num_tokens,
mask_id,
mask_prob = 0.15,
random_replace_token_prob = 0.1,
keep_token_same_prob = 0.1,
exclude_token_ids = (0,)
):
super().__init__()
self.to_logits = nn.Linear(dim, num_tokens)
self.mask_id = mask_id
self.mask_prob = mask_prob
self.exclude_token_ids = exclude_token_ids
self.keep_token_same_prob = keep_token_same_prob
self.random_replace_token_prob = random_replace_token_prob
def noise(self, seq, mask):
num_msa = seq.shape[1]
seq = rearrange(seq, 'b n ... -> (b n) ...')
mask = rearrange(mask, 'b n ... -> (b n) ...')
# prepare masks for noising sequence
excluded_tokens_mask = mask
for token_id in self.exclude_token_ids:
excluded_tokens_mask = excluded_tokens_mask & (seq != token_id)
mlm_mask = get_mask_subset_with_prob(excluded_tokens_mask, self.mask_prob)
# keep some tokens the same
replace_token_with_mask = get_mask_subset_with_prob(mlm_mask, 1. - self.keep_token_same_prob)
# replace with mask
seq = seq.masked_fill(mlm_mask, self.mask_id)
# generate random tokens
random_replace_token_prob_mask = get_mask_subset_with_prob(mlm_mask, (1 - self.keep_token_same_prob) * self.random_replace_token_prob)
random_tokens = torch.randint(1, constants.NUM_AMINO_ACIDS, seq.shape).to(seq.device)
for token_id in self.exclude_token_ids:
random_replace_token_prob_mask = random_replace_token_prob_mask & (random_tokens != token_id) # make sure you never substitute a token with an excluded token type (pad, start, end)
# noise sequence
noised_seq = torch.where(random_replace_token_prob_mask, random_tokens, seq)
noised_seq = rearrange(noised_seq, '(b n) ... -> b n ...', n = num_msa)
mlm_mask = rearrange(mlm_mask, '(b n) ... -> b n ...', n = num_msa)
return noised_seq, mlm_mask
def forward(self, seq_embed, original_seq, mask):
logits = self.to_logits(seq_embed)
seq_logits = logits[mask]
seq_labels = original_seq[mask]
loss = F.cross_entropy(seq_logits, seq_labels, reduction = 'mean')
return loss
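if __name__ == "__main__":
    # Hypothetical usage sketch, not part of the original file. It noises a dummy
    # MSA batch and computes the MLM loss on random stand-in embeddings; in the
    # real pipeline `seq_embed` would come from the trunk after embedding the
    # noised sequence. The token / mask ids below are illustrative assumptions.
    dim = 32
    mask_id = constants.NUM_AMINO_ACIDS                       # assume one extra id for [MASK]
    mlm = MLM(dim = dim, num_tokens = constants.NUM_AMINO_ACIDS + 1, mask_id = mask_id)
    seq  = torch.randint(1, constants.NUM_AMINO_ACIDS, (2, 4, 16))   # (batch, num_msa, len)
    mask = torch.ones(2, 4, 16).bool()
    noised_seq, mlm_mask = mlm.noise(seq, mask)
    seq_embed = torch.randn(2, 4, 16, dim)                    # stand-in trunk embeddings
    loss = mlm(seq_embed, seq, mlm_mask)
    print(noised_seq.shape, mlm_mask.shape, loss.item())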
| alphafold2-main | alphafold2_pytorch/mlm.py |
import torch
# constants
MAX_NUM_MSA = 20
MAX_NUM_TEMPLATES = 10
NUM_AMINO_ACIDS = 21
NUM_EMBEDDS_TR = 1280 # best esm model
NUM_EMBEDDS_T5 = 1024 # best t5 model
NUM_COORDS_PER_RES = 14
DISTOGRAM_BUCKETS = 37
THETA_BUCKETS = 25
PHI_BUCKETS = 13
OMEGA_BUCKETS = 25
# embedding related constants
MSA_EMBED_DIM = 768
MSA_MODEL_PATH = ["facebookresearch/esm", "esm_msa1_t12_100M_UR50S"]
ESM_EMBED_DIM = 1280
ESM_MODEL_PATH = ["facebookresearch/esm", "esm1b_t33_650M_UR50S"]
PROTTRAN_EMBED_DIM = 1024
# default device
DEVICE_NAME = 'cuda' if torch.cuda.is_available() else 'cpu'
DEVICE = torch.device(DEVICE_NAME)
# aminoacid data
AA_DATA = {
'A': {
'bonds': [[0,1], [1,2], [2,3], [1,4]]
},
'R': {
'bonds': [[0,1], [1,2], [2,3], [2,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10]]
},
'N': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'D': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'C': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'Q': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [6,8]]
},
'E': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'G': {
'bonds': [[0,1], [1,2], [2,3]]
},
'H': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [5,9]]
},
'I': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[4,7]]
},
'L': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'K': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'M': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7]]
},
'F': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [5,10]]
},
'P': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[0,6]]
},
'S': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'T': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'W': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [10,11], [11,12],
[12, 13], [5,13], [8,13]]
},
'Y': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10], [10,11], [5,11]]
},
'V': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'_': {
'bonds': []
}
}
| alphafold2-main | alphafold2_pytorch/constants.py |
from alphafold2_pytorch.alphafold2 import Alphafold2, Evoformer
| alphafold2-main | alphafold2_pytorch/__init__.py |
# utils for working with 3d-protein structures
import os
import re
import numpy as np
import torch
import contextlib
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# custom
import mp_nerf
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
return val is not None
# constants: same as in alphafold2.py
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
distances = torch.cdist(coords, coords, p=2)
boundaries = torch.linspace(2, 20, steps = num_buckets, device = coords.device)
discretized_distances = torch.bucketize(distances, boundaries[:-1])
discretized_distances.masked_fill_(~(mask[..., None] & mask[..., None, :]), ignore_index)
return discretized_distances
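# Hypothetical usage sketch (comment only, not part of the original file): bucketize
# true pairwise distances into distogram labels, with pairs involving padded
# positions set to the ignore index.
#
#   coords = torch.randn(1, 128, 3)            # e.g. C-alpha coordinates
#   mask   = torch.ones(1, 128).bool()
#   labels = get_bucketed_distance_matrix(coords, mask)   # (1, 128, 128) long tensor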
# decorators
def set_backend_kwarg(fn):
@wraps(fn)
def inner(*args, backend = 'auto', **kwargs):
if backend == 'auto':
backend = 'torch' if isinstance(args[0], torch.Tensor) else 'numpy'
kwargs.update(backend = backend)
return fn(*args, **kwargs)
return inner
def expand_dims_to(t, length = 3):
if length == 0:
return t
return t.reshape(*((1,) * length), *t.shape) # will work with both torch and numpy
def expand_arg_dims(dim_len = 3):
""" pack here for reuse.
turns input into (B x D x N)
"""
def outer(fn):
@wraps(fn)
def inner(x, y, **kwargs):
assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
remaining_len = dim_len - len(x.shape)
x = expand_dims_to(x, length = remaining_len)
y = expand_dims_to(y, length = remaining_len)
return fn(x, y, **kwargs)
return inner
return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
def outer(fn):
@wraps(fn)
def inner(*args, **kwargs):
backend = kwargs.pop('backend')
passed_args = fn(*args, **kwargs)
passed_args = list(passed_args)
if isinstance(passed_args[-1], dict):
passed_kwargs = passed_args.pop()
else:
passed_kwargs = {}
backend_fn = torch_fn if backend == 'torch' else numpy_fn
return backend_fn(*passed_args, **passed_kwargs)
return inner
return outer
@contextlib.contextmanager
def torch_default_dtype(dtype):
prev_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(prev_dtype)
# preprocess data
def get_atom_ids_dict():
""" Get's a dict mapping each atom to a token. """
ids = set(["", "N", "CA", "C", "O"])
for k,v in SC_BUILD_INFO.items():
for name in v["atom-names"]:
ids.add(name)
return {k: i for i,k in enumerate(sorted(ids))}
def make_cloud_mask(aa):
""" relevent points will be 1. paddings will be 0. """
mask = np.zeros(constants.NUM_COORDS_PER_RES)
# early stop if padding token
if aa == "_":
return mask
# get num of atoms in aa
n_atoms = 4+len( SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"] )
mask[:n_atoms] = 1
return mask
def make_atom_id_embedds(aa, atom_ids):
""" Return the tokens for each atom in the aa. """
mask = np.zeros(constants.NUM_COORDS_PER_RES)
# early stop if padding token
if aa == "_":
return mask
# get atom id
atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
for i,atom in enumerate(atom_list):
        mask[i] = atom_ids[atom]
return mask
ATOM_IDS = get_atom_ids_dict()
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
} for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
""" Downloads a PDB entry from the RCSB PDB.
Inputs:
* name: str. the PDB entry id. 4 characters, capitalized.
        * route: str. route of the destination file. usually ".pdb" extension
        Output: route of the destination file
"""
os.system(f"curl https://files.rcsb.org/download/{name}.pdb > {route}")
return route
def clean_pdb(name, route=None, chain_num=None):
""" Cleans the structure to only leave the important part.
Inputs:
* name: str. route of the input .pdb file
* route: str. route of the output. will overwrite input if not provided
* chain_num: int. index of chain to select (1-indexed as pdb files)
        Output: route of the destination file.
"""
import mdtraj
destin = route if route is not None else name
# read input
raw_prot = mdtraj.load_pdb(name)
# iterate over prot and select the specified chains
idxs = []
for chain in raw_prot.topology.chains:
# if arg passed, only select that chain
if chain_num is not None:
if chain_num != chain.index:
continue
# select indexes of chain
chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
idxs.extend( chain_idxs.tolist() )
# sort: topology and xyz selection are ordered
idxs = sorted(idxs)
    # get new trajectory from the selected subset of indexes and save
prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
topology=raw_prot.topology.subset(idxs))
prot.save(destin)
return destin
def custom2pdb(coords, proteinnet_id, route):
""" Takes a custom representation and turns into a .pdb file.
Inputs:
* coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
                  same order as in the proteinnet is assumed (same as raw pdb file)
* proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
see: https://github.com/aqlaboratory/proteinnet/
        * route: str. destination route.
Output: tuple of routes: (original, generated) for the structures.
"""
import mdtraj
# convert to numpy
if isinstance(coords, torch.Tensor):
coords = coords.detach().cpu().numpy()
# ensure (1, N, 3)
    if coords.shape[0] == 3:
        coords = coords.T
    coords = np.expand_dims(coords, axis=0)
# get pdb id and chain num
pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
    # download pdb file and select the appropriate chain
download_pdb(pdb_name, pdb_destin)
clean_pdb(pdb_destin, chain_num=chain_num)
# load trajectory scaffold and replace coordinates - assumes same order
scaffold = mdtraj.load_pdb(pdb_destin)
scaffold.xyz = coords
scaffold.save(route)
return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
""" Turns coordinates into PDB files ready to be visualized.
Inputs:
* seq: (L,) tensor of ints (sidechainnet aa-key pairs)
* coords: (3, N) coords of atoms
* cloud_mask: (L, C) boolean mask of occupied spaces in scn format
* prefix: str. directory to save files.
        * name: str. name of the destination file (ex: pred1.pdb)
"""
    from sidechainnet.structure.StructureBuilder import StructureBuilder
    scaffold = torch.zeros( *cloud_mask.shape, 3 )
    scaffold[cloud_mask] = coords.cpu().float()
    # build structures and save
    pred = StructureBuilder( seq, crd=scaffold )
pred.to_pdb(prefix+name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
""" Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
deletekeys = dict.fromkeys(string.ascii_lowercase)
deletekeys["."] = None
deletekeys["*"] = None
translation = str.maketrans(deletekeys)
return sequence.translate(translation)
def read_msa(filename: str, nseq: int):
""" Reads the first nseq sequences from an MSA file, automatically removes insertions."""
return [(record.description, remove_insertions(str(record.seq)))
for record in itertools.islice(SeqIO.parse(filename, "fasta"), nseq)]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_embed_input(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(lambda c: isinstance(c, str), out)):
return (None, ''.join(out))
return out
def ids_to_prottran_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for ids in x:
chars = ' '.join([id2aa[i] for i in ids])
chars = re.sub(r"[UZOB]", "X", chars)
out.append(chars)
return out
def get_prottran_embedd(seq, model, tokenizer, device = None):
from transformers import pipeline
fe = pipeline('feature-extraction', model = model, tokenizer = tokenizer, device = (-1 if not exists(device) else device.index))
max_seq_len = seq.shape[1]
embedd_inputs = ids_to_prottran_input(seq.cpu().tolist())
embedding = fe(embedd_inputs)
embedding = torch.tensor(embedding, device = device)
return embedding[:, 1:(max_seq_len + 1)]
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
""" Returns the MSA_tr embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: MSA_tr model (see train_end2end.py for an example)
* batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA
* embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
"""
# use MSA transformer
REPR_LAYER_NUM = 12
    device = msa.device
max_seq_len = msa.shape[-1]
embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is the start token, so take from index 1 onward
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :]
return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
""" Returns the ESM embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: ESM model (see train_end2end.py for an example)
* batch_converter: ESM batch converter (see train_end2end.py for an example)
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for ESM-1b
* embedd_dim: number of embedding dimensions. 1280 for ESM-1b
"""
# use ESM transformer
device = seq.device
REPR_LAYER_NUM = 33
max_seq_len = seq.shape[-1]
embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is the start token, so take from index 1 onward
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :].unsqueeze(dim=1)
return token_reps
def get_t5_embedd(seq, tokenizer, encoder, msa_data=None, device=None):
""" Returns the ProtT5-XL-U50 embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* tokenizer: tokenizer model: T5Tokenizer
* encoder: encoder model: T5EncoderModel
ex: from transformers import T5EncoderModel, T5Tokenizer
model_name = "Rostlab/prot_t5_xl_uniref50"
tokenizer = T5Tokenizer.from_pretrained(model_name, do_lower_case=False )
model = T5EncoderModel.from_pretrained(model_name)
# prepare model
model = model.to(device)
model = model.eval()
if torch.cuda.is_available():
model = model.half()
Outputs: tensor of (batch, n_seqs, L, embedd_dim)
* n_seqs: number of sequences in the MSA. 1 for T5 models
* embedd_dim: number of embedding dimensions. 1024 for T5 models
"""
# get params and prepare
device = seq.device if device is None else device
embedd_inputs = ids_to_prottran_input(seq.cpu().tolist())
# embedd - https://huggingface.co/Rostlab/prot_t5_xl_uniref50
inputs_embedding = []
shift_left, shift_right = 0, -1
ids = tokenizer.batch_encode_plus(embedd_inputs, add_special_tokens=True,
padding=True,
return_tensors="pt")
with torch.no_grad():
embedding = encoder(input_ids=torch.tensor(ids['input_ids']).to(device),
attention_mask=torch.tensor(ids["attention_mask"]).to(device))
# return (batch, seq_len, embedd_dim)
token_reps = embedding.last_hidden_state[:, shift_left:shift_right].to(device)
token_reps = expand_dims_to(token_reps, 4-len(token_reps.shape))
return token_reps.float()
def get_all_protein_ids(dataloader, verbose=False):
""" Given a sidechainnet dataloader for a CASP version,
Returns all the ids belonging to proteins.
Inputs:
* dataloader: a sidechainnet dataloader for a CASP version
Outputs: a set containing the ids for all protein entries.
"""
# store ids here
ids = set([])
# iterate for all batches
    for i, batch in enumerate(dataloader):
# for breaking from 2 loops at once
try:
for i in range(batch.int_seqs.shape[0]):
# check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
max_len_10 = len(batch.pids[i]) < 10
fragments = [len(x) <= 4 for x in batch.pids[i].split("_")]
fragments_under_4 = sum(fragments) == len(fragments) # AND CONDITION
# record id
if max_len_10 and fragments_under_4:
ids.add(batch.pids[i])
else:
if verbose:
print("skip:", batch.pids[i], "under 4", fragments)
except StopIteration:
break
# returns set of ids
return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
""" Gets the boolean mask atom positions (not all aas have same atoms).
Inputs:
* scn_seq: (batch, length) sequence as provided by Sidechainnet package
* boolean: whether to return as array of idxs or boolean values
* coords: optional .(batch, lc, 3). sidechainnet coords.
returns the true mask (solves potential atoms that might not be provided)
Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
"""
scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
# early check for coords mask
if coords is not None:
batch_mask = ( rearrange(coords, '... (l c) d -> ... l c d', c=constants.NUM_COORDS_PER_RES) == 0 ).sum(dim=-1) < coords.shape[-1]
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
# do loop in cpu
device = scn_seq.device
batch_mask = []
scn_seq = scn_seq.cpu().tolist()
for i, seq in enumerate(scn_seq):
# get masks for each prot (points for each aa)
batch_mask.append( torch.tensor([CUSTOM_INFO[VOCAB._int2char[aa]]['cloud_mask'] \
for aa in seq]).bool().to(device) )
# concat in last dim
batch_mask = torch.stack(batch_mask, dim=0)
# return mask (boolean or indexes)
if boolean:
return batch_mask.bool()
else:
return batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
""" Gets the boolean mask for N and CA positions.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
* n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
* bool: whether to return as array of idxs or boolean values
Outputs: (N_mask, CA_mask, C_mask)
"""
wrapper = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
# N is the first atom in every AA. CA is the 2nd.
wrapper[..., 0] = 1
wrapper[..., 1] = 2
wrapper[..., 2] = 3
wrapper = rearrange(wrapper, '... l c -> ... (l c)')
# find idxs
N_mask = wrapper == 1
CA_mask = wrapper == 2
C_mask = wrapper == 3
if boolean:
return N_mask, CA_mask, C_mask
return torch.nonzero(N_mask), torch.nonzero(CA_mask), torch.nonzero(C_mask)
def scn_atom_embedd(scn_seq):
""" Returns the token for each atom in the aa.
Inputs:
* scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
"""
device = scn_seq.device
batch_tokens = []
# do loop in cpu
scn_seq = scn_seq.cpu().tolist()
for i,seq in enumerate(scn_seq):
batch_tokens.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa)]["atom_id_embedd"] \
for aa in seq]) )
batch_tokens = torch.stack(batch_tokens, dim=0).long().to(device)
return batch_tokens
def mat_input_to_masked(x, x_mask=None, edges_mat=None, edges=None,
edge_mask=None, edge_attr_mat=None,
edge_attr=None):
""" Turns the padded input and edges + mask into the
non-padded inputs and edges.
At least one of (edges_mat, edges) must be provided.
The same format for edges and edge_attr must be provided
(either adj matrix form or flattened form).
Inputs:
* x: ((batch), N, D) a tensor of N nodes and D dims for each one
* x_mask: ((batch), N,) boolean mask for x
        * edges: (2, E) optional. indices of the corresponding adjacency matrix.
* edges_mat: ((batch), N, N) optional. adjacency matrix for x
* edge_mask: optional. boolean mask of the same shape of either "edge_mat" or "edges".
* edge_attr: (E, D_edge) optional. edge attributes of D_edge dims.
* edge_attr_mat: ((batch), N, N) optional. adjacency matrix with features
Outputs:
* x: (N_, D) the masked node features
* edge_index: (2, E_) the masked x-indices for the edges
* edge_attr: (E_, D_edge) the masked edge attributes
* batch: (N_,) the corresponding index in the batch for each node
"""
# collapse batch dimension
if len(x.shape) == 3:
batch_dim = x.shape[1]
# collapse for x and its mask
x = rearrange(x, 'b n d ... -> (b n) d ...')
if x_mask is not None:
x_mask = rearrange(x_mask, 'b n ... -> (b n) ...')
else:
x_mask = torch.ones_like(x[..., 0]).bool()
# collapse for edge indexes and attributes if needed
if edges_mat is not None and edges is None:
edges = torch.nonzero(edges_mat, as_tuple=False).t()
edges = edges[1:] + edges[:1]*batch_dim
# get the batch identifier for each node
batch = (torch.arange(x.shape[0], device=x.device) // batch_dim)[x_mask]
else:
# edges to indices format
if edges_mat is not None and edges is None:
edges = torch.nonzero(edges_mat, as_tuple=False).t()
# get the batch identifier for each node
batch = torch.zeros(x.shape[0], device=x.device).to(x.device)
# adapt edge attrs if provided
if edge_attr_mat is not None and edge_attr is None:
        edge_attr = edge_attr_mat[edges_mat.bool()]
# gen edge_mask if not provided
if edge_mask is None:
edge_mask = torch.ones_like(edges[-1]).bool()
# begin applying masks
x = x[x_mask]
# process edge indexes: get square mat and remove all non-coding atoms
max_num = edges.max().item()+1
wrapper = torch.zeros(max_num, max_num).to(x.device)
wrapper[edges[0][edge_mask], edges[1][edge_mask]] = 1
wrapper = wrapper[x_mask, :][:, x_mask]
edge_index = torch.nonzero(wrapper, as_tuple=False).t()
# process edge attr
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
return x, edge_index, edge_attr, batch
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
""" Calculates the n-th degree adjacency matrix.
Performs mm of adj_mat and adds the newly added.
Default is dense. Mods for sparse version are done when needed.
Inputs:
* adj_mat: (N, N) adjacency tensor
* n: int. degree of the output adjacency
* sparse: bool. whether to use torch-sparse module
Outputs:
* edge_idxs: ij positions of the adjacency matrix
* edge_attrs: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
"""
adj_mat = adj_mat.float()
attr_mat = torch.zeros_like(adj_mat)
new_adj_mat = adj_mat.clone()
for i in range(n):
if i == 0:
attr_mat += adj_mat
continue
if i == 1 and sparse:
idxs = adj_mat.nonzero().t()
vals = adj_mat[idxs[0], idxs[1]]
new_idxs = idxs.clone()
new_vals = vals.clone()
m, k, n = 3 * [adj_mat.shape[0]] # (m, n) * (n, k) , but adj_mats are squared: m=n=k
if sparse:
new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n)
new_vals = new_vals.bool().float()
# fill by indexes bc it's faster in sparse mode - will need an intersection function
previous = attr_mat[new_idxs[0], new_idxs[1]].bool().float()
attr_mat[new_idxs[0], new_idxs[1]] = (1 - previous)*(i+1)
else:
new_adj_mat = (new_adj_mat @ adj_mat).bool().float()
attr_mat.masked_fill( (new_adj_mat - attr_mat.bool().float()).bool(), i+1 )
return new_adj_mat, attr_mat
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True, sparse=False):
""" Returns the idxs of covalent bonds for a protein.
Inputs
* seq: (b, n) torch long.
* adj_degree: int. adjacency degree
* cloud_mask: mask selecting the present atoms.
* mat: whether to return as indexes of only atoms (PyG version)
or matrices of masked atoms (for batched training).
for indexes, only 1 seq is supported.
* sparse: bool. whether to use torch_sparse for adj_mat calc
Outputs: edge_idxs, edge_types (degree of adjacency).
"""
device = seqs.device
# set up container adj_mat (will get trimmed - less than 14)
next_aa = NUM_COORDS_PER_RES
adj_mat = torch.zeros(seqs.shape[0], *[seqs.shape[1]*NUM_COORDS_PER_RES]*2)
# not needed to device since it's only for indices
seq_list = seqs.cpu().tolist()
for s,seq in enumerate(seq_list):
next_idx = 0
for i,idx in enumerate(seq):
aa_bonds = constants.AA_DATA[VOCAB._int2char[idx]]['bonds']
# if no edges -> padding token -> finish bond creation for this seq
if len(aa_bonds) == 0:
break
# correct next position. for indexes functionality
next_aa = max(aa_bonds, key=lambda x: max(x))[-1]
# offset by pos in chain ( intra-aa bonds + with next aa )
bonds = next_idx + torch.tensor( aa_bonds + [[2, next_aa]] ).t()
next_idx += next_aa
# delete link with next if final AA in seq
if i == seqs.shape[1] - 1:
bonds = bonds[:, :-1]
# modify adj mat
adj_mat[s, bonds[0], bonds[1]] = 1
# convert to undirected
adj_mat[s] = adj_mat[s] + adj_mat[s].t()
# do N_th degree adjacency
adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=sparse)
if mat:
# return the full matrix/tensor
return attr_mat.bool().to(seqs.device), attr_mat.to(device)
else:
edge_idxs = attr_mat[0].nonzero().t().long()
edge_types = attr_mat[0, edge_idxs[0], edge_idxs[1]]
return edge_idxs.to(seqs.device), edge_types.to(seqs.device)
def sidechain_container(seqs, backbones, atom_mask, cloud_mask=None, padding_tok=20):
""" Gets a backbone of the protein, returns the whole coordinates
with sidechains (same format as sidechainnet). Keeps differentiability.
Inputs:
* seqs: (batch, L) either tensor or list
* backbones: (batch, L*n_aa, 3): assume batch=1 (could be extended (?not tested)).
Coords for (N-term, C-alpha, C-term, (c_beta)) of every aa.
* atom_mask: (14,). int or bool tensor specifying which atoms are passed.
        * cloud_mask: (batch, l, c). optional. cloud mask from scn_cloud_mask().
                      if passed, sets points outside of the mask to 0.
* padding: int. padding token. same as in sidechainnet: 20
Outputs: whole coordinates of shape (batch, L, 14, 3)
"""
atom_mask = atom_mask.bool().cpu().detach()
cum_atom_mask = atom_mask.cumsum(dim=-1).tolist()
device = backbones.device
batch, length = backbones.shape[0], backbones.shape[1] // cum_atom_mask[-1]
predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
# early check if whole chain is already pred
if cum_atom_mask[-1] == 14:
return predicted
# build scaffold from (N, CA, C, CB) - do in cpu
new_coords = torch.zeros(batch, length, constants.NUM_COORDS_PER_RES, 3)
predicted = predicted.cpu() if predicted.is_cuda else predicted
# fill atoms if they have been passed
for i,atom in enumerate(atom_mask.tolist()):
if atom:
new_coords[:, :, i] = predicted[:, :, cum_atom_mask[i]-1]
# generate sidechain if not passed
for s,seq in enumerate(seqs):
# format seq accordingly
if isinstance(seq, torch.Tensor):
padding = (seq == padding_tok).sum().item()
seq_str = ''.join([VOCAB._int2char[aa] for aa in seq.cpu().numpy()[:-padding or None]])
elif isinstance(seq, str):
padding = 0
seq_str = seq
# get scaffolds - will overwrite oxygen since its position is fully determined by N-C-CA
scaffolds = mp_nerf.proteins.build_scaffolds_from_scn_angles(seq_str, angles=None, device="cpu")
coords, _ = mp_nerf.proteins.sidechain_fold(wrapper = new_coords[s, :-padding or None].detach(),
**scaffolds, c_beta = cum_atom_mask[4]==5)
# add detached scn
for i,atom in enumerate(atom_mask.tolist()):
if not atom:
new_coords[:, :-padding or None, i] = coords[:, i]
new_coords = new_coords.to(device)
if cloud_mask is not None:
new_coords[torch.logical_not(cloud_mask)] = 0.
# replace any nan-s with previous point location (or N if pos is 13th of AA)
nan_mask = list(torch.nonzero(new_coords!=new_coords, as_tuple=True))
new_coords[nan_mask[0], nan_mask[1], nan_mask[2]] = new_coords[nan_mask[0],
nan_mask[1],
(nan_mask[-2]+1) % new_coords.shape[-1]]
return new_coords.to(device)
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
""" Returns the central estimate of a distogram. Median for now.
Inputs:
* distogram: (batch, N, N, B) where B is the number of buckets.
* bins: (B,) containing the cutoffs for the different buckets
* min_t: float. lower bound for distances.
Outputs:
* central: (batch, N, N)
* dispersion: (batch, N, N)
* weights: (batch, N, N)
"""
shape, device = distogram.shape, distogram.device
# threshold to weights and find mean value of each bin
n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
n_bins[0] = 1.5
n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
# calculate measures of centrality and dispersion -
magnitudes = distogram.sum(dim=-1)
if center == "median":
cum_dist = torch.cumsum(distogram, dim=-1)
medium = 0.5 * cum_dist[..., -1:]
central = torch.searchsorted(cum_dist, medium).squeeze()
central = n_bins[ torch.min(central, max_bin_allowed) ]
elif center == "mean":
central = (distogram * n_bins).sum(dim=-1) / magnitudes
# create mask for last class - (IGNORE_INDEX)
mask = (central <= bins[-2].item()).float()
# mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
diag_idxs = np.arange(shape[-2])
central = expand_dims_to(central, 3 - len(central.shape))
central[:, diag_idxs, diag_idxs] *= 0.
# provide weights
if wide == "var":
dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
elif wide == "std":
dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
else:
dispersion = torch.zeros_like(central, device=device)
# rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
weights = mask / (1+dispersion)
weights[weights != weights] *= 0.
weights[:, diag_idxs, diag_idxs] *= 0.
return central, weights
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
Assumes (for now) distogram is (N x N) and symmetric
Outs:
* best_3d_coords: (batch x 3 x N)
* historic_stresses: (batch x steps)
"""
device, dtype = pre_dist_mat.device, pre_dist_mat.type()
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
diag_idxs = np.arange(N)
his = [torch.tensor([np.inf]*batch, device=device)]
# initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
# follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
D = pre_dist_mat**2
M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
# do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
# https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
svds = [torch.svd_lowrank(mi) for mi in M]
u = torch.stack([svd[0] for svd in svds], dim=0)
s = torch.stack([svd[1] for svd in svds], dim=0)
v = torch.stack([svd[2] for svd in svds], dim=0)
best_3d_coords = torch.bmm(u, torch.diag_embed(s).abs().sqrt())[..., :3]
# only eigen - way faster but not weights
if weights is None and eigen==True:
return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
elif eigen==True:
if verbose:
print("Can't use eigen flag if weights are active. Fallback to iterative")
# continue the iterative way
if weights is None:
weights = torch.ones_like(pre_dist_mat)
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
best_3d_coords = best_3d_coords.contiguous()
dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[ dist_mat <= 0 ] += 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
# update
coords = (1. / N * torch.matmul(B, best_3d_coords))
dis = torch.norm(coords, dim=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (his[-1] - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
his.append( stress / dis )
return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
""" Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) the distogram is (N x N) and symmetric
Out:
* best_3d_coords: (3 x N)
* historic_stress
"""
if weights is None:
weights = np.ones_like(pre_dist_mat)
# ensure batched MDS
pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
# start
batch, N, _ = pre_dist_mat.shape
his = [np.inf]
# init random coords
best_stress = np.inf * np.ones(batch)
best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
# iterative updates:
for i in range(iters):
# compute distance matrix of coords and stress
dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
# perturb - update X using the Guttman transform - sklearn-like
dist_mat[dist_mat == 0] = 1e-7
ratio = weights * (pre_dist_mat / dist_mat)
B = -ratio
B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
# update - double transpose. TODO: consider fix
coords = (1. / N * np.matmul(best_3d_coords, B))
dis = np.linalg.norm(coords, axis=(-1, -2))
if verbose >= 2:
print('it: %d, stress %s' % (i, stress))
# update metrics if relative improvement above tolerance
if (best_stress - stress / dis).mean() <= tol:
if verbose:
print('breaking at iteration %d with stress %s' % (i,
stress / dis))
break
best_3d_coords = coords
best_stress = stress / dis
his.append(best_stress)
return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Can't use torch.dot bc it does not broadcast
Inputs:
* c1: (batch, 3) or (3,)
        * c2: (batch, 3) or (3,)
        * c3: (batch, 3) or (3,)
        * c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return torch.atan2( ( (torch.norm(u2, dim=-1, keepdim=True) * u1) * torch.cross(u2,u3, dim=-1) ).sum(dim=-1) ,
( torch.cross(u1,u2, dim=-1) * torch.cross(u2, u3, dim=-1) ).sum(dim=-1) )
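# Hypothetical sanity check (comment only, not part of the original file): four
# points forming a 90-degree twist should give a dihedral of ~ pi/2.
#
#   c1 = torch.tensor([0., 1., 0.]); c2 = torch.tensor([0., 0., 0.])
#   c3 = torch.tensor([1., 0., 0.]); c4 = torch.tensor([1., 0., 1.])
#   get_dihedral_torch(c1, c2, c3, c4)   # tensor(1.5708) ~ pi/2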
def get_dihedral_numpy(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Inputs:
* c1: (batch, 3) or (3,)
        * c2: (batch, 3) or (3,)
        * c3: (batch, 3) or (3,)
        * c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return np.arctan2( ( (np.linalg.norm(u2, axis=-1, keepdims=True) * u1) * np.cross(u2,u3, axis=-1)).sum(axis=-1),
( np.cross(u1,u2, axis=-1) * np.cross(u2, u3, axis=-1) ).sum(axis=-1) )
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (batch, N) boolean mask for N-term positions
* CA_mask: (batch, N) boolean mask for C-alpha positions
* C_mask: (batch, N) or None. boolean mask for C-alpha positions or
automatically calculate from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
Note: use [0] since all prots in batch have same backbone
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
# ensure dims
N_mask = expand_dims_to( N_mask, 2-len(N_mask.shape) )
CA_mask = expand_dims_to( CA_mask, 2-len(CA_mask.shape) )
if C_mask is not None:
C_mask = expand_dims_to( C_mask, 2-len(C_mask.shape) )
else:
C_mask = torch.logical_not(torch.logical_or(N_mask,CA_mask))
# select points
n_terms = pred_coords_[:, N_mask[0].squeeze()]
c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
c_terms = pred_coords_[:, C_mask[0].squeeze()]
    # compute phis for every protein in the batch
phis = [get_dihedral_torch(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return torch.stack([(x<0).float().mean() for x in phis], dim=0 )
return phis
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
prop=True, verbose=0):
""" Filters mirrors selecting the 1 with most N of negative phis.
Used as part of the MDScaling wrapper if arg is passed. See below.
Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
Inputs:
* pred_coords: (batch, 3, N) predicted coordinates
* N_mask: (N, ) boolean mask for N-term positions
* CA_mask: (N, ) boolean mask for C-alpha positions
* C_mask: (N, ) or None. boolean mask for C-alpha positions or
automatically calculate from N_mask and CA_mask if None.
* prop: bool. whether to return as a proportion of negative phis.
* verbose: bool. verbosity level
Output: (batch, N) containing the phi angles or (batch,) containing
the proportions.
"""
# detach gradients for angle calculation - mirror selection
pred_coords_ = np.transpose(pred_coords, (0, 2, 1))
n_terms = pred_coords_[:, N_mask.squeeze()]
c_alphas = pred_coords_[:, CA_mask.squeeze()]
# select c_term auto if not passed
if C_mask is not None:
c_terms = pred_coords_[:, C_mask]
else:
c_terms = pred_coords_[:, (np.ones_like(N_mask)-N_mask-CA_mask).squeeze().astype(bool) ]
    # compute phis for every protein in the batch
phis = [get_dihedral_numpy(c_terms[i, :-1],
n_terms[i, 1:],
c_alphas[i, 1:],
c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
# return percentage of lower than 0
if prop:
return np.array( [(x<0).mean() for x in phis] )
return phis
# alignment by centering + rotation to compute optimal RMSD
# adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
device = X.device
# center X and Y to the origin
X_ = X - X.mean(dim=-1, keepdim=True)
Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix (for each prot in the batch)
C = torch.matmul(X_, Y_.t()).detach()
if cpu:
C = C.cpu()
# Optimal rotation matrix via SVD
if int(torch.__version__.split(".")[1]) < 8:
# warning! int torch 1.<8 : W must be transposed
V, S, W = torch.svd(C)
W = W.t()
else:
V, S, W = torch.linalg.svd(C)
# determinant sign for direction correction
d = (torch.det(V) * torch.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = torch.matmul(V, W).to(device)
# calculate rotations
X_ = torch.matmul(X_.t(), U).t()
# return centered and aligned
return X_, Y_
def kabsch_numpy(X, Y):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
# center X and Y to the origin
X_ = X - X.mean(axis=-1, keepdims=True)
Y_ = Y - Y.mean(axis=-1, keepdims=True)
    # calculate covariance matrix (for each prot in the batch)
C = np.dot(X_, Y_.transpose())
# Optimal rotation matrix via SVD
V, S, W = np.linalg.svd(C)
# determinant sign for direction correction
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = np.dot(V, W)
# calculate rotations
X_ = np.dot(X_.T, U).T
# return centered and aligned
return X_, Y_
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2,
custom=None, distmat_mask=None, clamp=None):
""" Calculates a loss on the distance matrix - no need to align structs.
Inputs:
* X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
        * X_mat: (N, N) tensor. the predicted distance matrix. Optional.
* Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
        * Y_mat: (N, N) tensor. the true distance matrix. Optional.
* p: int. power for the distance calculation (2 for euclidean)
* q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
* custom: func or None. custom loss over distance matrices.
ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
* distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
* clamp: tuple of (min,max) values for clipping distance matrices. ex: (0,150)
"""
assert (X is not None or X_mat is not None) and \
(Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
# calculate distance matrices
if X_mat is None:
X = X.squeeze()
if clamp is not None:
X = torch.clamp(X, *clamp)
X_mat = torch.cdist(X, X, p=p)
if Y_mat is None:
Y = Y.squeeze()
if clamp is not None:
Y = torch.clamp(Y, *clamp)
Y_mat = torch.cdist(Y, Y, p=p)
if distmat_mask is None:
distmat_mask = torch.ones_like(Y_mat).bool()
# do custom expression if passed
if custom is not None:
return custom(X_mat.squeeze(), Y_mat.squeeze()).mean()
# **2 ensures always positive. Later scale back to desired power
else:
loss = ( X_mat - Y_mat )**2
if q != 2:
loss = loss**(q/2)
return loss[distmat_mask].mean()
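# Hypothetical usage sketch (comment only, not part of the original file): compare a
# prediction against the ground truth without any alignment step, either with the
# default squared-distance-matrix loss or with a custom soft loss as in the docstring.
#
#   pred, true = torch.randn(64, 3), torch.randn(64, 3)
#   mse_like = distmat_loss_torch(X = pred, Y = true, p = 2, q = 2)
#   soft     = distmat_loss_torch(X = pred, Y = true, custom = lambda a, b: 1 - 1 / (1 + (a - b)**2))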
def rmsd_torch(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return torch.sqrt( torch.mean((X - Y)**2, axis=(-1, -2)) )
def rmsd_numpy(X, Y):
""" Assumes x,y are both (B x D x N). See below for wrapper. """
return np.sqrt( np.mean((X - Y)**2, axis=(-1, -2)) )
def gdt_torch(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
device = X.device
if weights is None:
weights = torch.ones(1,len(cutoffs))
else:
weights = torch.tensor([weights]).to(device)
# set zeros and fill with values
GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
dist = ((X - Y)**2).sum(dim=1).sqrt()
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
# weighted mean
return (GDT*weights).mean(-1)
def gdt_numpy(X, Y, cutoffs, weights=None):
""" Assumes x,y are both (B x D x N). see below for wrapper.
* cutoffs is a list of `K` thresholds
* weights is a list of `K` weights (1 x each threshold)
"""
if weights is None:
weights = np.ones( (1,len(cutoffs)) )
else:
weights = np.array([weights])
# set zeros and fill with values
GDT = np.zeros( (X.shape[0], len(cutoffs)) )
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# iterate over thresholds
for i,cutoff in enumerate(cutoffs):
GDT[:, i] = (dist <= cutoff).mean(axis=-1)
# weighted mean
return (GDT*weights).mean(-1)
def tmscore_torch(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = max(15, X.shape[-1])
d0 = 1.24 * (L - 15)**(1/3) - 1.8
# get distance
dist = ((X - Y)**2).sum(dim=1).sqrt()
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(dim=-1)
def tmscore_numpy(X, Y):
""" Assumes x,y are both (B x D x N). see below for wrapper. """
L = max(15, X.shape[-1])
d0 = 1.24 * np.cbrt(L - 15) - 1.8
# get distance
dist = np.sqrt( ((X - Y)**2).sum(axis=1) )
# formula (see wrapper for source):
return (1 / (1 + (dist/d0)**2)).mean(axis=-1)
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
eigen=False, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_torch(pre_dist_mat, weights=weights,iters=iters,
tol=tol, eigen=eigen, verbose=verbose)
if not fix_mirror:
return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_torch(preds, N_mask, CA_mask, C_mask, prop=True)
to_correct = torch.nonzero( (phi_ratios < 0.5)).view(-1)
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
preds[to_correct, -1] = (-1)*preds[to_correct, -1]
if verbose == 2:
print("Corrected mirror idxs:", to_correct)
return preds, stresses
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
""" Handles the specifics of MDS for proteins (mirrors, ...) """
# batched mds for full parallel
preds, stresses = mds_numpy(pre_dist_mat, weights=weights,iters=iters,
tol=tol, verbose=verbose)
if not fix_mirror:
return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
for i,pred in enumerate(preds):
# fix mirrors by (-1)*Z if more (+) than (-) phi angles
        if phi_ratios[i] < 0.5:
preds[i, -1] = (-1)*preds[i, -1]
if verbose == 2:
print("Corrected mirror in struct no.", i)
return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
""" Computes the lddt score for each C_alpha.
https://academic.oup.com/bioinformatics/article/29/21/2722/195896
Inputs:
* true_coords: (b, l, c, d) in sidechainnet format.
* pred_coords: (b, l, c, d) in sidechainnet format.
* cloud_mask : (b, l, c) adapted for scn format.
* r_0: float. maximum inclusion radius in reference struct.
Outputs:
* (b, l) lddt for c_alpha scores (ranging between 0 and 1)
See wrapper below.
"""
device, dtype = true_coords.device, true_coords.type()
thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
# adapt masks
cloud_mask = cloud_mask.bool().cpu()
    c_alpha_mask = torch.zeros(cloud_mask.shape[1:]).bool() # no batch dim; kept on cpu to match cloud_mask
c_alpha_mask[..., 1] = True
# container for c_alpha scores (between 0,1)
wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
for bi, seq in enumerate(true_coords):
# select atoms for study
c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions
selected_pred = pred_coords[bi, c_alphas, :]
selected_target = true_coords[bi, c_alphas, :]
# get number under distance
dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
under_r0_target = dist_mat_target < r_0
compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
# measure diff below threshold
score = torch.zeros_like(under_r0_target).float()
max_score = torch.zeros_like(under_r0_target).float()
max_score[under_r0_target] = 4.
# measure under how many thresholds
score[under_r0_target] = thresholds.shape[0] - \
torch.bucketize( compare_dists, boundaries=thresholds ).float()
# dont include diagonal
l_mask = c_alphas.float().sum(dim=-1).bool()
wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
( max_score.sum(dim=-1) - thresholds.shape[0] )
return wrapper
################
### WRAPPERS ###
################
@set_backend_kwarg
@invoke_torch_or_numpy(mdscaling_torch, mdscaling_numpy)
def MDScaling(pre_dist_mat, **kwargs):
""" Gets distance matrix (-ces). Outputs 3d.
        Assumes (for now) the distogram is (N x N) and symmetric.
        For support of distograms: see `center_distogram_torch()`
Inputs:
* pre_dist_mat: (1, N, N) distance matrix.
* weights: optional. (N x N) pairwise relative weights .
* iters: number of iterations to run the algorithm on
* tol: relative tolerance at which to stop the algorithm if no better
improvement is achieved
* backend: one of ["numpy", "torch", "auto"] for backend choice
* fix_mirror: int. number of iterations to run the 3d generation and
pick the best mirror (highest number of negative phis)
* N_mask: indexing array/tensor for indices of backbone N.
Only used if fix_mirror > 0.
* CA_mask: indexing array/tensor for indices of backbone C_alpha.
Only used if fix_mirror > 0.
* verbose: whether to print logs
Outputs:
* best_3d_coords: (3 x N)
* historic_stress: (timesteps, )
"""
pre_dist_mat = expand_dims_to(pre_dist_mat, 3 - len(pre_dist_mat.shape))
return pre_dist_mat, kwargs
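# Hypothetical end-to-end sketch (comment only, not part of the original file): turn a
# predicted distogram into a 3d point cloud with the helpers above. `distogram_logits`
# is an assumed model output of shape (1, N, N, DISTOGRAM_BUCKETS).
#
#   distogram = distogram_logits.softmax(dim = -1)
#   central, weights = center_distogram_torch(distogram)
#   coords, stress = MDScaling(central, weights = weights, iters = 50, fix_mirror = False)
#   # coords: (1, 3, N), defined up to a rigid transform (and mirror, unless fix_mirror is used)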
@expand_arg_dims(dim_len = 2)
@set_backend_kwarg
@invoke_torch_or_numpy(kabsch_torch, kabsch_numpy)
def Kabsch(A, B):
""" Returns Kabsch-rotated matrices resulting
from aligning A into B.
Adapted from: https://github.com/charnley/rmsd/
* Inputs:
* A,B are (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of shape (3 x N)
"""
# run calcs - pick the 0th bc an additional dim was created
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(rmsd_torch, rmsd_numpy)
def RMSD(A, B):
""" Returns RMSD score as defined here (lower is better):
https://en.wikipedia.org/wiki/
Root-mean-square_deviation_of_atomic_positions
* Inputs:
* A,B are (B x 3 x N) or (3 x N)
* backend: one of ["numpy", "torch", "auto"] for backend choice
* Outputs: tensor/array of size (B,)
"""
return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(gdt_torch, gdt_numpy)
def GDT(A, B, *, mode="TS", cutoffs=[1,2,4,8], weights=None):
""" Returns GDT score as defined here (highre is better):
Supports both TS and HA
http://predictioncenter.org/casp12/doc/help.html
* Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* cutoffs: defines thresholds for gdt
* weights: list containing the weights
* mode: one of ["numpy", "torch", "auto"] for backend
* Outputs: tensor/array of size (B,)
"""
# define cutoffs for each type of gdt and weights
cutoffs = [0.5,1,2,4] if mode in ["HA", "ha"] else [1,2,4,8]
# calculate GDT
return A, B, cutoffs, {'weights': weights}
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(tmscore_torch, tmscore_numpy)
def TMscore(A, B):
""" Returns TMscore as defined here (higher is better):
>0.5 (likely) and >0.6 (highly likely) indicate the same fold;
random structure pairs score ~0.17. https://en.wikipedia.org/wiki/Template_modeling_score
Warning! It's not exactly the code in:
https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
but will suffice for now.
Inputs:
* A,B are (B x 3 x N) (np.array or torch.tensor)
* mode: one of ["numpy", "torch", "auto"] for backend
Outputs: tensor/array of size (B,)
"""
return A, B
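# Illustrative sketch of the wrappers above (an addition, not executed on import), condensed
# from tests/test_utils.py further down in this repo. All wrappers accept numpy arrays or
# torch tensors and dispatch to the corresponding backend implementation.
def _example_metric_wrappers():
    a, b = torch.randn(2, 3, 8), torch.randn(2, 3, 8)
    a_rot, b_ref = Kabsch(a[0], b[0])   # (3, 8) each, A aligned onto B
    rmsd = RMSD(a, b)                   # (2,)
    gdt = GDT(a, b, weights = 1)        # (2,)
    tm = TMscore(a, b)                  # (2,)
    return rmsd, gdt, tm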
| alphafold2-main | alphafold2_pytorch/utils.py |
import torch
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from inspect import isfunction
from functools import partial
from dataclasses import dataclass
import torch.nn.functional as F
from math import sqrt
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from alphafold2_pytorch.utils import *
import alphafold2_pytorch.constants as constants
from alphafold2_pytorch.mlm import MLM
# structure module
from invariant_point_attention import IPABlock
from pytorch3d.transforms import quaternion_multiply, quaternion_to_matrix
# constants
@dataclass
class Recyclables:
coords: torch.Tensor
single_msa_repr_row: torch.Tensor
pairwise_repr: torch.Tensor
@dataclass
class ReturnValues:
distance: torch.Tensor = None
theta: torch.Tensor = None
phi: torch.Tensor = None
omega: torch.Tensor = None
msa_mlm_loss: torch.Tensor = None
recyclables: Recyclables = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, tuple) else (val,) * depth
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# helper classes
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, x):
return self.val
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
init_zero_(self.net[-1])
def forward(self, x, **kwargs):
x = self.norm(x)
return self.net(x)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
seq_len = None,
heads = 8,
dim_head = 64,
dropout = 0.,
gating = True
):
super().__init__()
inner_dim = dim_head * heads
self.seq_len = seq_len
self.heads= heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.gating = nn.Linear(dim, inner_dim)
nn.init.constant_(self.gating.weight, 0.)
nn.init.constant_(self.gating.bias, 1.)
self.dropout = nn.Dropout(dropout)
init_zero_(self.to_out)
def forward(self, x, mask = None, attn_bias = None, context = None, context_mask = None, tie_dim = None):
device, orig_shape, h, has_context = x.device, x.shape, self.heads, exists(context)
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
i, j = q.shape[-2], k.shape[-2]
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# scale
q = q * self.scale
# query / key similarities
if exists(tie_dim):
# as in the paper, for the extra MSAs
# they average the queries along the rows of the MSAs
# they named this particular module MSAColumnGlobalAttention
q, k = map(lambda t: rearrange(t, '(b r) ... -> b r ...', r = tie_dim), (q, k))
q = q.mean(dim = 1)
dots = einsum('b h i d, b r h j d -> b r h i j', q, k)
dots = rearrange(dots, 'b r ... -> (b r) ...')
else:
dots = einsum('b h i d, b h j d -> b h i j', q, k)
# add attention bias, if supplied (for pairwise to msa attention communication)
if exists(attn_bias):
dots = dots + attn_bias
# masking
if exists(mask):
mask = default(mask, lambda: torch.ones(1, i, device = device).bool())
context_mask = mask if not has_context else default(context_mask, lambda: torch.ones(1, k.shape[-2], device = device).bool())
mask_value = -torch.finfo(dots.dtype).max
mask = mask[:, None, :, None] * context_mask[:, None, None, :]
dots = dots.masked_fill(~mask, mask_value)
# attention
attn = dots.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# gating
gates = self.gating(x)
out = out * gates.sigmoid()
# combine to out
out = self.to_out(out)
return out
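# Illustrative usage sketch (an addition, not invoked by the model): the gated attention
# above operates on (batch, seq, dim) tensors; attn_bias, if given, is added to the
# (batch, heads, i, j) logits, which is how pairwise information reaches the MSA stream.
def _example_attention_usage():
    attn = Attention(dim = 64, heads = 4, dim_head = 16)
    x = torch.randn(1, 32, 64)
    mask = torch.ones(1, 32).bool()
    return attn(x, mask = mask)  # (1, 32, 64)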
class AxialAttention(nn.Module):
def __init__(
self,
dim,
heads,
row_attn = True,
col_attn = True,
accept_edges = False,
global_query_attn = False,
**kwargs
):
super().__init__()
assert not (not row_attn and not col_attn), 'row or column attention must be turned on'
self.row_attn = row_attn
self.col_attn = col_attn
self.global_query_attn = global_query_attn
self.norm = nn.LayerNorm(dim)
self.attn = Attention(dim = dim, heads = heads, **kwargs)
self.edges_to_attn_bias = nn.Sequential(
nn.Linear(dim, heads, bias = False),
Rearrange('b i j h -> b h i j')
) if accept_edges else None
def forward(self, x, edges = None, mask = None):
assert self.row_attn ^ self.col_attn, 'has to be either row or column attention, but not both'
b, h, w, d = x.shape
x = self.norm(x)
# axial attention
if self.col_attn:
axial_dim = w
mask_fold_axial_eq = 'b h w -> (b w) h'
input_fold_eq = 'b h w d -> (b w) h d'
output_fold_eq = '(b w) h d -> b h w d'
elif self.row_attn:
axial_dim = h
mask_fold_axial_eq = 'b h w -> (b h) w'
input_fold_eq = 'b h w d -> (b h) w d'
output_fold_eq = '(b h) w d -> b h w d'
x = rearrange(x, input_fold_eq)
if exists(mask):
mask = rearrange(mask, mask_fold_axial_eq)
attn_bias = None
if exists(self.edges_to_attn_bias) and exists(edges):
attn_bias = self.edges_to_attn_bias(edges)
attn_bias = repeat(attn_bias, 'b h i j -> (b x) h i j', x = axial_dim)
tie_dim = axial_dim if self.global_query_attn else None
out = self.attn(x, mask = mask, attn_bias = attn_bias, tie_dim = tie_dim)
out = rearrange(out, output_fold_eq, h = h, w = w)
return out
class TriangleMultiplicativeModule(nn.Module):
def __init__(
self,
*,
dim,
hidden_dim = None,
mix = 'ingoing'
):
super().__init__()
assert mix in {'ingoing', 'outgoing'}, 'mix must be either ingoing or outgoing'
hidden_dim = default(hidden_dim, dim)
self.norm = nn.LayerNorm(dim)
self.left_proj = nn.Linear(dim, hidden_dim)
self.right_proj = nn.Linear(dim, hidden_dim)
self.left_gate = nn.Linear(dim, hidden_dim)
self.right_gate = nn.Linear(dim, hidden_dim)
self.out_gate = nn.Linear(dim, hidden_dim)
# initialize all gating to be identity
for gate in (self.left_gate, self.right_gate, self.out_gate):
nn.init.constant_(gate.weight, 0.)
nn.init.constant_(gate.bias, 1.)
if mix == 'outgoing':
self.mix_einsum_eq = '... i k d, ... j k d -> ... i j d'
elif mix == 'ingoing':
self.mix_einsum_eq = '... k j d, ... k i d -> ... i j d'
self.to_out_norm = nn.LayerNorm(hidden_dim)
self.to_out = nn.Linear(hidden_dim, dim)
def forward(self, x, mask = None):
assert x.shape[1] == x.shape[2], 'feature map must be symmetrical'
if exists(mask):
mask = rearrange(mask, 'b i j -> b i j ()')
x = self.norm(x)
left = self.left_proj(x)
right = self.right_proj(x)
if exists(mask):
left = left * mask
right = right * mask
left_gate = self.left_gate(x).sigmoid()
right_gate = self.right_gate(x).sigmoid()
out_gate = self.out_gate(x).sigmoid()
left = left * left_gate
right = right * right_gate
out = einsum(self.mix_einsum_eq, left, right)
out = self.to_out_norm(out)
out = out * out_gate
return self.to_out(out)
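# Illustrative usage sketch (an addition, not invoked by the model): the module acts on a
# square pairwise representation of shape (b, n, n, d) with an optional (b, n, n) mask,
# mixing pair features over a third residue k in either the 'outgoing' or 'ingoing' direction.
def _example_triangle_multiply():
    module = TriangleMultiplicativeModule(dim = 32, mix = 'outgoing')
    x = torch.randn(1, 16, 16, 32)
    mask = torch.ones(1, 16, 16).bool()
    return module(x, mask = mask)  # (1, 16, 16, 32)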
# evoformer blocks
class OuterMean(nn.Module):
def __init__(
self,
dim,
hidden_dim = None,
eps = 1e-5
):
super().__init__()
self.eps = eps
self.norm = nn.LayerNorm(dim)
hidden_dim = default(hidden_dim, dim)
self.left_proj = nn.Linear(dim, hidden_dim)
self.right_proj = nn.Linear(dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, dim)
def forward(self, x, mask = None):
x = self.norm(x)
left = self.left_proj(x)
right = self.right_proj(x)
outer = rearrange(left, 'b m i d -> b m i () d') * rearrange(right, 'b m j d -> b m () j d')
if exists(mask):
# masked mean, if there are padding in the rows of the MSA
mask = rearrange(mask, 'b m i -> b m i () ()') * rearrange(mask, 'b m j -> b m () j ()')
outer = outer.masked_fill(~mask, 0.)
outer = outer.sum(dim = 1) / (mask.sum(dim = 1) + self.eps)
else:
outer = outer.mean(dim = 1)
return self.proj_out(outer)
class PairwiseAttentionBlock(nn.Module):
def __init__(
self,
dim,
seq_len,
heads,
dim_head,
dropout = 0.,
global_column_attn = False
):
super().__init__()
self.outer_mean = OuterMean(dim)
self.triangle_attention_outgoing = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = True, col_attn = False, accept_edges = True)
self.triangle_attention_ingoing = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = False, col_attn = True, accept_edges = True, global_query_attn = global_column_attn)
self.triangle_multiply_outgoing = TriangleMultiplicativeModule(dim = dim, mix = 'outgoing')
self.triangle_multiply_ingoing = TriangleMultiplicativeModule(dim = dim, mix = 'ingoing')
def forward(
self,
x,
mask = None,
msa_repr = None,
msa_mask = None
):
if exists(msa_repr):
x = x + self.outer_mean(msa_repr, mask = msa_mask)
x = self.triangle_multiply_outgoing(x, mask = mask) + x
x = self.triangle_multiply_ingoing(x, mask = mask) + x
x = self.triangle_attention_outgoing(x, edges = x, mask = mask) + x
x = self.triangle_attention_ingoing(x, edges = x, mask = mask) + x
return x
class MsaAttentionBlock(nn.Module):
def __init__(
self,
dim,
seq_len,
heads,
dim_head,
dropout = 0.
):
super().__init__()
self.row_attn = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = True, col_attn = False, accept_edges = True)
self.col_attn = AxialAttention(dim = dim, heads = heads, dim_head = dim_head, row_attn = False, col_attn = True)
def forward(
self,
x,
mask = None,
pairwise_repr = None
):
x = self.row_attn(x, mask = mask, edges = pairwise_repr) + x
x = self.col_attn(x, mask = mask) + x
return x
# main evoformer class
class EvoformerBlock(nn.Module):
def __init__(
self,
*,
dim,
seq_len,
heads,
dim_head,
attn_dropout,
ff_dropout,
global_column_attn = False
):
super().__init__()
self.layer = nn.ModuleList([
PairwiseAttentionBlock(dim = dim, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout, global_column_attn = global_column_attn),
FeedForward(dim = dim, dropout = ff_dropout),
MsaAttentionBlock(dim = dim, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout),
FeedForward(dim = dim, dropout = ff_dropout),
])
def forward(self, inputs):
x, m, mask, msa_mask = inputs
attn, ff, msa_attn, msa_ff = self.layer
# msa attention and transition
m = msa_attn(m, mask = msa_mask, pairwise_repr = x)
m = msa_ff(m) + m
# pairwise attention and transition
x = attn(x, mask = mask, msa_repr = m, msa_mask = msa_mask)
x = ff(x) + x
return x, m, mask, msa_mask
class Evoformer(nn.Module):
def __init__(
self,
*,
depth,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([EvoformerBlock(**kwargs) for _ in range(depth)])
def forward(
self,
x,
m,
mask = None,
msa_mask = None
):
inp = (x, m, mask, msa_mask)
x, m, *_ = checkpoint_sequential(self.layers, 1, inp)
return x, m
class Alphafold2(nn.Module):
def __init__(
self,
*,
dim,
max_seq_len = 2048,
depth = 6,
heads = 8,
dim_head = 64,
max_rel_dist = 32,
num_tokens = constants.NUM_AMINO_ACIDS,
num_embedds = constants.NUM_EMBEDDS_TR,
max_num_msas = constants.MAX_NUM_MSA,
max_num_templates = constants.MAX_NUM_TEMPLATES,
extra_msa_evoformer_layers = 4,
attn_dropout = 0.,
ff_dropout = 0.,
templates_dim = 32,
templates_embed_layers = 4,
templates_angles_feats_dim = 55,
predict_angles = False,
symmetrize_omega = False,
predict_coords = False, # structure module related keyword arguments below
structure_module_depth = 4,
structure_module_heads = 1,
structure_module_dim_head = 4,
disable_token_embed = False,
mlm_mask_prob = 0.15,
mlm_random_replace_token_prob = 0.1,
mlm_keep_token_same_prob = 0.1,
mlm_exclude_token_ids = (0,),
recycling_distance_buckets = 32
):
super().__init__()
self.dim = dim
# token embedding
self.token_emb = nn.Embedding(num_tokens + 1, dim) if not disable_token_embed else Always(0)
self.to_pairwise_repr = nn.Linear(dim, dim * 2)
self.disable_token_embed = disable_token_embed
# positional embedding
self.max_rel_dist = max_rel_dist
self.pos_emb = nn.Embedding(max_rel_dist * 2 + 1, dim)
# extra msa embedding
self.extra_msa_evoformer = Evoformer(
dim = dim,
depth = extra_msa_evoformer_layers,
seq_len = max_seq_len,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
global_column_attn = True
)
# template embedding
self.to_template_embed = nn.Linear(templates_dim, dim)
self.templates_embed_layers = templates_embed_layers
self.template_pairwise_embedder = PairwiseAttentionBlock(
dim = dim,
dim_head = dim_head,
heads = heads,
seq_len = max_seq_len
)
self.template_pointwise_attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout
)
self.template_angle_mlp = nn.Sequential(
nn.Linear(templates_angles_feats_dim, dim),
nn.GELU(),
nn.Linear(dim, dim)
)
# projection for angles, if needed
self.predict_angles = predict_angles
self.symmetrize_omega = symmetrize_omega
if predict_angles:
self.to_prob_theta = nn.Linear(dim, constants.THETA_BUCKETS)
self.to_prob_phi = nn.Linear(dim, constants.PHI_BUCKETS)
self.to_prob_omega = nn.Linear(dim, constants.OMEGA_BUCKETS)
# custom embedding projection
self.embedd_project = nn.Linear(num_embedds, dim)
# main trunk modules
self.net = Evoformer(
dim = dim,
depth = depth,
seq_len = max_seq_len,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout
)
# MSA SSL MLM
self.mlm = MLM(
dim = dim,
num_tokens = num_tokens,
mask_id = num_tokens, # last token of embedding is used for masking
mask_prob = mlm_mask_prob,
keep_token_same_prob = mlm_keep_token_same_prob,
random_replace_token_prob = mlm_random_replace_token_prob,
exclude_token_ids = mlm_exclude_token_ids
)
# calculate distogram logits
self.to_distogram_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, constants.DISTOGRAM_BUCKETS)
)
# to coordinate output
self.predict_coords = predict_coords
self.structure_module_depth = structure_module_depth
self.msa_to_single_repr_dim = nn.Linear(dim, dim)
self.trunk_to_pairwise_repr_dim = nn.Linear(dim, dim)
with torch_default_dtype(torch.float32):
self.ipa_block = IPABlock(
dim = dim,
heads = structure_module_heads,
)
self.to_quaternion_update = nn.Linear(dim, 6)
init_zero_(self.ipa_block.attn.to_out)
self.to_points = nn.Linear(dim, 3)
# aux confidence measure
self.lddt_linear = nn.Linear(dim, 1)
# recycling params
self.recycling_msa_norm = nn.LayerNorm(dim)
self.recycling_pairwise_norm = nn.LayerNorm(dim)
self.recycling_distance_embed = nn.Embedding(recycling_distance_buckets, dim)
self.recycling_distance_buckets = recycling_distance_buckets
def forward(
self,
seq,
msa = None,
mask = None,
msa_mask = None,
extra_msa = None,
extra_msa_mask = None,
seq_index = None,
seq_embed = None,
msa_embed = None,
templates_feats = None,
templates_mask = None,
templates_angles = None,
embedds = None,
recyclables = None,
return_trunk = False,
return_confidence = False,
return_recyclables = False,
return_aux_logits = False
):
assert not (self.disable_token_embed and not exists(seq_embed)), 'sequence embedding must be supplied if one has disabled token embedding'
assert not (self.disable_token_embed and not exists(msa_embed)), 'msa embedding must be supplied if one has disabled token embedding'
# if MSA is not passed in, just use the sequence itself
if not exists(msa):
msa = rearrange(seq, 'b n -> b () n')
msa_mask = rearrange(mask, 'b n -> b () n')
# assert on sequence length
assert msa.shape[-1] == seq.shape[-1], 'sequence length of MSA and primary sequence must be the same'
# variables
b, n, device = *seq.shape[:2], seq.device
n_range = torch.arange(n, device = device)
# unpack (AA_code, atom_pos)
if isinstance(seq, (list, tuple)):
seq, seq_pos = seq
# embed main sequence
x = self.token_emb(seq)
if exists(seq_embed):
x += seq_embed
# mlm for MSAs
if self.training and exists(msa):
original_msa = msa
msa_mask = default(msa_mask, lambda: torch.ones_like(msa).bool())
noised_msa, replaced_msa_mask = self.mlm.noise(msa, msa_mask)
msa = noised_msa
# embed multiple sequence alignment (msa)
if exists(msa):
m = self.token_emb(msa)
if exists(msa_embed):
m = m + msa_embed
# add single representation to msa representation
m = m + rearrange(x, 'b n d -> b () n d')
# get msa_mask to all ones if none was passed
msa_mask = default(msa_mask, lambda: torch.ones_like(msa).bool())
elif exists(embedds):
m = self.embedd_project(embedds)
# get msa_mask to all ones if none was passed
msa_mask = default(msa_mask, lambda: torch.ones_like(embedds[..., -1]).bool())
else:
raise ValueError('either MSA or embedds must be given')
# derive pairwise representation
x_left, x_right = self.to_pairwise_repr(x).chunk(2, dim = -1)
x = rearrange(x_left, 'b i d -> b i () d') + rearrange(x_right, 'b j d -> b () j d') # create pair-wise residue embeds
x_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j') if exists(mask) else None
# add relative positional embedding
seq_index = default(seq_index, lambda: torch.arange(n, device = device))
seq_rel_dist = rearrange(seq_index, 'i -> () i ()') - rearrange(seq_index, 'j -> () () j')
seq_rel_dist = seq_rel_dist.clamp(-self.max_rel_dist, self.max_rel_dist) + self.max_rel_dist
rel_pos_emb = self.pos_emb(seq_rel_dist)
x = x + rel_pos_emb
# add recyclables, if present
if exists(recyclables):
m[:, 0] = m[:, 0] + self.recycling_msa_norm(recyclables.single_msa_repr_row)
x = x + self.recycling_pairwise_norm(recyclables.pairwise_repr)
distances = torch.cdist(recyclables.coords, recyclables.coords, p=2)
boundaries = torch.linspace(2, 20, steps = self.recycling_distance_buckets, device = device)
discretized_distances = torch.bucketize(distances, boundaries[:-1])
distance_embed = self.recycling_distance_embed(discretized_distances)
x = x + distance_embed
# embed templates, if present
if exists(templates_feats):
_, num_templates, *_ = templates_feats.shape
# embed template
t = self.to_template_embed(templates_feats)
t_mask_crossed = rearrange(templates_mask, 'b t i -> b t i ()') * rearrange(templates_mask, 'b t j -> b t () j')
t = rearrange(t, 'b t ... -> (b t) ...')
t_mask_crossed = rearrange(t_mask_crossed, 'b t ... -> (b t) ...')
for _ in range(self.templates_embed_layers):
t = self.template_pairwise_embedder(t, mask = t_mask_crossed)
t = rearrange(t, '(b t) ... -> b t ...', t = num_templates)
t_mask_crossed = rearrange(t_mask_crossed, '(b t) ... -> b t ...', t = num_templates)
# template pos emb
x_point = rearrange(x, 'b i j d -> (b i j) () d')
t_point = rearrange(t, 'b t i j d -> (b i j) t d')
x_mask_point = rearrange(x_mask, 'b i j -> (b i j) ()')
t_mask_point = rearrange(t_mask_crossed, 'b t i j -> (b i j) t')
template_pooled = self.template_pointwise_attn(
x_point,
context = t_point,
mask = x_mask_point,
context_mask = t_mask_point
)
template_pooled_mask = rearrange(t_mask_point.sum(dim = -1) > 0, 'b -> b () ()')
template_pooled = template_pooled * template_pooled_mask
template_pooled = rearrange(template_pooled, '(b i j) () d -> b i j d', i = n, j = n)
x = x + template_pooled
# add template angle features to MSAs by passing through MLP and then concat
if exists(templates_angles):
t_angle_feats = self.template_angle_mlp(templates_angles)
m = torch.cat((m, t_angle_feats), dim = 1)
msa_mask = torch.cat((msa_mask, templates_mask), dim = 1)
# embed extra msa, if present
if exists(extra_msa):
extra_m = self.token_emb(extra_msa)
extra_msa_mask = default(extra_msa_mask, torch.ones_like(extra_msa).bool())
x, extra_m = self.extra_msa_evoformer(
x,
extra_m,
mask = x_mask,
msa_mask = extra_msa_mask
)
# trunk
x, m = self.net(
x,
m,
mask = x_mask,
msa_mask = msa_mask
)
# ready output container
ret = ReturnValues()
# calculate theta and phi before symmetrization
if self.predict_angles:
ret.theta_logits = self.to_prob_theta(x)
ret.phi_logits = self.to_prob_phi(x)
# embeds to distogram
trunk_embeds = (x + rearrange(x, 'b i j d -> b j i d')) * 0.5 # symmetrize
distance_pred = self.to_distogram_logits(trunk_embeds)
ret.distance = distance_pred
# calculate mlm loss, if training
msa_mlm_loss = None
if self.training and exists(msa):
num_msa = original_msa.shape[1]
msa_mlm_loss = self.mlm(m[:, :num_msa], original_msa, replaced_msa_mask)
# determine angles, if specified
if self.predict_angles:
omega_input = trunk_embeds if self.symmetrize_omega else x
ret.omega_logits = self.to_prob_omega(omega_input)
if not self.predict_coords or return_trunk:
return ret
# derive single and pairwise embeddings for structural refinement
single_msa_repr_row = m[:, 0]
single_repr = self.msa_to_single_repr_dim(single_msa_repr_row)
pairwise_repr = self.trunk_to_pairwise_repr_dim(x)
# prepare float32 precision for equivariance
original_dtype = single_repr.dtype
single_repr, pairwise_repr = map(lambda t: t.float(), (single_repr, pairwise_repr))
# iterative refinement with equivariant transformer in high precision
with torch_default_dtype(torch.float32):
quaternions = torch.tensor([1., 0., 0., 0.], device = device) # initial rotations
quaternions = repeat(quaternions, 'd -> b n d', b = b, n = n)
translations = torch.zeros((b, n, 3), device = device)
# go through the layers and apply invariant point attention and feedforward
for i in range(self.structure_module_depth):
is_last = i == (self.structure_module_depth - 1)
# the detach comes from
# https://github.com/deepmind/alphafold/blob/0bab1bf84d9d887aba5cfb6d09af1e8c3ecbc408/alphafold/model/folding.py#L383
rotations = quaternion_to_matrix(quaternions)
if not is_last:
rotations = rotations.detach()
single_repr = self.ipa_block(
single_repr,
mask = mask,
pairwise_repr = pairwise_repr,
rotations = rotations,
translations = translations
)
# update quaternion and translation
quaternion_update, translation_update = self.to_quaternion_update(single_repr).chunk(2, dim = -1)
quaternion_update = F.pad(quaternion_update, (1, 0), value = 1.)
quaternions = quaternion_multiply(quaternions, quaternion_update)
translations = translations + einsum('b n c, b n c r -> b n r', translation_update, rotations)
points_local = self.to_points(single_repr)
rotations = quaternion_to_matrix(quaternions)
coords = einsum('b n c, b n c d -> b n d', points_local, rotations) + translations
coords = coords.type(original_dtype)
if return_recyclables:
coords, single_msa_repr_row, pairwise_repr = map(torch.detach, (coords, single_msa_repr_row, pairwise_repr))
ret.recyclables = Recyclables(coords, single_msa_repr_row, pairwise_repr)
if return_aux_logits:
return coords, ret
if return_confidence:
return coords, self.lddt_linear(single_repr.float())
return coords
| alphafold2-main | alphafold2_pytorch/alphafold2.py |
import torch
import torch.nn.functional as F
from torch import nn
from alphafold2_pytorch.utils import get_msa_embedd, get_esm_embedd, get_prottran_embedd, exists
from alphafold2_pytorch.constants import MSA_MODEL_PATH, MSA_EMBED_DIM, ESM_MODEL_PATH, ESM_EMBED_DIM, PROTTRAN_EMBED_DIM
from einops import rearrange
class ProtTranEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
from transformers import AutoTokenizer, AutoModel
self.alphafold2 = alphafold2
self.project_embed = nn.Linear(PROTTRAN_EMBED_DIM, alphafold2.dim)
self.tokenizer = AutoTokenizer.from_pretrained('Rostlab/prot_bert', do_lower_case=False)
self.model = AutoModel.from_pretrained('Rostlab/prot_bert')
def forward(self, seq, msa, msa_mask = None, **kwargs):
device = seq.device
num_msa = msa.shape[1]
msa_flat = rearrange(msa, 'b m n -> (b m) n')
seq_embed = get_prottran_embedd(seq, self.model, self.tokenizer, device = device)
msa_embed = get_prottran_embedd(msa_flat, self.model, self.tokenizer, device = device)
seq_embed, msa_embed = map(self.project_embed, (seq_embed, msa_embed))
msa_embed = rearrange(msa_embed, '(b m) n d -> b m n d', m = num_msa)
return self.alphafold2(seq, msa, seq_embed = seq_embed, msa_embed = msa_embed, msa_mask = msa_mask, **kwargs)
class MSAEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
self.alphafold2 = alphafold2
model, alphabet = torch.hub.load(*MSA_MODEL_PATH)
batch_converter = alphabet.get_batch_converter()
self.model = model
self.batch_converter = batch_converter
self.project_embed = nn.Linear(MSA_EMBED_DIM, alphafold2.dim) if MSA_EMBED_DIM != alphafold2.dim else nn.Identity()
def forward(self, seq, msa, msa_mask = None, **kwargs):
assert seq.shape[-1] == msa.shape[-1], 'sequence and msa must have the same length if you wish to use MSA transformer embeddings'
model, batch_converter, device = self.model, self.batch_converter, seq.device
seq_and_msa = torch.cat((seq.unsqueeze(1), msa), dim = 1)
if exists(msa_mask):
# in the event that there are rows in the MSA that are completely padding
# process each batch element individually, so that padding isn't processed
# with row-tied attention
num_msa = msa_mask.any(dim = -1).sum(dim = -1).tolist()
seq_and_msa_list = seq_and_msa.unbind(dim = 0)
num_rows = seq_and_msa.shape[1]
embeds = []
for num, batch_el in zip(num_msa, seq_and_msa_list):
batch_el = rearrange(batch_el, '... -> () ...')
batch_el = batch_el[:, :num]
embed = get_msa_embedd(batch_el, model, batch_converter, device = device)
embed = F.pad(embed, (0, 0, 0, 0, 0, num_rows - num), value = 0.)
embeds.append(embed)
embeds = torch.cat(embeds, dim = 0)
else:
embeds = get_msa_embedd(seq_and_msa, model, batch_converter, device = device)
embeds = self.project_embed(embeds)
seq_embed, msa_embed = embeds[:, 0], embeds[:, 1:]
return self.alphafold2(seq, msa, seq_embed = seq_embed, msa_embed = msa_embed, msa_mask = msa_mask, **kwargs)
class ESMEmbedWrapper(nn.Module):
def __init__(self, *, alphafold2):
super().__init__()
self.alphafold2 = alphafold2
model, alphabet = torch.hub.load(*ESM_MODEL_PATH)
batch_converter = alphabet.get_batch_converter()
self.model = model
self.batch_converter = batch_converter
self.project_embed = nn.Linear(ESM_EMBED_DIM, alphafold2.dim) if ESM_EMBED_DIM != alphafold2.dim else nn.Identity()
def forward(self, seq, msa=None, **kwargs):
model, batch_converter, device = self.model, self.batch_converter, seq.device
seq_embeds = get_esm_embedd(seq, model, batch_converter, device = device)
seq_embeds = self.project_embed(seq_embeds)
if msa is not None:
flat_msa = rearrange(msa, 'b m n -> (b m) n')
msa_embeds = get_esm_embedd(flat_msa, model, batch_converter, device = device)
msa_embeds = rearrange(msa_embeds, '(b m) n d -> b m n d', m = msa.shape[1])
msa_embeds = self.project_embed(msa_embeds)
else:
msa_embeds = None
return self.alphafold2(seq, msa, seq_embed = seq_embeds, msa_embed = msa_embeds, **kwargs)
| alphafold2-main | alphafold2_pytorch/embeds.py |
from math import log, sqrt, pi
import torch
from torch import nn, einsum
from einops import rearrange, repeat
# helpers
def default(val, d):
    return val if val is not None else d
# rotary embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(x, sinu_pos):
sin, cos = map(lambda t: rearrange(t, 'b ... -> b () ...'), sinu_pos)
rot_dim = sin.shape[-1]
x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
x = x * cos + rotate_every_two(x) * sin
return torch.cat((x, x_pass), dim = -1)
# positional embeddings
class DepthWiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True, groups = None):
super().__init__()
groups = default(groups, dim_in)
self.net = nn.Sequential(
nn.Conv1d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = groups, stride = stride, bias = bias),
nn.Conv1d(dim_in, dim_out, 1, bias = bias)
)
def forward(self, x):
return self.net(x)
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, n, device):
seq = torch.arange(n, device = device).type_as(self.inv_freq)
freqs = einsum('i , j -> i j', seq, self.inv_freq)
freqs = repeat(freqs, 'i j -> () i (j r)', r = 2)
return [freqs.sin(), freqs.cos()]
class AxialRotaryEmbedding(nn.Module):
def __init__(self, dim, max_freq = 10):
super().__init__()
self.dim = dim // 2
inv_freq = 1. / (10000 ** (torch.arange(0, self.dim, 2).float() / self.dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, n, device):
seq = torch.arange(n, device = device).type_as(self.inv_freq)
x = einsum('n, d -> n d', seq, self.inv_freq)
y = einsum('n, d -> n d', seq, self.inv_freq)
x_sinu = repeat(x, 'i d -> i j d', j = n)
y_sinu = repeat(y, 'j d -> i j d', i = n)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: repeat(t, 'i j d -> () (i j) (d r)', r = 2), (sin, cos))
return [sin, cos]
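# Illustrative usage sketch (an addition, not executed on import): rotate query/key
# features with the fixed sinusoidal rotary embedding defined above. Assumes an even
# head dimension so rotate_every_two can pair up feature channels.
def _example_rotary_usage():
    dim_head = 64
    pos_emb = FixedPositionalEmbedding(dim_head)
    q = torch.randn(1, 8, 32, dim_head)               # (batch, heads, seq, dim_head)
    sin_cos = pos_emb(q.shape[-2], device = q.device)
    return apply_rotary_pos_emb(q, sin_cos)           # same shape as q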
| alphafold2-main | alphafold2_pytorch/rotary.py |
import torch
import numpy as np
from alphafold2_pytorch.utils import *
def test_mat_to_masked():
# nodes
x = torch.ones(19, 3)
x_mask = torch.randn(19) > -0.3
# edges
edges_mat = torch.randn(19, 19) < 1
edges = torch.nonzero(edges_mat, as_tuple=False).t()
# test normal edges / nodes
cleaned = mat_input_to_masked(x, x_mask, edges=edges)
cleaned_2 = mat_input_to_masked(x, x_mask, edges_mat=edges_mat)
# test batch dimension
x_ = torch.stack([x]*2, dim=0)
x_mask_ = torch.stack([x_mask]*2, dim=0)
edges_mat_ = torch.stack([edges_mat]*2, dim=0)
cleaned_3 = mat_input_to_masked(x_, x_mask_, edges_mat=edges_mat_)
assert True
def test_center_distogram_median():
distogram = torch.randn(1, 128, 128, 37)
distances, weights = center_distogram_torch(distogram, center = 'median')
assert True
def test_masks():
seqs = torch.randint(20, size=(2, 50))
# cloud point mask - can't test bc it needs sidechainnet installed
# cloud_masks = scn_cloud_mask(seqs, boolean=True)
# atom masking
N_mask, CA_mask, C_mask = scn_backbone_mask(seqs, boolean = True)
assert True
def test_mds_and_mirrors():
distogram = torch.randn(2, 32*3, 32*3, 37)
distances, weights = center_distogram_torch(distogram)
# set out some points (due to padding)
paddings = [7,0]
for i,pad in enumerate(paddings):
if pad > 0:
weights[i, -pad:, -pad:] = 0.
# masks
masker = torch.arange(distogram.shape[1]) % 3
N_mask = (masker==0).bool()
CA_mask = (masker==1).bool()
coords_3d, _ = MDScaling(distances,
weights = weights,
iters = 5,
fix_mirror = 2,
N_mask = N_mask,
CA_mask = CA_mask,
C_mask = None
)
assert list(coords_3d.shape) == [2, 3, 32*3], 'coordinates must be of the right shape after MDS'
def test_sidechain_container():
seqs = torch.tensor([[0]*137, [3]*137]).long()
bb = torch.randn(2, 137*4, 3)
atom_mask = torch.tensor( [1]*4 + [0]*(14-4) )
proto_3d = sidechain_container(seqs, bb, atom_mask=atom_mask)
assert list(proto_3d.shape) == [2, 137, 14, 3]
def test_distmat_loss():
a = torch.randn(2, 137, 14, 3)
b = torch.randn(2, 137, 14, 3)
loss = distmat_loss_torch(a, b, p=2, q=2) # mse on distmat
assert True
def test_lddt():
a = torch.randn(2, 137, 14, 3)
b = torch.randn(2, 137, 14, 3)
cloud_mask = torch.ones(a.shape[:-1]).bool()
lddt_result = lddt_ca_torch(a, b, cloud_mask)
assert list(lddt_result.shape) == [2, 137]
def test_kabsch():
a = torch.randn(3, 8)
b = torch.randn(3, 8)
a_, b_ = Kabsch(a,b)
assert a.shape == a_.shape
def test_tmscore():
a = torch.randn(2, 3, 8)
b = torch.randn(2, 3, 8)
out = TMscore(a, b)
assert True
def test_gdt():
a = torch.randn(1, 3, 8)
b = torch.randn(1, 3, 8)
GDT(a, b, weights = 1)
assert True
| alphafold2-main | tests/test_utils.py |
import torch
from torch import nn
from einops import repeat
from alphafold2_pytorch.alphafold2 import Alphafold2
from alphafold2_pytorch.utils import *
def test_main():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 128))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_no_msa():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
mask = torch.ones_like(seq).bool()
distogram = model(
seq,
mask = mask
)
assert True
def test_anglegrams():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_angles = True
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 128))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_templates():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
templates_dim = 32,
templates_angles_feats_dim = 32
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
templates_feats = torch.randn(2, 3, 16, 16, 32)
templates_angles = torch.randn(2, 3, 16, 32)
templates_mask = torch.ones(2, 3, 16).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
templates_feats = templates_feats,
templates_angles = templates_angles,
templates_mask = templates_mask
)
assert True
def test_extra_msa():
model = Alphafold2(
dim = 128,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True
)
seq = torch.randint(0, 21, (2, 4))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 4))
msa_mask = torch.ones_like(msa).bool()
extra_msa = torch.randint(0, 21, (2, 5, 4))
extra_msa_mask = torch.ones_like(extra_msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask
)
assert True
def test_embeddings():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
embedds = torch.randn(2, 1, 16, 1280)
# without mask
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = None
)
# with mask
embedds_mask = torch.ones_like(embedds[..., -1]).bool()
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = embedds_mask
)
assert True
def test_coords():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_coords_backbone_with_cbeta():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_coords_all_atoms():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_mds():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 16, 3), 'must output coordinates'
def test_edges_to_equivariant_network():
model = Alphafold2(
dim = 32,
depth = 1,
heads = 2,
dim_head = 32,
predict_coords = True,
predict_angles = True
)
seq = torch.randint(0, 21, (2, 32))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 32))
msa_mask = torch.ones_like(msa).bool()
coords, confidences = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
return_confidence = True
)
assert True, 'should run without errors'
def test_coords_backwards():
model = Alphafold2(
dim = 256,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
coords.sum().backward()
assert True, 'must be able to go backwards through MDS and center distogram'
def test_confidence():
model = Alphafold2(
dim = 256,
depth = 1,
heads = 2,
dim_head = 32,
predict_coords = True
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords, confidences = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
return_confidence = True
)
assert coords.shape[:-1] == confidences.shape[:-1]
def test_recycling():
model = Alphafold2(
dim = 128,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
)
seq = torch.randint(0, 21, (2, 4))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 4))
msa_mask = torch.ones_like(msa).bool()
extra_msa = torch.randint(0, 21, (2, 5, 4))
extra_msa_mask = torch.ones_like(extra_msa).bool()
coords, ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask,
return_aux_logits = True,
return_recyclables = True
)
coords, ret = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
extra_msa = extra_msa,
extra_msa_mask = extra_msa_mask,
recyclables = ret.recyclables,
return_aux_logits = True,
return_recyclables = True
)
assert True
| alphafold2-main | tests/test_attention.py |
| alphafold2-main | training_scripts/deepspeed.py |
| alphafold2-main | training_scripts/lightning.py |
| alphafold2-main | training_scripts/datasets/__init__.py |
import pickle
import string
from argparse import ArgumentParser
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import numpy.linalg as LA
import prody
import torch
from Bio import SeqIO
from einops import repeat
from sidechainnet.utils.measure import get_seq_coords_and_angles
from sidechainnet.utils.sequence import ProteinVocabulary
from torch.utils.data import DataLoader, Dataset
from alphafold2_pytorch.constants import DISTOGRAM_BUCKETS
from tqdm import tqdm
try:
import pytorch_lightning as pl
LightningDataModule = pl.LightningDataModule
except ImportError:
LightningDataModule = object
CACHE_PATH = Path("~/.cache/alphafold2_pytorch").expanduser()
DATA_DIR = CACHE_PATH / "trrosetta" / "trrosetta"
URL = "http://s3.amazonaws.com/proteindata/data_pytorch/trrosetta.tar.gz"
REMOVE_KEYS = dict.fromkeys(string.ascii_lowercase)
REMOVE_KEYS["."] = None
REMOVE_KEYS["*"] = None
translation = str.maketrans(REMOVE_KEYS)
DEFAULT_VOCAB = ProteinVocabulary()
def default_tokenize(seq: str) -> List[int]:
return [DEFAULT_VOCAB[ch] for ch in seq]
def read_fasta(filename: str) -> List[Tuple[str, str]]:
def remove_insertions(sequence: str) -> str:
return sequence.translate(translation)
return [
(record.description, remove_insertions(str(record.seq)))
for record in SeqIO.parse(filename, "fasta")
]
def read_pdb(pdb: str):
ag = prody.parsePDB(pdb)
for chain in ag.iterChains():
angles, coords, seq = get_seq_coords_and_angles(chain)
return angles, coords, seq
def download_file(url, filename=None, root=CACHE_PATH):
import os
import urllib
root.mkdir(exist_ok=True, parents=True)
filename = filename or os.path.basename(url)
download_target = root / filename
download_target_tmp = root / f"tmp.{filename}"
if download_target.exists() and not download_target.is_file():
raise RuntimeError(f"{download_target} exists and is not a regular file")
if download_target.is_file():
return download_target
with urllib.request.urlopen(url) as source, open(
download_target_tmp, "wb"
) as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
download_target_tmp.rename(download_target)
return download_target
def get_or_download(url: str = URL):
"""
download and extract trrosetta data
"""
import tarfile
file = CACHE_PATH / "trrosetta.tar.gz"
dir = CACHE_PATH / "trrosetta"
dir_temp = CACHE_PATH / "trrosetta_tmp"
if dir.is_dir():
print(f"Load cached data from {dir}")
return dir
if not file.is_file():
print(f"Cache not found, download from {url} to {file}")
download_file(url)
print(f"Extract data from {file} to {dir}")
with tarfile.open(file, "r:gz") as tar:
tar.extractall(dir_temp)
dir_temp.rename(dir)
return dir
def pad_sequences(sequences, constant_value=0, dtype=None) -> np.ndarray:
batch_size = len(sequences)
shape = [batch_size] + np.max([seq.shape for seq in sequences], 0).tolist()
if dtype is None:
dtype = sequences[0].dtype
if isinstance(sequences[0], np.ndarray):
array = np.full(shape, constant_value, dtype=dtype)
elif isinstance(sequences[0], torch.Tensor):
array = torch.full(shape, constant_value, dtype=dtype)
for arr, seq in zip(array, sequences):
arrslice = tuple(slice(dim) for dim in seq.shape)
arr[arrslice] = seq
return array
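# Illustrative sketch (an addition, not executed on import): pad a ragged batch of token
# id arrays to a common length, using 20 as the padding id as elsewhere in this file.
def _example_pad_sequences():
    seqs = [np.arange(3), np.arange(5)]
    return pad_sequences(seqs, constant_value=20)  # shape (2, 5); the short row is padded with 20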
class TrRosettaDataset(Dataset):
def __init__(
self,
data_dir: Path,
list_path: Path,
tokenize: Callable[[str], List[int]],
seq_pad_value: int = 20,
random_sample_msa: bool = False,
max_seq_len: int = 300,
max_msa_num: int = 300,
overwrite: bool = False,
):
self.data_dir = data_dir
self.file_list: List[Path] = self.read_file_list(data_dir, list_path)
self.tokenize = tokenize
self.seq_pad_value = seq_pad_value
self.random_sample_msa = random_sample_msa
self.max_seq_len = max_seq_len
self.max_msa_num = max_msa_num
self.overwrite = overwrite
def __len__(self) -> int:
return len(self.file_list)
def read_file_list(self, data_dir: Path, list_path: Path):
file_glob = (data_dir / "npz").glob("*.npz")
files = set(list_path.read_text().split())
if len(files) == 0:
raise ValueError("Passed an empty split file set")
file_list = [f for f in file_glob if f.name in files]
if len(file_list) != len(files):
num_missing = len(files) - len(file_list)
raise FileNotFoundError(
f"{num_missing} specified split files not found in directory"
)
return file_list
def has_cache(self, index):
if self.overwrite:
return False
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
return path.is_file()
def write_cache(self, index, data):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
path.parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as file:
pickle.dump(data, file)
def read_cache(self, index):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
with open(path, "rb") as file:
return pickle.load(file)
def __getitem__(self, index):
if self.has_cache(index):
item = self.read_cache(index)
else:
id = self.file_list[index].stem
pdb_path = self.data_dir / "pdb" / f"{id}.pdb"
msa_path = self.data_dir / "a3m" / f"{id}.a3m"
_, msa = zip(*read_fasta(str(msa_path)))
msa = np.array([np.array(list(seq)) for seq in msa])
angles, coords, seq = read_pdb(str(pdb_path))
seq = np.array(list(seq))
coords = coords.reshape((coords.shape[0] // 14, 14, 3))
dist = self.get_bucketed_distance(seq, coords, subset="ca")
item = {
"id": id,
"seq": seq,
"msa": msa,
"coords": coords,
"angles": angles,
"dist": dist
}
self.write_cache(index, item)
item["msa"] = self.sample(item["msa"], self.max_msa_num, self.random_sample_msa)
item = self.crop(item, self.max_seq_len)
return item
def calc_cb(self, coord):
N = coord[0]
CA = coord[1]
C = coord[2]
b = CA - N
c = C - CA
a = np.cross(b, c)
CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
return CB
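# note: the coefficients above are the standard idealized virtual C_beta reconstruction
# from the backbone N, CA and C atoms, used below for glycine, which has no real C_beta.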
def get_bucketed_distance(
self, seq, coords, subset="ca", start=2, bins=DISTOGRAM_BUCKETS-1, step=0.5
):
assert subset in ("ca", "cb")
if subset == "ca":
coords = coords[:, 1, :]
elif subset == "cb":
cb_coords = []
for res, coord in zip(seq, coords):
if res == "G":
cb = self.calc_cb(coord)
cb_coords.append(cb)
else:
cb_coords.append(coord[4, :])
coords = np.array(cb_coords)
vcs = coords + np.zeros([coords.shape[0]] + list(coords.shape))
vcs = vcs - np.swapaxes(vcs, 0, 1)
distance_map = LA.norm(vcs, axis=2)
mask = np.ones(distance_map.shape) - np.eye(distance_map.shape[0])
low_pos = np.where(distance_map < start)
high_pos = np.where(distance_map >= start + step * bins)
mask[low_pos] = 0
distance_map = (distance_map - start) // step
distance_map[high_pos] = bins
dist = (distance_map * mask).astype(int)
return dist
def crop(self, item, max_seq_len: int):
seq_len = len(item["seq"])
if seq_len <= max_seq_len or max_seq_len <= 0:
return item
start = 0
end = start + max_seq_len
item["seq"] = item["seq"][start:end]
item["msa"] = item["msa"][:, start:end]
item["coords"] = item["coords"][start:end]
item["angles"] = item["angles"][start:end]
item["dist"] = item["dist"][start:end, start:end]
return item
def sample(self, msa, max_msa_num: int, random: bool):
num_msa, seq_len = len(msa), len(msa[0])
if num_msa <= max_msa_num or max_msa_num <= 0:
return msa
if random:
num_sample = max_msa_num - 1
indices = np.random.choice(num_msa - 1, size=num_sample, replace=False) + 1
indices = np.pad(indices, [1, 0], "constant")
return msa[indices]
else:
return msa[:max_msa_num]
def collate_fn(self, batch):
b = len(batch)
batch = {k: [item[k] for item in batch] for k in batch[0]}
id = batch["id"]
seq = batch["seq"]
msa = batch["msa"]
coords = batch["coords"]
angles = batch["angles"]
dist = batch["dist"]
lengths = torch.LongTensor([len(x[0]) for x in msa])
depths = torch.LongTensor([len(x) for x in msa])
max_len = lengths.max()
max_depth = depths.max()
seq = pad_sequences(
[torch.LongTensor(self.tokenize(seq_)) for seq_ in seq], self.seq_pad_value,
)
msa = pad_sequences(
[torch.LongTensor([self.tokenize(seq_) for seq_ in msa_]) for msa_ in msa],
self.seq_pad_value,
)
coords = pad_sequences([torch.FloatTensor(x) for x in coords], 0.0)
angles = pad_sequences([torch.FloatTensor(x) for x in angles], 0.0)
dist = pad_sequences([torch.LongTensor(x) for x in dist], -100)
mask = repeat(torch.arange(max_len), "l -> b l", b=b) < repeat(
lengths, "b -> b l", l=max_len
)
msa_seq_mask = repeat(
torch.arange(max_len), "l -> b s l", b=b, s=max_depth
) < repeat(lengths, "b -> b s l", s=max_depth, l=max_len)
msa_depth_mask = repeat(
torch.arange(max_depth), "s -> b s l", b=b, l=max_len
) < repeat(depths, "b -> b s l", s=max_depth, l=max_len)
msa_mask = msa_seq_mask & msa_depth_mask
return {
"id": id,
"seq": seq,
"msa": msa,
"coords": coords,
"angles": angles,
"mask": mask,
"msa_mask": msa_mask,
"dist": dist,
}
class TrRosettaDataModule(LightningDataModule):
@staticmethod
def add_data_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--data_dir", type=str, default=str(DATA_DIR))
parser.add_argument("--train_batch_size", type=int, default=1)
parser.add_argument("--eval_batch_size", type=int, default=1)
parser.add_argument("--test_batch_size", type=int, default=1)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument("--train_max_seq_len", type=int, default=256)
parser.add_argument("--eval_max_seq_len", type=int, default=256)
parser.add_argument("--test_max_seq_len", type=int, default=-1)
parser.add_argument("--train_max_msa_num", type=int, default=256)
parser.add_argument("--eval_max_msa_num", type=int, default=256)
parser.add_argument("--test_max_msa_num", type=int, default=1000)
parser.add_argument("--overwrite", dest="overwrite", action="store_true")
return parser
def __init__(
self,
data_dir: str = DATA_DIR,
train_batch_size: int = 1,
eval_batch_size: int = 1,
test_batch_size: int = 1,
num_workers: int = 0,
train_max_seq_len: int = 256,
eval_max_seq_len: int = 256,
test_max_seq_len: int = -1,
train_max_msa_num: int = 32,
eval_max_msa_num: int = 32,
test_max_msa_num: int = 64,
tokenize: Callable[[str], List[int]] = default_tokenize,
seq_pad_value: int = 20,
overwrite: bool = False,
**kwargs,
):
super(TrRosettaDataModule, self).__init__()
self.data_dir = Path(data_dir).expanduser().resolve()
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.test_batch_size = test_batch_size
self.num_workers = num_workers
self.train_max_seq_len = train_max_seq_len
self.eval_max_seq_len = eval_max_seq_len
self.test_max_seq_len = test_max_seq_len
self.train_max_msa_num = train_max_msa_num
self.eval_max_msa_num = eval_max_msa_num
self.test_max_msa_num = test_max_msa_num
self.tokenize = tokenize
self.seq_pad_value = seq_pad_value
self.overwrite = overwrite
get_or_download()
def setup(self, stage: Optional[str] = None):
self.train = TrRosettaDataset(
self.data_dir,
self.data_dir / "train_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=True,
max_seq_len=self.train_max_seq_len,
max_msa_num=self.train_max_msa_num,
overwrite=self.overwrite,
)
self.val = TrRosettaDataset(
self.data_dir,
self.data_dir / "valid_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=False,
max_seq_len=self.eval_max_seq_len,
max_msa_num=self.eval_max_msa_num,
overwrite=self.overwrite,
)
self.test = TrRosettaDataset(
self.data_dir,
self.data_dir / "valid_files.txt",
self.tokenize,
self.seq_pad_value,
random_sample_msa=False,
max_seq_len=self.test_max_seq_len,
max_msa_num=self.test_max_msa_num,
overwrite=self.overwrite,
)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(
self.train,
batch_size=self.train_batch_size,
shuffle=True,
collate_fn=self.train.collate_fn,
num_workers=self.num_workers,
)
def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val,
batch_size=self.eval_batch_size,
shuffle=False,
collate_fn=self.val.collate_fn,
num_workers=self.num_workers,
)
def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.test,
batch_size=self.test_batch_size,
shuffle=False,
collate_fn=self.test.collate_fn,
num_workers=self.num_workers,
)
def test():
dm = TrRosettaDataModule(train_batch_size=1, num_workers=4)
dm.setup()
for batch in dm.train_dataloader():
print("id", batch["id"])
print("seq", batch["seq"].shape, batch["seq"])
print("msa", batch["msa"].shape, batch["msa"][..., :20])
print("msa", batch["msa"].shape, batch["msa"][..., -20:])
print("coords", batch["coords"].shape)
print("angles", batch["angles"].shape)
print("mask", batch["mask"].shape)
print("msa_mask", batch["msa_mask"].shape)
print("dist", batch["dist"].shape, batch["dist"])
break
if __name__ == "__main__":
test()
| alphafold2-main | training_scripts/datasets/trrosetta.py |
# will use FastRelax routine to refine structure
import os
import json
import warnings
# science
import numpy as np
# pyrosetta installation instructs in readme
try:
import pyrosetta
except ModuleNotFoundError:
msg = "Unable to find an existing installation of the PyRosetta module. " +\
"Functions involving this module such as the FastRelax pipeline " +\
"will not work."
warnings.warn(msg) # no pyRosetta was found
#####################
### ROSETTA STUFF ###
#####################
def pdb2rosetta(route):
""" Takes pdb file route(s) as input and returns rosetta pose(s).
Input:
* route: list or string.
Output: list of 1 or many according to input
"""
if isinstance(route, str):
return [pyrosetta.io.pose_from_pdb(route)]
else:
return list(pyrosetta.io.poses_from_files(route))
def rosetta2pdb(pose, route, verbose=True):
""" Takes pose(s) as input and saves pdb(s) to disk.
Input:
* pose: list or string. rosetta poses object(s).
* route: list or string. destination filenames to be written.
* verbose: bool. warns if lengths dont match and @ every write.
Inspo:
* https://www.rosettacommons.org/demos/latest/tutorials/input_and_output/input_and_output#controlling-output_common-structure-output-files_pdb-file
* https://graylab.jhu.edu/PyRosetta.documentation/pyrosetta.rosetta.core.io.pdb.html#pyrosetta.rosetta.core.io.pdb.dump_pdb
"""
# convert to list
pose = pose if isinstance(pose, list) else [pose]
route = [route] if isinstance(route, str) else route
# check lengths and warn if necessary
if verbose and ( len(pose) != len(route) ):
print("Length of pose and route are not the same. Will stop at the minimum.")
# convert and save
for i,pos in enumerate(pose):
pyrosetta.rosetta.core.io.pdb.dump_pdb(pos, route[i])
if verbose:
print("Saved structure @ "+route)
return
def run_fast_relax(config_route, pdb_route=None, pose=None):
""" Runs the Fast-Relax pipeline.
* config_route: route to json file with config
* pose: rosetta pose to run the pipeline on
Output: rosetta pose
"""
# load rosetta pose - if string or list is passed, convert to pose + recall
if isinstance(pdb_route, str):
pose = pdb2rosetta(pdb_route)
return run_fast_relax(config_route, pose=pose)
elif isinstance(pdb_route, list):
return [run_fast_relax(config_route, pdb_route=pdb) for pdb in pdb_route]
# load config:
config = json.load(open(config_route))
# run fast relax pipeline - examples:
# https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.02-Packing-design-and-regional-relax.ipynb#scrollTo=PYr025Rn1Q8i
# https://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.03-Design-with-a-resfile-and-relax.ipynb
# https://faculty.washington.edu/dimaio/files/demo2.py
raise NotImplementedError("Last step. Not implemented yet.")
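# Hedged sketch (an assumption based on the notebooks linked above, not part of this
# file's implemented pipeline): a typical FastRelax invocation looks roughly like
#
#   pyrosetta.init()
#   scorefxn = pyrosetta.get_fa_scorefxn()
#   relax = pyrosetta.rosetta.protocols.relax.FastRelax()
#   relax.set_scorefxn(scorefxn)
#   relax.apply(pose)
#
# with constraint / resfile / regional-relax options taken from the json config.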
| alphafold2-main | scripts/refinement.py |
from setuptools import setup, find_packages
exec(open('denoising_diffusion_pytorch/version.py').read())
setup(
name = 'denoising-diffusion-pytorch',
packages = find_packages(),
version = __version__,
license='MIT',
description = 'Denoising Diffusion Probabilistic Models - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/denoising-diffusion-pytorch',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'generative models'
],
install_requires=[
'accelerate',
'einops',
'ema-pytorch',
'numpy',
'pillow',
'pytorch-fid',
'torch',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| denoising-diffusion-pytorch-main | setup.py |
from math import sqrt
from random import random
import torch
from torch import nn, einsum
import torch.nn.functional as F
from tqdm import tqdm
from einops import rearrange, repeat, reduce
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# main class
class ElucidatedDiffusion(nn.Module):
def __init__(
self,
net,
*,
image_size,
channels = 3,
num_sample_steps = 32, # number of sampling steps
sigma_min = 0.002, # min noise level
sigma_max = 80, # max noise level
sigma_data = 0.5, # standard deviation of data distribution
rho = 7, # controls the sampling schedule
P_mean = -1.2, # mean of log-normal distribution from which noise is drawn for training
P_std = 1.2, # standard deviation of log-normal distribution from which noise is drawn for training
        S_churn = 80, # parameters for stochastic sampling - depends on dataset, Table 5 in the paper
S_tmin = 0.05,
S_tmax = 50,
S_noise = 1.003,
):
super().__init__()
assert net.random_or_learned_sinusoidal_cond
self.self_condition = net.self_condition
self.net = net
# image dimensions
self.channels = channels
self.image_size = image_size
# parameters
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.sigma_data = sigma_data
self.rho = rho
self.P_mean = P_mean
self.P_std = P_std
self.num_sample_steps = num_sample_steps # otherwise known as N in the paper
self.S_churn = S_churn
self.S_tmin = S_tmin
self.S_tmax = S_tmax
self.S_noise = S_noise
@property
def device(self):
return next(self.net.parameters()).device
# derived preconditioning params - Table 1
def c_skip(self, sigma):
return (self.sigma_data ** 2) / (sigma ** 2 + self.sigma_data ** 2)
def c_out(self, sigma):
return sigma * self.sigma_data * (self.sigma_data ** 2 + sigma ** 2) ** -0.5
def c_in(self, sigma):
return 1 * (sigma ** 2 + self.sigma_data ** 2) ** -0.5
def c_noise(self, sigma):
return log(sigma) * 0.25
# preconditioned network output
# equation (7) in the paper
def preconditioned_network_forward(self, noised_images, sigma, self_cond = None, clamp = False):
batch, device = noised_images.shape[0], noised_images.device
if isinstance(sigma, float):
sigma = torch.full((batch,), sigma, device = device)
padded_sigma = rearrange(sigma, 'b -> b 1 1 1')
net_out = self.net(
self.c_in(padded_sigma) * noised_images,
self.c_noise(sigma),
self_cond
)
out = self.c_skip(padded_sigma) * noised_images + self.c_out(padded_sigma) * net_out
if clamp:
out = out.clamp(-1., 1.)
return out
# sampling
# sample schedule
# equation (5) in the paper
def sample_schedule(self, num_sample_steps = None):
num_sample_steps = default(num_sample_steps, self.num_sample_steps)
N = num_sample_steps
inv_rho = 1 / self.rho
steps = torch.arange(num_sample_steps, device = self.device, dtype = torch.float32)
sigmas = (self.sigma_max ** inv_rho + steps / (N - 1) * (self.sigma_min ** inv_rho - self.sigma_max ** inv_rho)) ** self.rho
sigmas = F.pad(sigmas, (0, 1), value = 0.) # last step is sigma value of 0.
return sigmas
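    # e.g. with the defaults (sigma_min = 0.002, sigma_max = 80, rho = 7, N = 32) the
    # schedule interpolates linearly in sigma ** (1 / rho) space, so sigmas start at 80,
    # decay quickly towards 0.002, and a final sigma of 0 is appended for the last step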
@torch.no_grad()
def sample(self, batch_size = 16, num_sample_steps = None, clamp = True):
num_sample_steps = default(num_sample_steps, self.num_sample_steps)
shape = (batch_size, self.channels, self.image_size, self.image_size)
        # get the sigma schedule, compute the per-step gammas, and pair each sigma with the next sigma and its gamma
sigmas = self.sample_schedule(num_sample_steps)
gammas = torch.where(
(sigmas >= self.S_tmin) & (sigmas <= self.S_tmax),
min(self.S_churn / num_sample_steps, sqrt(2) - 1),
0.
)
sigmas_and_gammas = list(zip(sigmas[:-1], sigmas[1:], gammas[:-1]))
# images is noise at the beginning
init_sigma = sigmas[0]
images = init_sigma * torch.randn(shape, device = self.device)
# for self conditioning
x_start = None
# gradually denoise
for sigma, sigma_next, gamma in tqdm(sigmas_and_gammas, desc = 'sampling time step'):
sigma, sigma_next, gamma = map(lambda t: t.item(), (sigma, sigma_next, gamma))
eps = self.S_noise * torch.randn(shape, device = self.device) # stochastic sampling
sigma_hat = sigma + gamma * sigma
images_hat = images + sqrt(sigma_hat ** 2 - sigma ** 2) * eps
self_cond = x_start if self.self_condition else None
model_output = self.preconditioned_network_forward(images_hat, sigma_hat, self_cond, clamp = clamp)
denoised_over_sigma = (images_hat - model_output) / sigma_hat
images_next = images_hat + (sigma_next - sigma_hat) * denoised_over_sigma
# second order correction, if not the last timestep
if sigma_next != 0:
self_cond = model_output if self.self_condition else None
model_output_next = self.preconditioned_network_forward(images_next, sigma_next, self_cond, clamp = clamp)
denoised_prime_over_sigma = (images_next - model_output_next) / sigma_next
images_next = images_hat + 0.5 * (sigma_next - sigma_hat) * (denoised_over_sigma + denoised_prime_over_sigma)
images = images_next
x_start = model_output_next if sigma_next != 0 else model_output
images = images.clamp(-1., 1.)
return unnormalize_to_zero_to_one(images)
@torch.no_grad()
def sample_using_dpmpp(self, batch_size = 16, num_sample_steps = None):
"""
thanks to Katherine Crowson (https://github.com/crowsonkb) for figuring it all out!
https://arxiv.org/abs/2211.01095
"""
device, num_sample_steps = self.device, default(num_sample_steps, self.num_sample_steps)
sigmas = self.sample_schedule(num_sample_steps)
shape = (batch_size, self.channels, self.image_size, self.image_size)
images = sigmas[0] * torch.randn(shape, device = device)
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
old_denoised = None
for i in tqdm(range(len(sigmas) - 1)):
denoised = self.preconditioned_network_forward(images, sigmas[i].item())
t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
h = t_next - t
if not exists(old_denoised) or sigmas[i + 1] == 0:
denoised_d = denoised
else:
h_last = t - t_fn(sigmas[i - 1])
r = h_last / h
gamma = - 1 / (2 * r)
denoised_d = (1 - gamma) * denoised + gamma * old_denoised
images = (sigma_fn(t_next) / sigma_fn(t)) * images - (-h).expm1() * denoised_d
old_denoised = denoised
images = images.clamp(-1., 1.)
return unnormalize_to_zero_to_one(images)
# training
def loss_weight(self, sigma):
return (sigma ** 2 + self.sigma_data ** 2) * (sigma * self.sigma_data) ** -2
def noise_distribution(self, batch_size):
return (self.P_mean + self.P_std * torch.randn((batch_size,), device = self.device)).exp()
def forward(self, images):
batch_size, c, h, w, device, image_size, channels = *images.shape, images.device, self.image_size, self.channels
assert h == image_size and w == image_size, f'height and width of image must be {image_size}'
assert c == channels, 'mismatch of image channels'
images = normalize_to_neg_one_to_one(images)
sigmas = self.noise_distribution(batch_size)
padded_sigmas = rearrange(sigmas, 'b -> b 1 1 1')
noise = torch.randn_like(images)
noised_images = images + padded_sigmas * noise # alphas are 1. in the paper
self_cond = None
if self.self_condition and random() < 0.5:
# from hinton's group's bit diffusion paper
with torch.no_grad():
self_cond = self.preconditioned_network_forward(noised_images, sigmas)
self_cond.detach_()
denoised = self.preconditioned_network_forward(noised_images, sigmas, self_cond)
losses = F.mse_loss(denoised, images, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
losses = losses * self.loss_weight(sigmas)
return losses.mean()
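# Illustrative usage sketch (not part of the original module), assuming the Unet
# exported by this package; learned sinusoidal conditioning is enabled because the
# constructor above asserts net.random_or_learned_sinusoidal_cond.
if __name__ == '__main__':
    from denoising_diffusion_pytorch import Unet

    net = Unet(dim = 64, dim_mults = (1, 2, 4, 8), learned_sinusoidal_cond = True)
    diffusion = ElucidatedDiffusion(net, image_size = 32, num_sample_steps = 16)

    images = torch.rand(2, 3, 32, 32)           # dummy batch of images in [0, 1]
    loss = diffusion(images)                    # weighted mse training loss
    loss.backward()

    sampled = diffusion.sample(batch_size = 2)  # (2, 3, 32, 32) images in [0, 1]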
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/elucidated_diffusion.py |
import math
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
from denoising_diffusion_pytorch.version import __version__
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def identity(t, *args, **kwargs):
return t
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# data
class Dataset1D(Dataset):
def __init__(self, tensor: Tensor):
super().__init__()
self.tensor = tensor.clone()
def __len__(self):
return len(self.tensor)
def __getitem__(self, idx):
return self.tensor[idx].clone()
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv1d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv1d(dim, default(dim_out, dim), 4, 2, 1)
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1))
def forward(self, x):
return F.normalize(x, dim = 1) * self.g * (x.shape[1] ** 0.5)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = RMSNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
self.dim = dim
self.theta = theta
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(self.theta) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim, is_random = False):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv1d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv1d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv1d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv1d(hidden_dim, dim, 1),
RMSNorm(dim)
)
def forward(self, x):
b, c, n = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) n -> b h c n', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c n -> b (h c) n', h = self.heads)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv1d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv1d(hidden_dim, dim, 1)
def forward(self, x):
b, c, n = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) n -> b h c n', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
return self.to_out(out)
# model
class Unet1D(nn.Module):
def __init__(
self,
dim,
init_dim = None,
out_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
self_condition = False,
resnet_block_groups = 8,
learned_variance = False,
learned_sinusoidal_cond = False,
random_fourier_features = False,
learned_sinusoidal_dim = 16,
sinusoidal_pos_emb_theta = 10000,
attn_dim_head = 32,
attn_heads = 4
):
super().__init__()
# determine dimensions
self.channels = channels
self.self_condition = self_condition
input_channels = channels * (2 if self_condition else 1)
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv1d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features
if self.random_or_learned_sinusoidal_cond:
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(dim, theta = sinusoidal_pos_emb_theta)
fourier_dim = dim
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv1d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim, dim_head = attn_dim_head, heads = attn_heads)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv1d(dim_out, dim_in, 3, padding = 1)
]))
default_out_dim = channels * (1 if not learned_variance else 2)
self.out_dim = default(out_dim, default_out_dim)
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv1d(dim, self.out_dim, 1)
def forward(self, x, time, x_self_cond = None):
if self.self_condition:
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
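# extract() gathers the per-timestep coefficient a[t] for every element in the batch
# and reshapes it to (b, 1, 1, ...) so it broadcasts against tensors of shape x_shape
# in q_sample and the posterior computations below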
def linear_beta_schedule(timesteps):
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
class GaussianDiffusion1D(nn.Module):
def __init__(
self,
model,
*,
seq_length,
timesteps = 1000,
sampling_timesteps = None,
objective = 'pred_noise',
beta_schedule = 'cosine',
ddim_sampling_eta = 0.,
auto_normalize = True
):
super().__init__()
self.model = model
self.channels = self.model.channels
self.self_condition = self.model.self_condition
self.seq_length = seq_length
self.objective = objective
assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'
if beta_schedule == 'linear':
betas = linear_beta_schedule(timesteps)
elif beta_schedule == 'cosine':
betas = cosine_beta_schedule(timesteps)
else:
raise ValueError(f'unknown beta schedule {beta_schedule}')
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
# sampling related parameters
self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = ddim_sampling_eta
# helper function to register buffer from float64 to float32
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# calculate loss weight
snr = alphas_cumprod / (1 - alphas_cumprod)
if objective == 'pred_noise':
loss_weight = torch.ones_like(snr)
elif objective == 'pred_x0':
loss_weight = snr
elif objective == 'pred_v':
loss_weight = snr / (snr + 1)
register_buffer('loss_weight', loss_weight)
# whether to autonormalize
self.normalize = normalize_to_neg_one_to_one if auto_normalize else identity
self.unnormalize = unnormalize_to_zero_to_one if auto_normalize else identity
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def predict_v(self, x_start, t, noise):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def model_predictions(self, x, t, x_self_cond = None, clip_x_start = False, rederive_pred_noise = False):
model_output = self.model(x, t, x_self_cond)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
if clip_x_start and rederive_pred_noise:
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_x0':
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_v':
v = model_output
x_start = self.predict_start_from_v(x, t, v)
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
def p_mean_variance(self, x, t, x_self_cond = None, clip_denoised = True):
preds = self.model_predictions(x, t, x_self_cond)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.no_grad()
def p_sample(self, x, t: int, x_self_cond = None, clip_denoised = True):
b, *_, device = *x.shape, x.device
batched_times = torch.full((b,), t, device = x.device, dtype = torch.long)
model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = batched_times, x_self_cond = x_self_cond, clip_denoised = clip_denoised)
noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
return pred_img, x_start
@torch.no_grad()
def p_sample_loop(self, shape):
batch, device = shape[0], self.betas.device
img = torch.randn(shape, device=device)
x_start = None
for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, t, self_cond)
img = self.unnormalize(img)
return img
@torch.no_grad()
def ddim_sample(self, shape, clip_denoised = True):
batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective
times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1) # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
img = torch.randn(shape, device = device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
time_cond = torch.full((batch,), time, device=device, dtype=torch.long)
self_cond = x_start if self.self_condition else None
pred_noise, x_start, *_ = self.model_predictions(img, time_cond, self_cond, clip_x_start = clip_denoised)
if time_next < 0:
img = x_start
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c = (1 - alpha_next - sigma ** 2).sqrt()
noise = torch.randn_like(img)
img = x_start * alpha_next.sqrt() + \
c * pred_noise + \
sigma * noise
img = self.unnormalize(img)
return img
@torch.no_grad()
def sample(self, batch_size = 16):
seq_length, channels = self.seq_length, self.channels
sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
return sample_fn((batch_size, channels, seq_length))
@torch.no_grad()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.full((b,), t, device = device)
xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
x_start = None
for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, i, self_cond)
return img
@autocast(enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, noise = None):
b, c, n = x_start.shape
noise = default(noise, lambda: torch.randn_like(x_start))
# noise sample
x = self.q_sample(x_start = x_start, t = t, noise = noise)
# if doing self-conditioning, 50% of the time, predict x_start from current set of times
# and condition with unet with that
# this technique will slow down training by 25%, but seems to lower FID significantly
x_self_cond = None
if self.self_condition and random() < 0.5:
with torch.no_grad():
x_self_cond = self.model_predictions(x, t).pred_x_start
x_self_cond.detach_()
# predict and take gradient step
model_out = self.model(x, t, x_self_cond)
if self.objective == 'pred_noise':
target = noise
elif self.objective == 'pred_x0':
target = x_start
elif self.objective == 'pred_v':
v = self.predict_v(x_start, t, noise)
target = v
else:
raise ValueError(f'unknown objective {self.objective}')
loss = F.mse_loss(model_out, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = loss * extract(self.loss_weight, t, loss.shape)
return loss.mean()
def forward(self, img, *args, **kwargs):
b, c, n, device, seq_length, = *img.shape, img.device, self.seq_length
assert n == seq_length, f'seq length must be {seq_length}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
img = self.normalize(img)
return self.p_losses(img, t, *args, **kwargs)
# trainer class
class Trainer1D(object):
def __init__(
self,
diffusion_model: GaussianDiffusion1D,
dataset: Dataset,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
mixed_precision_type = 'fp16',
split_batches = True,
max_grad_norm = 1.
):
super().__init__()
# accelerator
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = mixed_precision_type if amp else 'no'
)
# model
self.model = diffusion_model
self.channels = diffusion_model.channels
# sampling and training hyperparameters
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.max_grad_norm = max_grad_norm
self.train_num_steps = train_num_steps
# dataset and dataloader
dl = DataLoader(dataset, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.ema.to(self.device)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
@property
def device(self):
return self.accelerator.device
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
'version': __version__
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
accelerator = self.accelerator
device = accelerator.device
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
if self.accelerator.is_main_process:
self.ema.load_state_dict(data["ema"])
if 'version' in data:
print(f"loading from version {data['version']}")
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
self.step += 1
if accelerator.is_main_process:
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_samples_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_samples = torch.cat(all_samples_list, dim = 0)
torch.save(all_samples, str(self.results_folder / f'sample-{milestone}.png'))
self.save(milestone)
pbar.update(1)
accelerator.print('training complete')
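# Illustrative usage sketch (not part of the original module): fit the 1D diffusion
# model on random sequences; every class used here is defined above and the shapes
# are arbitrary placeholders.
if __name__ == '__main__':
    model = Unet1D(dim = 64, dim_mults = (1, 2, 4, 8), channels = 32)
    diffusion = GaussianDiffusion1D(model, seq_length = 128, timesteps = 100, objective = 'pred_v')

    training_seq = torch.rand(64, 32, 128)      # (num examples, channels, seq length) in [0, 1]
    dataset = Dataset1D(training_seq)

    loss = diffusion(training_seq)              # single training-step loss
    loss.backward()

    trainer = Trainer1D(diffusion, dataset = dataset, train_batch_size = 16, train_num_steps = 700)
    # trainer.train()                           # uncomment to run the accelerate-backed loop
    sampled_seq = diffusion.sample(batch_size = 4)  # (4, 32, 128)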
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/denoising_diffusion_pytorch_1d.py |
import math
import copy
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
from PIL import Image
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
# from denoising_diffusion_pytorch.version import __version__
__version__ = 'unknown' # fallback so Trainer.save() below does not raise NameError while the import above is commented out
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def identity(t, *args, **kwargs):
return t
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
nn.Conv2d(dim * 4, default(dim_out, dim), 1)
)
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
return F.normalize(x, dim = 1) * self.g * (x.shape[-1] ** 0.5)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = RMSNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim, is_random = False):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
RMSNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
out_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
self_condition = False,
resnet_block_groups = 8,
learned_variance = False,
learned_sinusoidal_cond = False,
random_fourier_features = False,
learned_sinusoidal_dim = 16
):
super().__init__()
# determine dimensions
self.channels = channels
self.self_condition = self_condition
input_channels = channels * (2 if self_condition else 1)
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features
if self.random_or_learned_sinusoidal_cond:
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(dim)
fourier_dim = dim
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
default_out_dim = channels * (1 if not learned_variance else 2)
self.out_dim = default(out_dim, default_out_dim)
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
def forward(self, x, time, x_self_cond = None):
if self.self_condition:
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def linear_beta_schedule(timesteps):
"""
linear schedule, proposed in original ddpm paper
"""
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
t = torch.linspace(0, timesteps, steps, dtype = torch.float64) / timesteps
alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
def sigmoid_beta_schedule(timesteps, start = -3, end = 3, tau = 1, clamp_min = 1e-5):
"""
sigmoid schedule
proposed in https://arxiv.org/abs/2212.11972 - Figure 8
better for images > 64x64, when used during training
"""
steps = timesteps + 1
t = torch.linspace(0, timesteps, steps, dtype = torch.float64) / timesteps
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
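# Small illustrative helper (not part of the original module): compare how much signal
# (cumulative alpha_bar) each of the beta schedules defined above retains over the chain.
def _compare_beta_schedules(timesteps = 1000):
    schedules = dict(
        linear = linear_beta_schedule,
        cosine = cosine_beta_schedule,
        sigmoid = sigmoid_beta_schedule
    )
    summary = {}
    for name, schedule_fn in schedules.items():
        alphas_cumprod = torch.cumprod(1. - schedule_fn(timesteps), dim = 0)
        # alpha_bar at 25% / 50% / 75% of the way through the diffusion chain
        summary[name] = [alphas_cumprod[int(timesteps * q) - 1].item() for q in (0.25, 0.5, 0.75)]
    return summary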
class GaussianDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
sampling_timesteps = None,
objective = 'pred_noise',
beta_schedule = 'sigmoid',
schedule_fn_kwargs = dict(),
ddim_sampling_eta = 0.,
auto_normalize = True,
min_snr_loss_weight = False,
min_snr_gamma = 5
):
super().__init__()
assert not (type(self) == GaussianDiffusion and model.channels != model.out_dim)
assert not model.random_or_learned_sinusoidal_cond
self.model = model
self.channels = self.model.channels
self.self_condition = self.model.self_condition
self.image_size = image_size
self.objective = objective
assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'
if beta_schedule == 'linear':
beta_schedule_fn = linear_beta_schedule
elif beta_schedule == 'cosine':
beta_schedule_fn = cosine_beta_schedule
elif beta_schedule == 'sigmoid':
beta_schedule_fn = sigmoid_beta_schedule
else:
raise ValueError(f'unknown beta schedule {beta_schedule}')
betas = beta_schedule_fn(timesteps, **schedule_fn_kwargs)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
# sampling related parameters
self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = ddim_sampling_eta
# helper function to register buffer from float64 to float32
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# loss weight
snr = alphas_cumprod / (1 - alphas_cumprod)
maybe_clipped_snr = snr.clone()
if min_snr_loss_weight:
maybe_clipped_snr.clamp_(max = min_snr_gamma)
if objective == 'pred_noise':
loss_weight = maybe_clipped_snr / snr
elif objective == 'pred_x0':
loss_weight = maybe_clipped_snr
elif objective == 'pred_v':
loss_weight = maybe_clipped_snr / (snr + 1)
register_buffer('loss_weight', loss_weight)
# auto-normalization of data [0, 1] -> [-1, 1] - can turn off by setting it to be False
self.normalize = normalize_to_neg_one_to_one if auto_normalize else identity
self.unnormalize = unnormalize_to_zero_to_one if auto_normalize else identity
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def predict_v(self, x_start, t, noise):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def model_predictions(self, x, t, x_self_cond = None, clip_x_start = False):
model_output = self.model(x, t, x_self_cond)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
elif self.objective == 'pred_x0':
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_v':
v = model_output
x_start = self.predict_start_from_v(x, t, v)
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
def p_mean_variance(self, x, t, x_self_cond = None, clip_denoised = True):
preds = self.model_predictions(x, t, x_self_cond)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
return model_mean, posterior_variance, posterior_log_variance, x_start
    def condition_mean(self, cond_fn, mean, variance, x, t, guidance_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **guidance_kwargs)
new_mean = (
mean.float() + variance * gradient.float()
)
print("gradient: ",(variance * gradient.float()).mean())
return new_mean
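    # i.e. the reverse-step mean is shifted by variance * grad_x log p(y | x_t), so the
    # classifier pulls samples towards class y most strongly at the noisier steps where
    # the posterior variance is large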
@torch.no_grad()
def p_sample(self, x, t: int, x_self_cond = None, cond_fn=None, guidance_kwargs=None):
b, *_, device = *x.shape, x.device
batched_times = torch.full((b,), t, device = x.device, dtype = torch.long)
model_mean, variance, model_log_variance, x_start = self.p_mean_variance(
x = x, t = batched_times, x_self_cond = x_self_cond, clip_denoised = True
)
if exists(cond_fn) and exists(guidance_kwargs):
model_mean = self.condition_mean(cond_fn, model_mean, variance, x, batched_times, guidance_kwargs)
noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
return pred_img, x_start
@torch.no_grad()
def p_sample_loop(self, shape, return_all_timesteps = False, cond_fn=None, guidance_kwargs=None):
batch, device = shape[0], self.betas.device
img = torch.randn(shape, device = device)
imgs = [img]
x_start = None
for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, t, self_cond, cond_fn, guidance_kwargs)
imgs.append(img)
ret = img if not return_all_timesteps else torch.stack(imgs, dim = 1)
ret = self.unnormalize(ret)
return ret
@torch.no_grad()
def ddim_sample(self, shape, return_all_timesteps = False, cond_fn=None, guidance_kwargs=None):
batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective
times = torch.linspace(-1, total_timesteps - 1, steps = sampling_timesteps + 1) # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
img = torch.randn(shape, device = device)
imgs = [img]
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
self_cond = x_start if self.self_condition else None
pred_noise, x_start, *_ = self.model_predictions(img, time_cond, self_cond, clip_x_start = True)
imgs.append(img)
if time_next < 0:
img = x_start
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c = (1 - alpha_next - sigma ** 2).sqrt()
noise = torch.randn_like(img)
img = x_start * alpha_next.sqrt() + \
c * pred_noise + \
sigma * noise
ret = img if not return_all_timesteps else torch.stack(imgs, dim = 1)
ret = self.unnormalize(ret)
return ret
@torch.no_grad()
def sample(self, batch_size = 16, return_all_timesteps = False, cond_fn=None, guidance_kwargs=None):
image_size, channels = self.image_size, self.channels
sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size), return_all_timesteps = return_all_timesteps, cond_fn=cond_fn, guidance_kwargs=guidance_kwargs)
@torch.no_grad()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.full((b,), t, device = device)
xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
x_start = None
for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, i, self_cond)
return img
@autocast(enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, noise = None):
b, c, h, w = x_start.shape
noise = default(noise, lambda: torch.randn_like(x_start))
# noise sample
x = self.q_sample(x_start = x_start, t = t, noise = noise)
# if doing self-conditioning, 50% of the time, predict x_start from current set of times
# and condition with unet with that
# this technique will slow down training by 25%, but seems to lower FID significantly
x_self_cond = None
if self.self_condition and random() < 0.5:
with torch.no_grad():
x_self_cond = self.model_predictions(x, t).pred_x_start
x_self_cond.detach_()
# predict and take gradient step
model_out = self.model(x, t, x_self_cond)
if self.objective == 'pred_noise':
target = noise
elif self.objective == 'pred_x0':
target = x_start
elif self.objective == 'pred_v':
v = self.predict_v(x_start, t, noise)
target = v
else:
raise ValueError(f'unknown objective {self.objective}')
loss = F.mse_loss(model_out, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = loss * extract(self.loss_weight, t, loss.shape)
return loss.mean()
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
img = self.normalize(img)
return self.p_losses(img, t, *args, **kwargs)
# dataset classes
class Dataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
augment_horizontal_flip = False,
convert_image_to = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
maybe_convert_fn = partial(convert_image_to_fn, convert_image_to) if exists(convert_image_to) else nn.Identity()
self.transform = T.Compose([
T.Lambda(maybe_convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
fp16 = False,
split_batches = True,
convert_image_to = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = 'fp16' if fp16 else 'no'
)
self.accelerator.native_amp = amp
self.model = diffusion_model
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
# dataset and dataloader
self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
'version': __version__
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
accelerator = self.accelerator
device = accelerator.device
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
self.ema.load_state_dict(data['ema'])
if 'version' in data:
print(f"loading from version {data['version']}")
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
self.step += 1
if accelerator.is_main_process:
self.ema.to(device)
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
self.save(milestone)
pbar.update(1)
accelerator.print('training complete')
if __name__ == '__main__':
class Classifier(nn.Module):
def __init__(self, image_size, num_classes, t_dim=1) -> None:
super().__init__()
self.linear_t = nn.Linear(t_dim, num_classes)
self.linear_img = nn.Linear(image_size * image_size * 3, num_classes)
def forward(self, x, t):
"""
Args:
x (torch.Tensor): [B, 3, N, N]
t (torch.Tensor): [B,]
Returns:
logits [B, num_classes]
"""
B = x.shape[0]
t = t.view(B, 1)
logits = self.linear_t(t.float()) + self.linear_img(x.view(x.shape[0], -1))
return logits
def classifier_cond_fn(x, t, classifier, y, classifier_scale=1):
"""
return the gradient of the classifier outputting y w.r.t. x,
formally expressed as d_log(classifier(x, t)) / dx
"""
assert y is not None
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
logits = classifier(x_in, t)
log_probs = F.log_softmax(logits, dim=-1)
selected = log_probs[range(len(logits)), y.view(-1)]
grad = torch.autograd.grad(selected.sum(), x_in)[0] * classifier_scale
return grad
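# Hedged note (added comment, not from the original file): classifier guidance in the
# style of Dhariwal & Nichol (2021) typically adds this gradient to the model mean at
# each reverse step, e.g. mean <- mean + variance * grad, nudging samples toward images
# the classifier labels as y. Exactly how `cond_fn` and `guidance_kwargs` are consumed
# depends on the sample()/p_sample implementation defined earlier in this file.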
model = Unet(
dim = 64,
dim_mults = (1, 2, 4, 8)
)
image_size = 128
diffusion = GaussianDiffusion(
model,
image_size = image_size,
timesteps = 1000 # number of steps
)
classifier = Classifier(image_size=image_size, num_classes=1000, t_dim=1)
batch_size = 4
sampled_images = diffusion.sample(
batch_size = batch_size,
cond_fn=classifier_cond_fn,
guidance_kwargs={
"classifier":classifier,
"y":torch.fill(torch.zeros(batch_size), 1).long(),
"classifier_scale":1,
}
)
sampled_images.shape # (4, 3, 128, 128) | denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/guided_diffusion.py |
__version__ = '1.8.11'
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/version.py |
import math
import copy
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.cuda.amp import autocast
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from PIL import Image
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
from denoising_diffusion_pytorch.attend import Attend
from denoising_diffusion_pytorch.fid_evaluation import FIDEvaluation
from denoising_diffusion_pytorch.version import __version__
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(t, length = 1):
if isinstance(t, tuple):
return t
return ((t,) * length)
def divisible_by(numer, denom):
return (numer % denom) == 0
def identity(t, *args, **kwargs):
return t
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
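# Illustrative examples (added comment): num_to_groups(25, 16) == [16, 9] and
# num_to_groups(32, 16) == [16, 16] -- used below to split `num_samples` into
# sample() calls of at most `batch_size` images each.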
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# small helper modules
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
nn.Conv2d(dim * 4, default(dim_out, dim), 1)
)
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
return F.normalize(x, dim = 1) * self.g * (x.shape[1] ** 0.5)
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
self.dim = dim
self.theta = theta
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(self.theta) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
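# Illustrative shape check (added comment): for SinusoidalPosEmb(dim = 8), a batch of
# timesteps t with shape (b,) maps to an embedding of shape (b, 8), where the first half
# of the features are sines and the second half cosines.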
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim, is_random = False):
super().__init__()
assert divisible_by(dim, 2)
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 32
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
RMSNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 32,
flash = False
):
super().__init__()
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim)
self.attend = Attend(flash = flash)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h (x y) c', h = self.heads), qkv)
out = self.attend(q, k, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
out_dim = None,
dim_mults = (1, 2, 4, 8),
channels = 3,
self_condition = False,
resnet_block_groups = 8,
learned_variance = False,
learned_sinusoidal_cond = False,
random_fourier_features = False,
learned_sinusoidal_dim = 16,
sinusoidal_pos_emb_theta = 10000,
attn_dim_head = 32,
attn_heads = 4,
full_attn = (False, False, False, True),
flash_attn = False
):
super().__init__()
# determine dimensions
self.channels = channels
self.self_condition = self_condition
input_channels = channels * (2 if self_condition else 1)
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features
if self.random_or_learned_sinusoidal_cond:
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(dim, theta = sinusoidal_pos_emb_theta)
fourier_dim = dim
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# attention
num_stages = len(dim_mults)
full_attn = cast_tuple(full_attn, num_stages)
attn_heads = cast_tuple(attn_heads, num_stages)
attn_dim_head = cast_tuple(attn_dim_head, num_stages)
assert len(full_attn) == len(dim_mults)
FullAttention = partial(Attention, flash = flash_attn)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, ((dim_in, dim_out), layer_full_attn, layer_attn_heads, layer_attn_dim_head) in enumerate(zip(in_out, full_attn, attn_heads, attn_dim_head)):
is_last = ind >= (num_resolutions - 1)
attn_klass = FullAttention if layer_full_attn else LinearAttention
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
attn_klass(dim_in, dim_head = layer_attn_dim_head, heads = layer_attn_heads),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = FullAttention(mid_dim, heads = attn_heads[-1], dim_head = attn_dim_head[-1])
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, ((dim_in, dim_out), layer_full_attn, layer_attn_heads, layer_attn_dim_head) in enumerate(zip(*map(reversed, (in_out, full_attn, attn_heads, attn_dim_head)))):
is_last = ind == (len(in_out) - 1)
attn_klass = FullAttention if layer_full_attn else LinearAttention
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
attn_klass(dim_out, dim_head = layer_attn_dim_head, heads = layer_attn_heads),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
default_out_dim = channels * (1 if not learned_variance else 2)
self.out_dim = default(out_dim, default_out_dim)
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
@property
def downsample_factor(self):
return 2 ** (len(self.downs) - 1)
def forward(self, x, time, x_self_cond = None):
assert all([divisible_by(d, self.downsample_factor) for d in x.shape[-2:]]), f'your input dimensions {x.shape[-2:]} need to be divisible by {self.downsample_factor}, given the unet'
if self.self_condition:
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x) + x
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x) + x
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x) + x
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
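# Illustrative example (added comment): `extract` gathers per-sample schedule values at
# the integer timesteps t and reshapes them for broadcasting over image dims, e.g.
# extract(self.betas, t, (b, c, h, w)) has shape (b, 1, 1, 1).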
def linear_beta_schedule(timesteps):
"""
linear schedule, proposed in original ddpm paper
"""
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
t = torch.linspace(0, timesteps, steps, dtype = torch.float64) / timesteps
alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
def sigmoid_beta_schedule(timesteps, start = -3, end = 3, tau = 1, clamp_min = 1e-5):
"""
sigmoid schedule
proposed in https://arxiv.org/abs/2212.11972 - Figure 8
better for images > 64x64, when used during training
"""
steps = timesteps + 1
t = torch.linspace(0, timesteps, steps, dtype = torch.float64) / timesteps
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
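# Hedged sanity-check sketch (added, not part of the library): each schedule returns a
# float64 tensor of `timesteps` betas clipped to at most 0.999; GaussianDiffusion below
# derives alphas_cumprod = cumprod(1 - betas) from it.
#   betas = cosine_beta_schedule(1000)
#   assert betas.shape == (1000,) and betas.max() <= 0.999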
class GaussianDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
sampling_timesteps = None,
objective = 'pred_v',
beta_schedule = 'sigmoid',
schedule_fn_kwargs = dict(),
ddim_sampling_eta = 0.,
auto_normalize = True,
offset_noise_strength = 0., # https://www.crosslabs.org/blog/diffusion-with-offset-noise
min_snr_loss_weight = False, # https://arxiv.org/abs/2303.09556
min_snr_gamma = 5
):
super().__init__()
assert not (type(self) == GaussianDiffusion and model.channels != model.out_dim)
assert not model.random_or_learned_sinusoidal_cond
self.model = model
self.channels = self.model.channels
self.self_condition = self.model.self_condition
self.image_size = image_size
self.objective = objective
assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'
if beta_schedule == 'linear':
beta_schedule_fn = linear_beta_schedule
elif beta_schedule == 'cosine':
beta_schedule_fn = cosine_beta_schedule
elif beta_schedule == 'sigmoid':
beta_schedule_fn = sigmoid_beta_schedule
else:
raise ValueError(f'unknown beta schedule {beta_schedule}')
betas = beta_schedule_fn(timesteps, **schedule_fn_kwargs)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
# sampling related parameters
self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = ddim_sampling_eta
# helper function to register buffer from float64 to float32
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# offset noise strength - in blogpost, they claimed 0.1 was ideal
self.offset_noise_strength = offset_noise_strength
# derive loss weight
# snr - signal noise ratio
snr = alphas_cumprod / (1 - alphas_cumprod)
# https://arxiv.org/abs/2303.09556
maybe_clipped_snr = snr.clone()
if min_snr_loss_weight:
maybe_clipped_snr.clamp_(max = min_snr_gamma)
if objective == 'pred_noise':
register_buffer('loss_weight', maybe_clipped_snr / snr)
elif objective == 'pred_x0':
register_buffer('loss_weight', maybe_clipped_snr)
elif objective == 'pred_v':
register_buffer('loss_weight', maybe_clipped_snr / (snr + 1))
# auto-normalization of data [0, 1] -> [-1, 1] - can turn off by setting it to be False
self.normalize = normalize_to_neg_one_to_one if auto_normalize else identity
self.unnormalize = unnormalize_to_zero_to_one if auto_normalize else identity
@property
def device(self):
return self.betas.device
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def predict_v(self, x_start, t, noise):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
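# Added note: these conversions implement the v-parameterization (progressive
# distillation, appendix D): v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0,
# and conversely x_0 = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v, so any one of
# (noise, x_0, v) determines the others given x_t and t.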
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def model_predictions(self, x, t, x_self_cond = None, clip_x_start = False, rederive_pred_noise = False):
model_output = self.model(x, t, x_self_cond)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
if clip_x_start and rederive_pred_noise:
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_x0':
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_v':
v = model_output
x_start = self.predict_start_from_v(x, t, v)
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
def p_mean_variance(self, x, t, x_self_cond = None, clip_denoised = True):
preds = self.model_predictions(x, t, x_self_cond)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.inference_mode()
def p_sample(self, x, t: int, x_self_cond = None):
b, *_, device = *x.shape, self.device
batched_times = torch.full((b,), t, device = device, dtype = torch.long)
model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = batched_times, x_self_cond = x_self_cond, clip_denoised = True)
noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
return pred_img, x_start
@torch.inference_mode()
def p_sample_loop(self, shape, return_all_timesteps = False):
batch, device = shape[0], self.device
img = torch.randn(shape, device = device)
imgs = [img]
x_start = None
for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, t, self_cond)
imgs.append(img)
ret = img if not return_all_timesteps else torch.stack(imgs, dim = 1)
ret = self.unnormalize(ret)
return ret
@torch.inference_mode()
def ddim_sample(self, shape, return_all_timesteps = False):
batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective
times = torch.linspace(-1, total_timesteps - 1, steps = sampling_timesteps + 1) # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
img = torch.randn(shape, device = device)
imgs = [img]
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
self_cond = x_start if self.self_condition else None
pred_noise, x_start, *_ = self.model_predictions(img, time_cond, self_cond, clip_x_start = True, rederive_pred_noise = True)
if time_next < 0:
img = x_start
imgs.append(img)
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c = (1 - alpha_next - sigma ** 2).sqrt()
noise = torch.randn_like(img)
img = x_start * alpha_next.sqrt() + \
c * pred_noise + \
sigma * noise
imgs.append(img)
ret = img if not return_all_timesteps else torch.stack(imgs, dim = 1)
ret = self.unnormalize(ret)
return ret
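# Added note: with the default ddim_sampling_eta = 0, sigma is 0 and the update above is
# deterministic DDIM; eta = 1 recovers DDPM-like stochasticity along the subsampled
# timestep schedule.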
@torch.inference_mode()
def sample(self, batch_size = 16, return_all_timesteps = False):
image_size, channels = self.image_size, self.channels
sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size), return_all_timesteps = return_all_timesteps)
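# Minimal usage sketch (added; hyperparameters are illustrative only):
#   model = Unet(dim = 64, dim_mults = (1, 2, 4, 8))
#   diffusion = GaussianDiffusion(model, image_size = 64, timesteps = 1000, sampling_timesteps = 250)
#   images = diffusion.sample(batch_size = 4)   # (4, 3, 64, 64), values in [0, 1]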
@torch.inference_mode()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.full((b,), t, device = device)
xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
x_start = None
for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, i, self_cond)
return img
@autocast(enabled = False)
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, noise = None, offset_noise_strength = None):
b, c, h, w = x_start.shape
noise = default(noise, lambda: torch.randn_like(x_start))
# offset noise - https://www.crosslabs.org/blog/diffusion-with-offset-noise
offset_noise_strength = default(offset_noise_strength, self.offset_noise_strength)
if offset_noise_strength > 0.:
offset_noise = torch.randn(x_start.shape[:2], device = self.device)
noise += offset_noise_strength * rearrange(offset_noise, 'b c -> b c 1 1')
# noise sample
x = self.q_sample(x_start = x_start, t = t, noise = noise)
# if doing self-conditioning, 50% of the time, predict x_start from the current set of times
# and condition the unet on that prediction
# this technique will slow down training by 25%, but seems to lower FID significantly
x_self_cond = None
if self.self_condition and random() < 0.5:
with torch.inference_mode():
x_self_cond = self.model_predictions(x, t).pred_x_start
x_self_cond.detach_()
# predict and take gradient step
model_out = self.model(x, t, x_self_cond)
if self.objective == 'pred_noise':
target = noise
elif self.objective == 'pred_x0':
target = x_start
elif self.objective == 'pred_v':
v = self.predict_v(x_start, t, noise)
target = v
else:
raise ValueError(f'unknown objective {self.objective}')
loss = F.mse_loss(model_out, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = loss * extract(self.loss_weight, t, loss.shape)
return loss.mean()
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
img = self.normalize(img)
return self.p_losses(img, t, *args, **kwargs)
# dataset classes
class Dataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
augment_horizontal_flip = False,
convert_image_to = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
maybe_convert_fn = partial(convert_image_to_fn, convert_image_to) if exists(convert_image_to) else nn.Identity()
self.transform = T.Compose([
T.Lambda(maybe_convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
mixed_precision_type = 'fp16',
split_batches = True,
convert_image_to = None,
calculate_fid = True,
inception_block_idx = 2048,
max_grad_norm = 1.,
num_fid_samples = 50000,
save_best_and_latest_only = False
):
super().__init__()
# accelerator
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = mixed_precision_type if amp else 'no'
)
# model
self.model = diffusion_model
self.channels = diffusion_model.channels
is_ddim_sampling = diffusion_model.is_ddim_sampling
# sampling and training hyperparameters
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
assert (train_batch_size * gradient_accumulate_every) >= 16, 'your effective batch size (train_batch_size x gradient_accumulate_every) should be at least 16'
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
self.max_grad_norm = max_grad_norm
# dataset and dataloader
self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
assert len(self.ds) >= 100, 'you should have at least 100 images in your folder. at least 10k images recommended'
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.ema.to(self.device)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
# FID-score computation
self.calculate_fid = calculate_fid and self.accelerator.is_main_process
if self.calculate_fid:
if not is_ddim_sampling:
self.accelerator.print(
"WARNING: Robust FID computation requires a lot of generated samples and can therefore be very time consuming."\
"Consider using DDIM sampling to save time."
)
self.fid_scorer = FIDEvaluation(
batch_size=self.batch_size,
dl=self.dl,
sampler=self.ema.ema_model,
channels=self.channels,
accelerator=self.accelerator,
stats_dir=results_folder,
device=self.device,
num_fid_samples=num_fid_samples,
inception_block_idx=inception_block_idx
)
if save_best_and_latest_only:
assert calculate_fid, "`calculate_fid` must be True to provide a means for model evaluation for `save_best_and_latest_only`."
self.best_fid = 1e10 # effectively infinite
self.save_best_and_latest_only = save_best_and_latest_only
@property
def device(self):
return self.accelerator.device
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
'version': __version__
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
accelerator = self.accelerator
device = accelerator.device
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
if self.accelerator.is_main_process:
self.ema.load_state_dict(data["ema"])
if 'version' in data:
print(f"loading from version {data['version']}")
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
self.step += 1
if accelerator.is_main_process:
self.ema.update()
if self.step != 0 and divisible_by(self.step, self.save_and_sample_every):
self.ema.ema_model.eval()
with torch.inference_mode():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
# whether to calculate fid
if self.calculate_fid:
fid_score = self.fid_scorer.fid_score()
accelerator.print(f'fid_score: {fid_score}')
if self.save_best_and_latest_only:
if self.best_fid > fid_score:
self.best_fid = fid_score
self.save("best")
self.save("latest")
else:
self.save(milestone)
pbar.update(1)
accelerator.print('training complete')
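# Minimal end-to-end sketch (added; the folder path and hyperparameters are placeholders):
#   trainer = Trainer(
#       diffusion,
#       '/path/to/your/images',
#       train_batch_size = 32,
#       gradient_accumulate_every = 2,
#       train_lr = 8e-5,
#       train_num_steps = 700000,
#       calculate_fid = True
#   )
#   trainer.train()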
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/denoising_diffusion_pytorch.py |
from denoising_diffusion_pytorch.denoising_diffusion_pytorch import GaussianDiffusion, Unet, Trainer
from denoising_diffusion_pytorch.learned_gaussian_diffusion import LearnedGaussianDiffusion
from denoising_diffusion_pytorch.continuous_time_gaussian_diffusion import ContinuousTimeGaussianDiffusion
from denoising_diffusion_pytorch.weighted_objective_gaussian_diffusion import WeightedObjectiveGaussianDiffusion
from denoising_diffusion_pytorch.elucidated_diffusion import ElucidatedDiffusion
from denoising_diffusion_pytorch.v_param_continuous_time_gaussian_diffusion import VParamContinuousTimeGaussianDiffusion
from denoising_diffusion_pytorch.denoising_diffusion_pytorch_1d import GaussianDiffusion1D, Unet1D, Trainer1D, Dataset1D
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/__init__.py |
import math
import torch
from torch import sqrt
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.special import expm1
from tqdm import tqdm
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# diffusion helpers
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
# neural net helpers
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return x + self.fn(x)
class MonotonicLinear(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.net = nn.Linear(*args, **kwargs)
def forward(self, x):
return F.linear(x, self.net.weight.abs(), self.net.bias.abs())
# continuous schedules
# equations are taken from https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material
# @crowsonkb Katherine's repository also helped here https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py
# log(snr) that approximates the original linear schedule
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def beta_linear_log_snr(t):
return -log(expm1(1e-4 + 10 * (t ** 2)))
def alpha_cosine_log_snr(t, s = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5)
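# Added note: these schedules map continuous time t in [0, 1] to log SNR; q_sample below
# then uses alpha = sqrt(sigmoid(log_snr)) and sigma = sqrt(sigmoid(-log_snr)), so
# alpha^2 + sigma^2 = 1 at every t.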
class learned_noise_schedule(nn.Module):
""" described in section H and then I.2 of the supplementary material for variational ddpm paper """
def __init__(
self,
*,
log_snr_max,
log_snr_min,
hidden_dim = 1024,
frac_gradient = 1.
):
super().__init__()
self.slope = log_snr_min - log_snr_max
self.intercept = log_snr_max
self.net = nn.Sequential(
Rearrange('... -> ... 1'),
MonotonicLinear(1, 1),
Residual(nn.Sequential(
MonotonicLinear(1, hidden_dim),
nn.Sigmoid(),
MonotonicLinear(hidden_dim, 1)
)),
Rearrange('... 1 -> ...'),
)
self.frac_gradient = frac_gradient
def forward(self, x):
frac_gradient = self.frac_gradient
device = x.device
out_zero = self.net(torch.zeros_like(x))
out_one = self.net(torch.ones_like(x))
x = self.net(x)
normed = self.slope * ((x - out_zero) / (out_one - out_zero)) + self.intercept
return normed * frac_gradient + normed.detach() * (1 - frac_gradient)
class ContinuousTimeGaussianDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
channels = 3,
noise_schedule = 'linear',
num_sample_steps = 500,
clip_sample_denoised = True,
learned_schedule_net_hidden_dim = 1024,
learned_noise_schedule_frac_gradient = 1., # between 0 and 1, determines what percentage of gradients go back, so one can update the learned noise schedule more slowly
min_snr_loss_weight = False,
min_snr_gamma = 5
):
super().__init__()
assert model.random_or_learned_sinusoidal_cond
assert not model.self_condition, 'not supported yet'
self.model = model
# image dimensions
self.channels = channels
self.image_size = image_size
# continuous noise schedule related stuff
if noise_schedule == 'linear':
self.log_snr = beta_linear_log_snr
elif noise_schedule == 'cosine':
self.log_snr = alpha_cosine_log_snr
elif noise_schedule == 'learned':
log_snr_max, log_snr_min = [beta_linear_log_snr(torch.tensor([time])).item() for time in (0., 1.)]
self.log_snr = learned_noise_schedule(
log_snr_max = log_snr_max,
log_snr_min = log_snr_min,
hidden_dim = learned_schedule_net_hidden_dim,
frac_gradient = learned_noise_schedule_frac_gradient
)
else:
raise ValueError(f'unknown noise schedule {noise_schedule}')
# sampling
self.num_sample_steps = num_sample_steps
self.clip_sample_denoised = clip_sample_denoised
# proposed https://arxiv.org/abs/2303.09556
self.min_snr_loss_weight = min_snr_loss_weight
self.min_snr_gamma = min_snr_gamma
@property
def device(self):
return next(self.model.parameters()).device
def p_mean_variance(self, x, time, time_next):
# reviewer found an error in the equation in the paper (missing sigma)
# following - https://openreview.net/forum?id=2LdBqxc1Yv&noteId=rIQgH0zKsRt
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
c = -expm1(log_snr - log_snr_next)
squared_alpha, squared_alpha_next = log_snr.sigmoid(), log_snr_next.sigmoid()
squared_sigma, squared_sigma_next = (-log_snr).sigmoid(), (-log_snr_next).sigmoid()
alpha, sigma, alpha_next = map(sqrt, (squared_alpha, squared_sigma, squared_alpha_next))
batch_log_snr = repeat(log_snr, ' -> b', b = x.shape[0])
pred_noise = self.model(x, batch_log_snr)
if self.clip_sample_denoised:
x_start = (x - sigma * pred_noise) / alpha
# in Imagen, this was changed to dynamic thresholding
x_start.clamp_(-1., 1.)
model_mean = alpha_next * (x * (1 - c) / alpha + c * x_start)
else:
model_mean = alpha_next / alpha * (x - c * sigma * pred_noise)
posterior_variance = squared_sigma_next * c
return model_mean, posterior_variance
# sampling related functions
@torch.no_grad()
def p_sample(self, x, time, time_next):
batch, *_, device = *x.shape, x.device
model_mean, model_variance = self.p_mean_variance(x = x, time = time, time_next = time_next)
if time_next == 0:
return model_mean
noise = torch.randn_like(x)
return model_mean + sqrt(model_variance) * noise
@torch.no_grad()
def p_sample_loop(self, shape):
batch = shape[0]
img = torch.randn(shape, device = self.device)
steps = torch.linspace(1., 0., self.num_sample_steps + 1, device = self.device)
for i in tqdm(range(self.num_sample_steps), desc = 'sampling loop time step', total = self.num_sample_steps):
times = steps[i]
times_next = steps[i + 1]
img = self.p_sample(img, times, times_next)
img.clamp_(-1., 1.)
img = unnormalize_to_zero_to_one(img)
return img
@torch.no_grad()
def sample(self, batch_size = 16):
return self.p_sample_loop((batch_size, self.channels, self.image_size, self.image_size))
# training related functions - noise prediction
@autocast(enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(times)
log_snr_padded = right_pad_dims_to(x_start, log_snr)
alpha, sigma = sqrt(log_snr_padded.sigmoid()), sqrt((-log_snr_padded).sigmoid())
x_noised = x_start * alpha + noise * sigma
return x_noised, log_snr
def random_times(self, batch_size):
# times are now uniform from 0 to 1
return torch.zeros((batch_size,), device = self.device).float().uniform_(0, 1)
def p_losses(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
x, log_snr = self.q_sample(x_start = x_start, times = times, noise = noise)
model_out = self.model(x, log_snr)
losses = F.mse_loss(model_out, noise, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
if self.min_snr_loss_weight:
snr = log_snr.exp()
loss_weight = snr.clamp(min = self.min_snr_gamma) / snr
losses = losses * loss_weight
return losses.mean()
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
times = self.random_times(b)
img = normalize_to_neg_one_to_one(img)
return self.p_losses(img, times, *args, **kwargs)
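# Minimal usage sketch (added; assumes the Unet from denoising_diffusion_pytorch with
# learned_sinusoidal_cond = True, since this class requires
# model.random_or_learned_sinusoidal_cond and no self-conditioning):
#   unet = Unet(dim = 64, learned_sinusoidal_cond = True)
#   diffusion = ContinuousTimeGaussianDiffusion(unet, image_size = 64, num_sample_steps = 250)
#   loss = diffusion(images)        # images: (b, 3, 64, 64) scaled to [0, 1]
#   samples = diffusion.sample(4)   # (4, 3, 64, 64)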
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/continuous_time_gaussian_diffusion.py |
import math
from functools import partial, wraps
import torch
from torch import sqrt
from torch import nn, einsum
import torch.nn.functional as F
from torch.special import expm1
from torch.cuda.amp import autocast
from tqdm import tqdm
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def identity(t):
return t
def is_lambda(f):
return callable(f) and f.__name__ == "<lambda>"
def default(val, d):
if exists(val):
return val
return d() if is_lambda(d) else d
def cast_tuple(t, l = 1):
return ((t,) * l) if not isinstance(t, tuple) else t
def append_dims(t, dims):
shape = t.shape
return t.reshape(*shape, *((1,) * dims))
def l2norm(t):
return F.normalize(t, dim = -1)
# u-vit related functions and modules
class Upsample(nn.Module):
def __init__(
self,
dim,
dim_out = None,
factor = 2
):
super().__init__()
self.factor = factor
self.factor_squared = factor ** 2
dim_out = default(dim_out, dim)
conv = nn.Conv2d(dim, dim_out * self.factor_squared, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(factor)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, h, w = conv.weight.shape
conv_weight = torch.empty(o // self.factor_squared, i, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o r) ...', r = self.factor_squared)
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
def Downsample(
dim,
dim_out = None,
factor = 2
):
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = factor, p2 = factor),
nn.Conv2d(dim * (factor ** 2), default(dim_out, dim), 1)
)
class RMSNorm(nn.Module):
def __init__(self, dim, scale = True, normalize_dim = 2):
super().__init__()
self.g = nn.Parameter(torch.ones(dim)) if scale else 1
self.scale = scale
self.normalize_dim = normalize_dim
def forward(self, x):
normalize_dim = self.normalize_dim
scale = append_dims(self.g, x.ndim - self.normalize_dim - 1) if self.scale else 1
return F.normalize(x, dim = normalize_dim) * scale * (x.shape[normalize_dim] ** 0.5)
# sinusoidal positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim, normalize_dim = 1)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
RMSNorm(dim, normalize_dim = 1)
)
def forward(self, x):
residual = x
b, c, h, w = x.shape
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out) + residual
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32, scale = 8, dropout = 0.):
super().__init__()
self.scale = scale
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim)
self.attn_dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, hidden_dim * 3, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Linear(hidden_dim, dim, bias = False)
def forward(self, x):
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class FeedForward(nn.Module):
def __init__(
self,
dim,
cond_dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.norm = RMSNorm(dim, scale = False)
dim_hidden = dim * mult
self.to_scale_shift = nn.Sequential(
nn.SiLU(),
nn.Linear(cond_dim, dim_hidden * 2),
Rearrange('b d -> b 1 d')
)
to_scale_shift_linear = self.to_scale_shift[-2]
nn.init.zeros_(to_scale_shift_linear.weight)
nn.init.zeros_(to_scale_shift_linear.bias)
self.proj_in = nn.Sequential(
nn.Linear(dim, dim_hidden, bias = False),
nn.SiLU()
)
self.proj_out = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim, bias = False)
)
def forward(self, x, t):
x = self.norm(x)
x = self.proj_in(x)
scale, shift = self.to_scale_shift(t).chunk(2, dim = -1)
x = x * (scale + 1) + shift
return self.proj_out(x)
# vit
class Transformer(nn.Module):
def __init__(
self,
dim,
time_cond_dim,
depth,
dim_head = 32,
heads = 4,
ff_mult = 4,
dropout = 0.,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = dropout),
FeedForward(dim = dim, mult = ff_mult, cond_dim = time_cond_dim, dropout = dropout)
]))
def forward(self, x, t):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x, t) + x
return x
# model
class UViT(nn.Module):
def __init__(
self,
dim,
init_dim = None,
out_dim = None,
dim_mults = (1, 2, 4, 8),
downsample_factor = 2,
channels = 3,
vit_depth = 6,
vit_dropout = 0.2,
attn_dim_head = 32,
attn_heads = 4,
ff_mult = 4,
resnet_block_groups = 8,
learned_sinusoidal_dim = 16,
init_img_transform: callable = None,
final_img_itransform: callable = None,
patch_size = 1,
dual_patchnorm = False
):
super().__init__()
# for initial dwt transform (or whatever transform researcher wants to try here)
if exists(init_img_transform) and exists(final_img_itransform):
init_shape = torch.Size([1, 1, 32, 32])
mock_tensor = torch.randn(init_shape)
assert final_img_itransform(init_img_transform(mock_tensor)).shape == init_shape
self.init_img_transform = default(init_img_transform, identity)
self.final_img_itransform = default(final_img_itransform, identity)
input_channels = channels
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
# whether to do initial patching, as alternative to dwt
self.unpatchify = identity
input_channels = channels * (patch_size ** 2)
needs_patch = patch_size > 1
if needs_patch:
if not dual_patchnorm:
self.init_conv = nn.Conv2d(channels, init_dim, patch_size, stride = patch_size)
else:
self.init_conv = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b h w (c p1 p2)', p1 = patch_size, p2 = patch_size),
nn.LayerNorm(input_channels),
nn.Linear(input_channels, init_dim),
nn.LayerNorm(init_dim),
Rearrange('b h w c -> b c h w')
)
self.unpatchify = nn.ConvTranspose2d(input_channels, channels, patch_size, stride = patch_size)
# determine dimensions
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
resnet_block = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
fourier_dim = learned_sinusoidal_dim + 1
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# downsample factors
downsample_factor = cast_tuple(downsample_factor, len(dim_mults))
assert len(downsample_factor) == len(dim_mults)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, ((dim_in, dim_out), factor) in enumerate(zip(in_out, downsample_factor)):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
resnet_block(dim_in, dim_in, time_emb_dim = time_dim),
resnet_block(dim_in, dim_in, time_emb_dim = time_dim),
LinearAttention(dim_in),
Downsample(dim_in, dim_out, factor = factor)
]))
mid_dim = dims[-1]
self.vit = Transformer(
dim = mid_dim,
time_cond_dim = time_dim,
depth = vit_depth,
dim_head = attn_dim_head,
heads = attn_heads,
ff_mult = ff_mult,
dropout = vit_dropout
)
for ind, ((dim_in, dim_out), factor) in enumerate(zip(reversed(in_out), reversed(downsample_factor))):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
Upsample(dim_out, dim_in, factor = factor),
resnet_block(dim_in * 2, dim_in, time_emb_dim = time_dim),
resnet_block(dim_in * 2, dim_in, time_emb_dim = time_dim),
LinearAttention(dim_in),
]))
default_out_dim = input_channels
self.out_dim = default(out_dim, default_out_dim)
self.final_res_block = resnet_block(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
def forward(self, x, time):
x = self.init_img_transform(x)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = rearrange(x, 'b c h w -> b h w c')
x, ps = pack([x], 'b * c')
x = self.vit(x, t)
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b h w c -> b c h w')
for upsample, block1, block2, attn in self.ups:
x = upsample(x)
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
x = self.final_conv(x)
x = self.unpatchify(x)
return self.final_img_itransform(x)
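# Illustrative shape check (added sketch; arguments are placeholders):
#   vit_unet = UViT(dim = 64)
#   out = vit_unet(torch.randn(1, 3, 64, 64), torch.rand(1))
#   out.shape == (1, 3, 64, 64)   # spatial size preserved by the symmetric down/up path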
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# diffusion helpers
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
# logsnr schedules and shifting / interpolating decorators
# only cosine for now
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def logsnr_schedule_cosine(t, logsnr_min = -15, logsnr_max = 15):
t_min = math.atan(math.exp(-0.5 * logsnr_max))
t_max = math.atan(math.exp(-0.5 * logsnr_min))
return -2 * log(torch.tan(t_min + t * (t_max - t_min)))
def logsnr_schedule_shifted(fn, image_d, noise_d):
shift = 2 * math.log(noise_d / image_d)
@wraps(fn)
def inner(*args, **kwargs):
nonlocal shift
return fn(*args, **kwargs) + shift
return inner
def logsnr_schedule_interpolated(fn, image_d, noise_d_low, noise_d_high):
logsnr_low_fn = logsnr_schedule_shifted(fn, image_d, noise_d_low)
logsnr_high_fn = logsnr_schedule_shifted(fn, image_d, noise_d_high)
@wraps(fn)
def inner(t, *args, **kwargs):
nonlocal logsnr_low_fn
nonlocal logsnr_high_fn
return t * logsnr_low_fn(t, *args, **kwargs) + (1 - t) * logsnr_high_fn(t, *args, **kwargs)
return inner
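# Added note: the shift of 2 * log(noise_d / image_d) and the low/high interpolation above
# appear to follow the resolution-dependent noise schedules of the "simple diffusion"
# paper (Hoogeboom et al., 2023), so the same schedule shape can be reused at higher
# image resolutions.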
# main gaussian diffusion class
class GaussianDiffusion(nn.Module):
def __init__(
self,
model: UViT,
*,
image_size,
channels = 3,
pred_objective = 'v',
noise_schedule = logsnr_schedule_cosine,
noise_d = None,
noise_d_low = None,
noise_d_high = None,
num_sample_steps = 500,
clip_sample_denoised = True,
min_snr_loss_weight = True,
min_snr_gamma = 5
):
super().__init__()
assert pred_objective in {'v', 'eps'}, 'whether to predict v-space (progressive distillation paper) or noise'
self.model = model
# image dimensions
self.channels = channels
self.image_size = image_size
# training objective
self.pred_objective = pred_objective
# noise schedule
assert not all([*map(exists, (noise_d, noise_d_low, noise_d_high))]), 'you must either set noise_d for shifted schedule, or noise_d_low and noise_d_high for shifted and interpolated schedule'
# determine shifting or interpolated schedules
self.log_snr = noise_schedule
if exists(noise_d):
self.log_snr = logsnr_schedule_shifted(self.log_snr, image_size, noise_d)
if exists(noise_d_low) or exists(noise_d_high):
assert exists(noise_d_low) and exists(noise_d_high), 'both noise_d_low and noise_d_high must be set'
self.log_snr = logsnr_schedule_interpolated(self.log_snr, image_size, noise_d_low, noise_d_high)
# sampling
self.num_sample_steps = num_sample_steps
self.clip_sample_denoised = clip_sample_denoised
# loss weight
self.min_snr_loss_weight = min_snr_loss_weight
self.min_snr_gamma = min_snr_gamma
@property
def device(self):
return next(self.model.parameters()).device
def p_mean_variance(self, x, time, time_next):
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
c = -expm1(log_snr - log_snr_next)
squared_alpha, squared_alpha_next = log_snr.sigmoid(), log_snr_next.sigmoid()
squared_sigma, squared_sigma_next = (-log_snr).sigmoid(), (-log_snr_next).sigmoid()
alpha, sigma, alpha_next = map(sqrt, (squared_alpha, squared_sigma, squared_alpha_next))
batch_log_snr = repeat(log_snr, ' -> b', b = x.shape[0])
pred = self.model(x, batch_log_snr)
if self.pred_objective == 'v':
x_start = alpha * x - sigma * pred
elif self.pred_objective == 'eps':
x_start = (x - sigma * pred) / alpha
        if self.clip_sample_denoised:
            x_start.clamp_(-1., 1.)
model_mean = alpha_next * (x * (1 - c) / alpha + c * x_start)
posterior_variance = squared_sigma_next * c
return model_mean, posterior_variance
# sampling related functions
@torch.no_grad()
def p_sample(self, x, time, time_next):
batch, *_, device = *x.shape, x.device
model_mean, model_variance = self.p_mean_variance(x = x, time = time, time_next = time_next)
if time_next == 0:
return model_mean
noise = torch.randn_like(x)
return model_mean + sqrt(model_variance) * noise
@torch.no_grad()
def p_sample_loop(self, shape):
batch = shape[0]
img = torch.randn(shape, device = self.device)
steps = torch.linspace(1., 0., self.num_sample_steps + 1, device = self.device)
for i in tqdm(range(self.num_sample_steps), desc = 'sampling loop time step', total = self.num_sample_steps):
times = steps[i]
times_next = steps[i + 1]
img = self.p_sample(img, times, times_next)
img.clamp_(-1., 1.)
img = unnormalize_to_zero_to_one(img)
return img
@torch.no_grad()
def sample(self, batch_size = 16):
return self.p_sample_loop((batch_size, self.channels, self.image_size, self.image_size))
# training related functions - noise prediction
@autocast(enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(times)
log_snr_padded = right_pad_dims_to(x_start, log_snr)
alpha, sigma = sqrt(log_snr_padded.sigmoid()), sqrt((-log_snr_padded).sigmoid())
x_noised = x_start * alpha + noise * sigma
return x_noised, log_snr
def p_losses(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
x, log_snr = self.q_sample(x_start = x_start, times = times, noise = noise)
model_out = self.model(x, log_snr)
if self.pred_objective == 'v':
padded_log_snr = right_pad_dims_to(x, log_snr)
alpha, sigma = padded_log_snr.sigmoid().sqrt(), (-padded_log_snr).sigmoid().sqrt()
target = alpha * noise - sigma * x_start
elif self.pred_objective == 'eps':
target = noise
loss = F.mse_loss(model_out, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b', 'mean')
snr = log_snr.exp()
maybe_clip_snr = snr.clone()
if self.min_snr_loss_weight:
maybe_clip_snr.clamp_(max = self.min_snr_gamma)
if self.pred_objective == 'v':
loss_weight = maybe_clip_snr / (snr + 1)
elif self.pred_objective == 'eps':
loss_weight = maybe_clip_snr / snr
return (loss * loss_weight).mean()
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
img = normalize_to_neg_one_to_one(img)
times = torch.zeros((img.shape[0],), device = self.device).float().uniform_(0, 1)
return self.p_losses(img, times, *args, **kwargs)
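# example (a hedged usage sketch in the same spirit as the example scripts elsewhere in this repo;
# UViT(dim = 64) is an assumption about the constructor defined above - adjust to the actual arguments)
if __name__ == '__main__':
    model = UViT(dim = 64)
    diffusion = GaussianDiffusion(
        model,
        image_size = 128,
        pred_objective = 'v',
        num_sample_steps = 250
    )
    training_images = torch.rand(4, 3, 128, 128)  # images assumed to lie in [0, 1]
    loss = diffusion(training_images)
    loss.backward()
    sampled_images = diffusion.sample(batch_size = 4)  # (4, 3, 128, 128)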
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/simple_diffusion.py |
import math
import copy
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from tqdm.auto import tqdm
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def identity(t, *args, **kwargs):
return t
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# classifier free guidance functions
def uniform(shape, device):
return torch.zeros(shape, device = device).float().uniform_(0, 1)
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
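# quick illustrative check: prob_mask_like draws a boolean keep-mask in which roughly `prob` of the
# entries are True - it is used further below to randomly drop class conditioning during CFG training
_keep = prob_mask_like((8,), 0.75, device = torch.device('cpu'))
assert _keep.shape == (8,) and _keep.dtype == torch.bool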
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
return F.normalize(x, dim = 1) * self.g * (x.shape[1] ** 0.5)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = RMSNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim, is_random = False):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, classes_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(int(time_emb_dim) + int(classes_emb_dim), dim_out * 2)
) if exists(time_emb_dim) or exists(classes_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None, class_emb = None):
scale_shift = None
if exists(self.mlp) and (exists(time_emb) or exists(class_emb)):
cond_emb = tuple(filter(exists, (time_emb, class_emb)))
cond_emb = torch.cat(cond_emb, dim = -1)
cond_emb = self.mlp(cond_emb)
cond_emb = rearrange(cond_emb, 'b c -> b c 1 1')
scale_shift = cond_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
RMSNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
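# shape sanity sketch (dim = 32 is an arbitrary illustrative value): both attention variants above
# preserve the (batch, channels, height, width) shape of their input feature maps
_feats = torch.randn(1, 32, 8, 8)
assert LinearAttention(32)(_feats).shape == _feats.shape
assert Attention(32)(_feats).shape == _feats.shape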
# model
class Unet(nn.Module):
def __init__(
self,
dim,
num_classes,
cond_drop_prob = 0.5,
init_dim = None,
out_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
resnet_block_groups = 8,
learned_variance = False,
learned_sinusoidal_cond = False,
random_fourier_features = False,
learned_sinusoidal_dim = 16,
attn_dim_head = 32,
attn_heads = 4
):
super().__init__()
# classifier free guidance stuff
self.cond_drop_prob = cond_drop_prob
# determine dimensions
self.channels = channels
input_channels = channels
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features
if self.random_or_learned_sinusoidal_cond:
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(dim)
fourier_dim = dim
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# class embeddings
self.classes_emb = nn.Embedding(num_classes, dim)
self.null_classes_emb = nn.Parameter(torch.randn(dim))
classes_dim = dim * 4
self.classes_mlp = nn.Sequential(
nn.Linear(dim, classes_dim),
nn.GELU(),
nn.Linear(classes_dim, classes_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim, classes_emb_dim = classes_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim, classes_emb_dim = classes_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim, classes_emb_dim = classes_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim, dim_head = attn_dim_head, heads = attn_heads)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim, classes_emb_dim = classes_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim, classes_emb_dim = classes_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim, classes_emb_dim = classes_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
default_out_dim = channels * (1 if not learned_variance else 2)
self.out_dim = default(out_dim, default_out_dim)
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim, classes_emb_dim = classes_dim)
self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
rescaled_phi = 0.,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_logits = null_logits + (logits - null_logits) * cond_scale
if rescaled_phi == 0.:
return scaled_logits
std_fn = partial(torch.std, dim = tuple(range(1, scaled_logits.ndim)), keepdim = True)
rescaled_logits = scaled_logits * (std_fn(logits) / std_fn(scaled_logits))
return rescaled_logits * rescaled_phi + scaled_logits * (1. - rescaled_phi)
def forward(
self,
x,
time,
classes,
cond_drop_prob = None
):
batch, device = x.shape[0], x.device
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
# derive condition, with condition dropout for classifier free guidance
classes_emb = self.classes_emb(classes)
if cond_drop_prob > 0:
keep_mask = prob_mask_like((batch,), 1 - cond_drop_prob, device = device)
null_classes_emb = repeat(self.null_classes_emb, 'd -> b d', b = batch)
classes_emb = torch.where(
rearrange(keep_mask, 'b -> b 1'),
classes_emb,
null_classes_emb
)
c = self.classes_mlp(classes_emb)
# unet
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t, c)
h.append(x)
x = block2(x, t, c)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t, c)
x = self.mid_attn(x)
x = self.mid_block2(x, t, c)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t, c)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t, c)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t, c)
return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def linear_beta_schedule(timesteps):
scale = 1000 / timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
def cosine_beta_schedule(timesteps, s = 0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
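# quick sanity-check sketch: both schedules return `timesteps` betas, and the cosine schedule is
# clipped at 0.999 so every alpha stays positive and the cumulative product never hits exactly zero
_betas_linear = linear_beta_schedule(1000)
_betas_cosine = cosine_beta_schedule(1000)
assert _betas_linear.shape == _betas_cosine.shape == (1000,)
assert (_betas_cosine > 0).all() and (_betas_cosine <= 0.999).all()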
class GaussianDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
sampling_timesteps = None,
objective = 'pred_noise',
beta_schedule = 'cosine',
ddim_sampling_eta = 1.,
offset_noise_strength = 0.,
min_snr_loss_weight = False,
min_snr_gamma = 5
):
super().__init__()
assert not (type(self) == GaussianDiffusion and model.channels != model.out_dim)
assert not model.random_or_learned_sinusoidal_cond
self.model = model
self.channels = self.model.channels
self.image_size = image_size
self.objective = objective
assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'
if beta_schedule == 'linear':
betas = linear_beta_schedule(timesteps)
elif beta_schedule == 'cosine':
betas = cosine_beta_schedule(timesteps)
else:
raise ValueError(f'unknown beta schedule {beta_schedule}')
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
# sampling related parameters
self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = ddim_sampling_eta
# helper function to register buffer from float64 to float32
register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# offset noise strength - 0.1 was claimed ideal
self.offset_noise_strength = offset_noise_strength
# loss weight
snr = alphas_cumprod / (1 - alphas_cumprod)
maybe_clipped_snr = snr.clone()
if min_snr_loss_weight:
maybe_clipped_snr.clamp_(max = min_snr_gamma)
if objective == 'pred_noise':
loss_weight = maybe_clipped_snr / snr
elif objective == 'pred_x0':
loss_weight = maybe_clipped_snr
elif objective == 'pred_v':
loss_weight = maybe_clipped_snr / (snr + 1)
register_buffer('loss_weight', loss_weight)
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def predict_v(self, x_start, t, noise):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def model_predictions(self, x, t, classes, cond_scale = 6., rescaled_phi = 0.7, clip_x_start = False):
model_output = self.model.forward_with_cond_scale(x, t, classes, cond_scale = cond_scale, rescaled_phi = rescaled_phi)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
elif self.objective == 'pred_x0':
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_v':
v = model_output
x_start = self.predict_start_from_v(x, t, v)
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
def p_mean_variance(self, x, t, classes, cond_scale, rescaled_phi, clip_denoised = True):
preds = self.model_predictions(x, t, classes, cond_scale, rescaled_phi)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.no_grad()
def p_sample(self, x, t: int, classes, cond_scale = 6., rescaled_phi = 0.7, clip_denoised = True):
b, *_, device = *x.shape, x.device
batched_times = torch.full((x.shape[0],), t, device = x.device, dtype = torch.long)
model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = batched_times, classes = classes, cond_scale = cond_scale, rescaled_phi = rescaled_phi, clip_denoised = clip_denoised)
noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
return pred_img, x_start
@torch.no_grad()
def p_sample_loop(self, classes, shape, cond_scale = 6., rescaled_phi = 0.7):
batch, device = shape[0], self.betas.device
img = torch.randn(shape, device=device)
x_start = None
for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
img, x_start = self.p_sample(img, t, classes, cond_scale, rescaled_phi)
img = unnormalize_to_zero_to_one(img)
return img
@torch.no_grad()
def ddim_sample(self, classes, shape, cond_scale = 6., rescaled_phi = 0.7, clip_denoised = True):
batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective
times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1) # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
img = torch.randn(shape, device = device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
time_cond = torch.full((batch,), time, device=device, dtype=torch.long)
pred_noise, x_start, *_ = self.model_predictions(img, time_cond, classes, cond_scale = cond_scale, clip_x_start = clip_denoised)
if time_next < 0:
img = x_start
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
c = (1 - alpha_next - sigma ** 2).sqrt()
noise = torch.randn_like(img)
img = x_start * alpha_next.sqrt() + \
c * pred_noise + \
sigma * noise
img = unnormalize_to_zero_to_one(img)
return img
@torch.no_grad()
def sample(self, classes, cond_scale = 6., rescaled_phi = 0.7):
batch_size, image_size, channels = classes.shape[0], self.image_size, self.channels
sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
return sample_fn(classes, (batch_size, channels, image_size, image_size), cond_scale, rescaled_phi)
    @torch.no_grad()
    def interpolate(self, x1, x2, classes, t = None, lam = 0.5):
        # classes are required here because p_sample is class-conditional in this variant
        b, *_, device = *x1.shape, x1.device
        t = default(t, self.num_timesteps - 1)
        assert x1.shape == x2.shape
        t_batched = torch.stack([torch.tensor(t, device = device)] * b)
        xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
        img = (1 - lam) * xt1 + lam * xt2
        for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
            img, _ = self.p_sample(img, i, classes)
        return img
@autocast(enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
if self.offset_noise_strength > 0.:
offset_noise = torch.randn(x_start.shape[:2], device = self.device)
noise += self.offset_noise_strength * rearrange(offset_noise, 'b c -> b c 1 1')
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, *, classes, noise = None):
b, c, h, w = x_start.shape
noise = default(noise, lambda: torch.randn_like(x_start))
# noise sample
x = self.q_sample(x_start = x_start, t = t, noise = noise)
# predict and take gradient step
model_out = self.model(x, t, classes)
if self.objective == 'pred_noise':
target = noise
elif self.objective == 'pred_x0':
target = x_start
elif self.objective == 'pred_v':
v = self.predict_v(x_start, t, noise)
target = v
else:
raise ValueError(f'unknown objective {self.objective}')
loss = F.mse_loss(model_out, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = loss * extract(self.loss_weight, t, loss.shape)
return loss.mean()
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
img = normalize_to_neg_one_to_one(img)
return self.p_losses(img, t, *args, **kwargs)
# example
if __name__ == '__main__':
num_classes = 10
model = Unet(
dim = 64,
dim_mults = (1, 2, 4, 8),
num_classes = num_classes,
cond_drop_prob = 0.5
)
diffusion = GaussianDiffusion(
model,
image_size = 128,
timesteps = 1000
).cuda()
training_images = torch.randn(8, 3, 128, 128).cuda() # images are normalized from 0 to 1
image_classes = torch.randint(0, num_classes, (8,)).cuda() # say 10 classes
loss = diffusion(training_images, classes = image_classes)
loss.backward()
# do above for many steps
sampled_images = diffusion.sample(
classes = image_classes,
cond_scale = 6. # condition scaling, anything greater than 1 strengthens the classifier free guidance. reportedly 3-8 is good empirically
)
sampled_images.shape # (8, 3, 128, 128)
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/classifier_free_guidance.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = AttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
q, k, v = map(lambda t: t.contiguous(), (q, k, v))
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
if self.flash:
return self.flash_attn(q, k, v)
scale = q.shape[-1] ** -0.5
# similarity
sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
return out
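# example (a minimal usage sketch): exercises the plain einsum attention path above with random
# (batch, heads, seq, dim_head) tensors; flash = False so it runs on any pytorch version
if __name__ == '__main__':
    attend = Attend(dropout = 0., flash = False)
    q = torch.randn(2, 4, 16, 32)
    k = torch.randn(2, 4, 16, 32)
    v = torch.randn(2, 4, 16, 32)
    out = attend(q, k, v)
    assert out.shape == (2, 4, 16, 32)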
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/attend.py |
import torch
from inspect import isfunction
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from denoising_diffusion_pytorch.denoising_diffusion_pytorch import GaussianDiffusion
# helper functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# some improvisation on my end
# where i have the model learn to both predict noise and x0
# and learn the weighted sum for each depending on time step
class WeightedObjectiveGaussianDiffusion(GaussianDiffusion):
def __init__(
self,
model,
*args,
pred_noise_loss_weight = 0.1,
pred_x_start_loss_weight = 0.1,
**kwargs
):
super().__init__(model, *args, **kwargs)
channels = model.channels
assert model.out_dim == (channels * 2 + 2), 'dimension out (out_dim) of unet must be twice the number of channels + 2 (for the softmax weighted sum) - for channels of 3, this should be (3 * 2) + 2 = 8'
assert not model.self_condition, 'not supported yet'
assert not self.is_ddim_sampling, 'ddim sampling cannot be used'
self.split_dims = (channels, channels, 2)
self.pred_noise_loss_weight = pred_noise_loss_weight
self.pred_x_start_loss_weight = pred_x_start_loss_weight
def p_mean_variance(self, *, x, t, clip_denoised, model_output = None):
model_output = self.model(x, t)
pred_noise, pred_x_start, weights = model_output.split(self.split_dims, dim = 1)
normalized_weights = weights.softmax(dim = 1)
x_start_from_noise = self.predict_start_from_noise(x, t = t, noise = pred_noise)
x_starts = torch.stack((x_start_from_noise, pred_x_start), dim = 1)
weighted_x_start = einsum('b j h w, b j c h w -> b c h w', normalized_weights, x_starts)
if clip_denoised:
weighted_x_start.clamp_(-1., 1.)
model_mean, model_variance, model_log_variance = self.q_posterior(weighted_x_start, x, t)
return model_mean, model_variance, model_log_variance
def p_losses(self, x_start, t, noise = None, clip_denoised = False):
noise = default(noise, lambda: torch.randn_like(x_start))
x_t = self.q_sample(x_start = x_start, t = t, noise = noise)
model_output = self.model(x_t, t)
pred_noise, pred_x_start, weights = model_output.split(self.split_dims, dim = 1)
# get loss for predicted noise and x_start
# with the loss weight given at initialization
noise_loss = F.mse_loss(noise, pred_noise) * self.pred_noise_loss_weight
x_start_loss = F.mse_loss(x_start, pred_x_start) * self.pred_x_start_loss_weight
# calculate x_start from predicted noise
# then do a weighted sum of the x_start prediction, weights also predicted by the model (softmax normalized)
x_start_from_pred_noise = self.predict_start_from_noise(x_t, t, pred_noise)
x_start_from_pred_noise = x_start_from_pred_noise.clamp(-2., 2.)
weighted_x_start = einsum('b j h w, b j c h w -> b c h w', weights.softmax(dim = 1), torch.stack((x_start_from_pred_noise, pred_x_start), dim = 1))
# main loss to x_start with the weighted one
weighted_x_start_loss = F.mse_loss(x_start, weighted_x_start)
return weighted_x_start_loss + x_start_loss + noise_loss
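# example (a hedged usage sketch): the Unet import and keyword arguments below are assumptions
# about the base unet in denoising_diffusion_pytorch.denoising_diffusion_pytorch; the one hard
# requirement asserted above is out_dim == channels * 2 + 2, i.e. 8 for RGB images
if __name__ == '__main__':
    from denoising_diffusion_pytorch.denoising_diffusion_pytorch import Unet
    model = Unet(dim = 64, out_dim = 8)
    diffusion = WeightedObjectiveGaussianDiffusion(model, image_size = 128, timesteps = 1000)
    training_images = torch.rand(2, 3, 128, 128)
    loss = diffusion(training_images)
    loss.backward()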
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/weighted_objective_gaussian_diffusion.py |
import math
import torch
from torch import sqrt
from torch import nn, einsum
import torch.nn.functional as F
from torch.special import expm1
from torch.cuda.amp import autocast
from tqdm import tqdm
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
# normalization functions
def normalize_to_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_to_zero_to_one(t):
return (t + 1) * 0.5
# diffusion helpers
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
# continuous schedules
# log(snr) that approximates the original linear schedule
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def alpha_cosine_log_snr(t, s = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5)
class VParamContinuousTimeGaussianDiffusion(nn.Module):
"""
a new type of parameterization in v-space proposed in https://arxiv.org/abs/2202.00512 that
(1) allows for improved distillation over noise prediction objective and
(2) noted in imagen-video to improve upsampling unets by removing the color shifting artifacts
"""
def __init__(
self,
model,
*,
image_size,
channels = 3,
num_sample_steps = 500,
clip_sample_denoised = True,
):
super().__init__()
assert model.random_or_learned_sinusoidal_cond
assert not model.self_condition, 'not supported yet'
self.model = model
# image dimensions
self.channels = channels
self.image_size = image_size
# continuous noise schedule related stuff
self.log_snr = alpha_cosine_log_snr
# sampling
self.num_sample_steps = num_sample_steps
self.clip_sample_denoised = clip_sample_denoised
@property
def device(self):
return next(self.model.parameters()).device
def p_mean_variance(self, x, time, time_next):
# reviewer found an error in the equation in the paper (missing sigma)
# following - https://openreview.net/forum?id=2LdBqxc1Yv&noteId=rIQgH0zKsRt
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
c = -expm1(log_snr - log_snr_next)
squared_alpha, squared_alpha_next = log_snr.sigmoid(), log_snr_next.sigmoid()
squared_sigma, squared_sigma_next = (-log_snr).sigmoid(), (-log_snr_next).sigmoid()
alpha, sigma, alpha_next = map(sqrt, (squared_alpha, squared_sigma, squared_alpha_next))
batch_log_snr = repeat(log_snr, ' -> b', b = x.shape[0])
pred_v = self.model(x, batch_log_snr)
# shown in Appendix D in the paper
x_start = alpha * x - sigma * pred_v
if self.clip_sample_denoised:
x_start.clamp_(-1., 1.)
model_mean = alpha_next * (x * (1 - c) / alpha + c * x_start)
posterior_variance = squared_sigma_next * c
return model_mean, posterior_variance
# sampling related functions
@torch.no_grad()
def p_sample(self, x, time, time_next):
batch, *_, device = *x.shape, x.device
model_mean, model_variance = self.p_mean_variance(x = x, time = time, time_next = time_next)
if time_next == 0:
return model_mean
noise = torch.randn_like(x)
return model_mean + sqrt(model_variance) * noise
@torch.no_grad()
def p_sample_loop(self, shape):
batch = shape[0]
img = torch.randn(shape, device = self.device)
steps = torch.linspace(1., 0., self.num_sample_steps + 1, device = self.device)
for i in tqdm(range(self.num_sample_steps), desc = 'sampling loop time step', total = self.num_sample_steps):
times = steps[i]
times_next = steps[i + 1]
img = self.p_sample(img, times, times_next)
img.clamp_(-1., 1.)
img = unnormalize_to_zero_to_one(img)
return img
@torch.no_grad()
def sample(self, batch_size = 16):
return self.p_sample_loop((batch_size, self.channels, self.image_size, self.image_size))
# training related functions - noise prediction
@autocast(enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(times)
log_snr_padded = right_pad_dims_to(x_start, log_snr)
alpha, sigma = sqrt(log_snr_padded.sigmoid()), sqrt((-log_snr_padded).sigmoid())
x_noised = x_start * alpha + noise * sigma
return x_noised, log_snr, alpha, sigma
def random_times(self, batch_size):
return torch.zeros((batch_size,), device = self.device).float().uniform_(0, 1)
def p_losses(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))
x, log_snr, alpha, sigma = self.q_sample(x_start = x_start, times = times, noise = noise)
# described in section 4 as the prediction objective, with derivation in Appendix D
v = alpha * noise - sigma * x_start
model_out = self.model(x, log_snr)
return F.mse_loss(model_out, v)
def forward(self, img, *args, **kwargs):
b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
times = self.random_times(b)
img = normalize_to_neg_one_to_one(img)
return self.p_losses(img, times, *args, **kwargs)
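# example (a hedged usage sketch): the wrapped unet must use random or learned sinusoidal time
# conditioning, as asserted in __init__ above; the Unet import and keyword arguments are
# assumptions about the base unet in denoising_diffusion_pytorch.denoising_diffusion_pytorch
if __name__ == '__main__':
    from denoising_diffusion_pytorch.denoising_diffusion_pytorch import Unet
    model = Unet(dim = 64, learned_sinusoidal_cond = True)
    diffusion = VParamContinuousTimeGaussianDiffusion(model, image_size = 128, num_sample_steps = 250)
    training_images = torch.rand(2, 3, 128, 128)
    loss = diffusion(training_images)
    loss.backward()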
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/v_param_continuous_time_gaussian_diffusion.py |
import torch
from collections import namedtuple
from functools import partial
from math import pi, sqrt, log as ln
from inspect import isfunction
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from denoising_diffusion_pytorch.denoising_diffusion_pytorch import GaussianDiffusion, extract, unnormalize_to_zero_to_one
# constants
NAT = 1. / ln(2)
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start', 'pred_variance'])
# helper functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
    return d() if isfunction(d) else d
def identity(t, *args, **kwargs):
    # no-op used when x_start clipping is disabled in model_predictions below
    return t
# tensor helpers
def log(t, eps = 1e-15):
return torch.log(t.clamp(min = eps))
def meanflat(x):
return x.mean(dim = tuple(range(1, len(x.shape))))
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
KL divergence between normal distributions parameterized by mean and log-variance.
"""
return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * torch.exp(-logvar2))
def approx_standard_normal_cdf(x):
return 0.5 * (1.0 + torch.tanh(sqrt(2.0 / pi) * (x + 0.044715 * (x ** 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales, thres = 0.999):
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = log(cdf_plus)
log_one_minus_cdf_min = log(1. - cdf_min)
cdf_delta = cdf_plus - cdf_min
log_probs = torch.where(x < -thres,
log_cdf_plus,
torch.where(x > thres,
log_one_minus_cdf_min,
log(cdf_delta)))
return log_probs
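# small numeric sketch for the helpers above: the KL between identical gaussians is zero, and
# widening the second gaussian (larger logvar2) makes it strictly positive
_zeros = torch.zeros(3)
assert torch.allclose(normal_kl(_zeros, _zeros, _zeros, _zeros), _zeros)
assert (normal_kl(_zeros, _zeros, _zeros, torch.ones(3)) > 0).all()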
# https://arxiv.org/abs/2102.09672
# i thought the results were questionable, if one were to focus only on FID
# but may as well get this in here for others to try, as GLIDE is using it (and DALL-E2 first stage of cascade)
# gaussian diffusion for learned variance + hybrid eps simple + vb loss
class LearnedGaussianDiffusion(GaussianDiffusion):
def __init__(
self,
model,
vb_loss_weight = 0.001, # lambda was 0.001 in the paper
*args,
**kwargs
):
super().__init__(model, *args, **kwargs)
assert model.out_dim == (model.channels * 2), 'dimension out of unet must be twice the number of channels for learned variance - you can also set the `learned_variance` keyword argument on the Unet to be `True`'
assert not model.self_condition, 'not supported yet'
self.vb_loss_weight = vb_loss_weight
def model_predictions(self, x, t, clip_x_start = False):
model_output = self.model(x, t)
model_output, pred_variance = model_output.chunk(2, dim = 1)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, model_output)
elif self.objective == 'pred_x0':
pred_noise = self.predict_noise_from_start(x, t, model_output)
x_start = model_output
x_start = maybe_clip(x_start)
return ModelPrediction(pred_noise, x_start, pred_variance)
def p_mean_variance(self, *, x, t, clip_denoised, model_output = None, **kwargs):
model_output = default(model_output, lambda: self.model(x, t))
pred_noise, var_interp_frac_unnormalized = model_output.chunk(2, dim = 1)
min_log = extract(self.posterior_log_variance_clipped, t, x.shape)
max_log = extract(torch.log(self.betas), t, x.shape)
var_interp_frac = unnormalize_to_zero_to_one(var_interp_frac_unnormalized)
model_log_variance = var_interp_frac * max_log + (1 - var_interp_frac) * min_log
model_variance = model_log_variance.exp()
x_start = self.predict_start_from_noise(x, t, pred_noise)
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, _, _ = self.q_posterior(x_start, x, t)
return model_mean, model_variance, model_log_variance, x_start
def p_losses(self, x_start, t, noise = None, clip_denoised = False):
noise = default(noise, lambda: torch.randn_like(x_start))
x_t = self.q_sample(x_start = x_start, t = t, noise = noise)
# model output
model_output = self.model(x_t, t)
# calculating kl loss for learned variance (interpolation)
true_mean, _, true_log_variance_clipped = self.q_posterior(x_start = x_start, x_t = x_t, t = t)
model_mean, _, model_log_variance, _ = self.p_mean_variance(x = x_t, t = t, clip_denoised = clip_denoised, model_output = model_output)
# kl loss with detached model predicted mean, for stability reasons as in paper
detached_model_mean = model_mean.detach()
kl = normal_kl(true_mean, true_log_variance_clipped, detached_model_mean, model_log_variance)
kl = meanflat(kl) * NAT
decoder_nll = -discretized_gaussian_log_likelihood(x_start, means = detached_model_mean, log_scales = 0.5 * model_log_variance)
decoder_nll = meanflat(decoder_nll) * NAT
# at the first timestep return the decoder NLL, otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
vb_losses = torch.where(t == 0, decoder_nll, kl)
# simple loss - predicting noise, x0, or x_prev
pred_noise, _ = model_output.chunk(2, dim = 1)
simple_losses = F.mse_loss(pred_noise, noise)
return simple_losses + vb_losses.mean() * self.vb_loss_weight
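# example (a hedged usage sketch): the unet must output twice the channels (prediction plus a
# per-channel variance interpolation fraction), per the assert in __init__; the Unet import and
# keyword arguments are assumptions about the base unet in denoising_diffusion_pytorch.denoising_diffusion_pytorch
if __name__ == '__main__':
    from denoising_diffusion_pytorch.denoising_diffusion_pytorch import Unet
    model = Unet(dim = 64, learned_variance = True)
    diffusion = LearnedGaussianDiffusion(model, image_size = 128, timesteps = 1000)
    training_images = torch.rand(2, 3, 128, 128)
    loss = diffusion(training_images)
    loss.backward()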
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/learned_gaussian_diffusion.py |
import math
import os
import numpy as np
import torch
from einops import rearrange, repeat
from pytorch_fid.fid_score import calculate_frechet_distance
from pytorch_fid.inception import InceptionV3
from torch.nn.functional import adaptive_avg_pool2d
from tqdm.auto import tqdm
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
class FIDEvaluation:
def __init__(
self,
batch_size,
dl,
sampler,
channels=3,
accelerator=None,
stats_dir="./results",
device="cuda",
num_fid_samples=50000,
inception_block_idx=2048,
):
self.batch_size = batch_size
self.n_samples = num_fid_samples
self.device = device
self.channels = channels
self.dl = dl
self.sampler = sampler
self.stats_dir = stats_dir
self.print_fn = print if accelerator is None else accelerator.print
assert inception_block_idx in InceptionV3.BLOCK_INDEX_BY_DIM
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[inception_block_idx]
self.inception_v3 = InceptionV3([block_idx]).to(device)
self.dataset_stats_loaded = False
def calculate_inception_features(self, samples):
if self.channels == 1:
samples = repeat(samples, "b 1 ... -> b c ...", c=3)
self.inception_v3.eval()
features = self.inception_v3(samples)[0]
if features.size(2) != 1 or features.size(3) != 1:
features = adaptive_avg_pool2d(features, output_size=(1, 1))
features = rearrange(features, "... 1 1 -> ...")
return features
def load_or_precalc_dataset_stats(self):
path = os.path.join(self.stats_dir, "dataset_stats")
try:
ckpt = np.load(path + ".npz")
self.m2, self.s2 = ckpt["m2"], ckpt["s2"]
self.print_fn("Dataset stats loaded from disk.")
ckpt.close()
except OSError:
num_batches = int(math.ceil(self.n_samples / self.batch_size))
stacked_real_features = []
self.print_fn(
f"Stacking Inception features for {self.n_samples} samples from the real dataset."
)
for _ in tqdm(range(num_batches)):
try:
real_samples = next(self.dl)
except StopIteration:
break
real_samples = real_samples.to(self.device)
real_features = self.calculate_inception_features(real_samples)
stacked_real_features.append(real_features)
stacked_real_features = (
torch.cat(stacked_real_features, dim=0).cpu().numpy()
)
m2 = np.mean(stacked_real_features, axis=0)
s2 = np.cov(stacked_real_features, rowvar=False)
np.savez_compressed(path, m2=m2, s2=s2)
self.print_fn(f"Dataset stats cached to {path}.npz for future use.")
self.m2, self.s2 = m2, s2
self.dataset_stats_loaded = True
@torch.inference_mode()
def fid_score(self):
if not self.dataset_stats_loaded:
self.load_or_precalc_dataset_stats()
self.sampler.eval()
batches = num_to_groups(self.n_samples, self.batch_size)
stacked_fake_features = []
self.print_fn(
f"Stacking Inception features for {self.n_samples} generated samples."
)
for batch in tqdm(batches):
fake_samples = self.sampler.sample(batch_size=batch)
fake_features = self.calculate_inception_features(fake_samples)
stacked_fake_features.append(fake_features)
stacked_fake_features = torch.cat(stacked_fake_features, dim=0).cpu().numpy()
m1 = np.mean(stacked_fake_features, axis=0)
s1 = np.cov(stacked_fake_features, rowvar=False)
return calculate_frechet_distance(m1, s1, self.m2, self.s2)
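# usage sketch (hedged, illustration only): `diffusion` stands for any sampler exposing .eval()
# and .sample(batch_size = ...), and `dl` for a cycling dataloader iterator yielding image batches;
# both names are placeholders, not objects defined in this file
#
#   fid_eval = FIDEvaluation(
#       batch_size = 32,
#       dl = dl,
#       sampler = diffusion,
#       channels = 3,
#       stats_dir = './results',
#       device = 'cuda',
#       num_fid_samples = 50000,
#   )
#   score = fid_eval.fid_score()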
| denoising-diffusion-pytorch-main | denoising_diffusion_pytorch/fid_evaluation.py |