python_code | repo_name | file_path
---|---|---
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
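# Illustrative usage sketch (editor's addition, not part of the original file).
# It shows the typical zero-shot classification flow using load(), the returned
# preprocess transform, and tokenize(); "example.png" is a placeholder path.
#
# import clip
# model, preprocess = clip.load("ViT-B/32", device="cpu")
# image = preprocess(Image.open("example.png")).unsqueeze(0)
# text = clip.tokenize(["a diagram", "a dog", "a cat"])
# with torch.no_grad():
#     logits_per_image, logits_per_text = model(image, text)
#     probs = logits_per_image.softmax(dim=-1)   # probabilities over the three captions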
| CLIP-main | clip/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns a mapping between utf-8 bytes and corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
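# Editor's note (illustrative, not part of the original file): byte 32 (space) falls
# outside the printable ranges kept as-is above, so it is remapped to chr(256 + 32),
# i.e. 'Ġ' -- the marker GPT-2-style byte-level BPE vocabularies use for word-initial
# spaces. For example, bytes_to_unicode()[32] == 'Ġ' while bytes_to_unicode()[ord('!')] == '!'.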
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
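# Illustrative round-trip sketch (editor's addition, not part of the original file).
# encode() lower-cases the text and applies byte-level BPE; decode() maps ids back
# and turns the '</w>' end-of-word markers into spaces.
if __name__ == "__main__":
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("Hello, world!")
    print(ids)                    # BPE token ids
    print(tokenizer.decode(ids))  # roughly "hello , world ! "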
| CLIP-main | clip/simple_tokenizer.py |
import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize('model_name', clip.available_models())
def test_consistency(model_name):
device = "cpu"
jit_model, transform = clip.load(model_name, device=device, jit=True)
py_model, _ = clip.load(model_name, device=device, jit=False)
image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
with torch.no_grad():
logits_per_image, _ = jit_model(image, text)
jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
logits_per_image, _ = py_model(image, text)
py_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
| CLIP-main | tests/test_consistency.py |
from setuptools import setup, find_packages
setup(
name = 'memformer',
packages = find_packages(exclude=['examples']),
version = '0.3.1',
license='MIT',
description = 'Memformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/memformer',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'memory'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| memformer-main | setup.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('input_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, return_loss = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
# help resolve a common source of confusion around input masks in autoregressive training:
# if the user supplies a mask that is off by one from the source sequence, trim it for them
mask = kwargs.pop('src_mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs.update(src_mask = mask)
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
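# Illustrative sketch (editor's addition, not part of the original file): the filtering
# helpers take a [batch, vocab] logits tensor and return logits with disallowed entries
# set to -inf, ready for softmax followed by multinomial sampling.
if __name__ == "__main__":
    logits = torch.randn(2, 256)
    filtered = top_k(logits, thres = 0.9)    # keeps the top ~10% (here 25 of 256) logits per row
    probs = F.softmax(filtered, dim = -1)
    sample = torch.multinomial(probs, 1)     # one sampled token id per row
    print(sample.shape)                      # torch.Size([2, 1])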
| memformer-main | memformer/autoregressive_wrapper.py |
from memformer.memformer import Memformer
from memformer.mrbp import memory_replay_backprop
| memformer-main | memformer/__init__.py |
import torch
from operator import itemgetter
def memory_replay_backprop(
model,
src,
tgt,
src_mask = None,
tgt_mask = None
):
b, *_ = src.shape
# get initial memory and max sequence length from encoder
mem_init = model.get_initial_mem(b)
max_seq_len = model.encoder.max_seq_len
# instantiate memory replay buffer
replay_buffer = [mem_init]
# split sequences and masks
src_segs = src.split(max_seq_len, dim = 1)
num_segs = len(src_segs)
src_mask_segs = src_mask.split(max_seq_len, dim = 1) if src_mask is not None else ((None,) * num_segs)
# for now, assume target sequence and mask is passed at the very last segment
# todo - allow to tether a target sequence at any point in the segment
# and attach custom loss to encoder output
tgt_segs = ((None,) * (num_segs - 1)) + (tgt,)
tgt_mask_segs = ((None,) * (num_segs - 1)) + (tgt_mask,)
# run forwards and gather all memories
prev_mem = mem_init
with torch.no_grad():
for i in range(num_segs - 1):
src, src_mask = map(itemgetter(i), (src_segs, src_mask_segs))
_, mem, _ = model(src, src_mask = src_mask, mems = prev_mem)
replay_buffer.append(mem)
prev_mem = mem
# do backpropagation one segment at a time from last step to first
mem_grad = torch.zeros_like(prev_mem)
for i in reversed(range(num_segs)):
src, src_mask, tgt, tgt_mask, mems = map(itemgetter(i), (src_segs, src_mask_segs, tgt_segs, tgt_mask_segs, replay_buffer))
mems = mems.requires_grad_()
_, mems_next, tgt_loss = model(src = src, tgt = tgt, src_mask = src_mask, tgt_mask = tgt_mask, mems = mems)
tgt_loss.backward(retain_graph = True)
mems_next.backward(mem_grad, retain_graph = True)
# if not the last step, pass the next memory's gradient back a step
if i != 0:
mem_grad.copy_(mems.grad.data)
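# Illustrative training-step sketch (editor's addition, not part of the original file).
# Given a Memformer `model` (see memformer.py) and an optimizer, memory_replay_backprop
# runs the segmented forward passes and accumulates gradients itself, so the caller only
# steps and zeros the optimizer. The shapes below are placeholders.
#
# src = torch.randint(0, 256, (1, 2048))   # longer than enc_max_seq_len, so it is split into segments
# tgt = torch.randint(0, 256, (1, 1024))
# memory_replay_backprop(model, src, tgt)  # populates .grad on model parameters
# optimizer.step()
# optimizer.zero_grad()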
| memformer-main | memformer/mrbp.py |
import math
import torch
from torch import nn, einsum
from functools import partial
import torch.nn.functional as F
from inspect import isfunction
from einops import rearrange, repeat
from collections import namedtuple
from memformer.autoregressive_wrapper import AutoregressiveWrapper
# constants
Results = namedtuple('Results', ['enc_out', 'mem', 'dec_out'])
EncOnlyResults = namedtuple('EncOnlyResults', ['enc_out', 'mem'])
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key, None), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def group_by_key_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
# positional embedding
class RelativePositionBias(nn.Module):
def __init__(self, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, qlen, klen):
device = self.relative_attention_bias.weight.device
q_pos = torch.arange(qlen, dtype = torch.long, device = device)
k_pos = torch.arange(klen, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets)
values = self.relative_attention_bias(rp_bucket)
return rearrange(values, 'i j h -> () h i j')
# main classes
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, causal = False, rel_pos_emb = False):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
dim_head = dim // heads
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.to_q = nn.Linear(dim, dim)
self.to_kv = nn.Linear(dim, dim * 2)
self.to_out = nn.Linear(dim, dim)
def forward(self, x, context = None, pos_emb = None, mask = None, query_mask = None, kv_mask = None, attend_self = False):
b, n, _, h, scale, device = *x.shape, self.heads, self.scale, x.device
if attend_self:
kv_input = torch.cat((x, context), dim = 1)
else:
kv_input = default(context, x)
q = self.to_q(x)
kv = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, *kv))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
if exists(pos_emb):
pos_emb_bias = pos_emb(*dots.shape[-2:])
dots += pos_emb_bias
mask_value = max_neg_value(dots)
if self.causal:
causal_mask = torch.ones((n, n), device = device).triu_(1).bool()
dots.masked_fill_(causal_mask, mask_value)
del causal_mask
if any(map(exists, (query_mask, kv_mask))):
query_mask = default(query_mask, lambda: torch.ones((b, n), device = device).bool())
if exists(context):
kv_mask = default(kv_mask, lambda: torch.ones((b, context.shape[1]), device = device).bool())
else:
kv_mask = default(kv_mask, query_mask)
query_mask = rearrange(query_mask, 'b i -> b () i ()')
kv_mask = rearrange(kv_mask, 'b j -> b () () j')
seq_mask = query_mask * kv_mask
dots.masked_fill_(~seq_mask, mask_value)
del seq_mask
if exists(mask):
mask = rearrange(mask, 'b i j -> b () i j')
dots.masked_fill_(~mask, mask_value)
del mask
attn = dots.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Encoder(nn.Module):
def __init__(self, dim, depth, heads = 8):
super().__init__()
self.rel_pos_emb = RelativePositionBias(heads = heads)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads, rel_pos_emb = True))),
Residual(PreNorm(dim, Attention(dim, heads = heads))),
Residual(PreNorm(dim, FeedForward(dim)))
]))
def forward(self, x, context = None, src_mask = None):
for (self_attn, cross_attn, ff) in self.layers:
x = self_attn(x, pos_emb = self.rel_pos_emb, query_mask = src_mask)
x = cross_attn(x, context = context)
x = ff(x)
return x
class Decoder(nn.Module):
def __init__(self, dim, depth, heads = 8):
super().__init__()
self.rel_pos_emb = RelativePositionBias(heads = heads, causal = True)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads, causal = True, rel_pos_emb = True))),
Residual(PreNorm(dim, Attention(dim, heads = heads))),
Residual(PreNorm(dim, FeedForward(dim))),
]))
def forward(self, x, context = None, src_mask = None, tgt_mask = None):
for (self_attn, cross_attn, ff) in self.layers:
x = self_attn(x, pos_emb = self.rel_pos_emb, query_mask = src_mask)
x = cross_attn(x, context = context, query_mask = src_mask, kv_mask = tgt_mask)
x = ff(x)
return x
class TransformerWrapper(nn.Module):
def __init__(self, *, num_tokens, max_seq_len, dim, layer_blocks, heads = 8, return_logits = True):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.max_seq_len = max_seq_len
self.layer_blocks = layer_blocks
self.norm = nn.LayerNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens) if return_logits else nn.Identity()
def forward(self, x, **kwargs):
_, n, device = *x.shape, x.device
x = self.token_emb(x)
x = self.layer_blocks(x, **kwargs)
x = self.norm(x)
return self.to_logits(x)
class Memformer(nn.Module):
def __init__(
self,
*,
dim,
num_memory_slots,
num_mem_updates = 1,
encoder_only = False,
mem_update_attn_heads = 8,
**kwargs):
super().__init__()
enc_kwargs, kwargs = group_by_key_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = group_by_key_prefix_and_trim('dec_', kwargs)
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
self.encoder = TransformerWrapper(
dim = dim,
layer_blocks = Encoder(dim = dim, **enc_kwargs),
return_logits = False,
**enc_transformer_kwargs
)
self.decoder = TransformerWrapper(
dim = dim,
layer_blocks = Decoder(dim = dim, **dec_kwargs),
return_logits = True,
**dec_transformer_kwargs
) if not encoder_only else None
if exists(self.decoder):
self.decoder = AutoregressiveWrapper(self.decoder)
self.num_mem = num_memory_slots
self.memory_slots = nn.Parameter(torch.randn(num_memory_slots, dim))
self.num_mem_updates = num_mem_updates
self.mem_updater = Attention(dim, heads = mem_update_attn_heads)
self.gru = nn.GRUCell(dim, dim)
self.mem_ff = Residual(PreNorm(dim, FeedForward(dim)))
def get_initial_mem(self, batch_size):
return repeat(self.memory_slots, 'n d -> b n d', b = batch_size)
def forward(self, src, tgt = None, mems = None, src_mask = None, tgt_mask = None):
b, n, num_mem, device = *src.shape, self.num_mem, src.device
mems = default(mems, lambda: self.get_initial_mem(b))
enc = self.encoder(src, context = mems, src_mask = src_mask)
if exists(self.decoder) and exists(tgt):
dec_out = self.decoder(tgt, context = enc, src_mask = tgt_mask, tgt_mask = src_mask, return_loss = True)
else:
dec_out = torch.tensor(0., requires_grad = True, device = device)
# update memory with attention
mem_mask = torch.eye(num_mem, num_mem, device = device).bool()
mem_mask = repeat(mem_mask, 'i j -> b i j', b = b)
mem_mask = F.pad(mem_mask, (0, n), value = True)
if exists(src_mask):
src_mask = rearrange(src_mask, 'b j -> b () j')
mem_enc_mask = F.pad(src_mask, (num_mem, 0), value = True)
mem_mask &= mem_enc_mask
for _ in range(self.num_mem_updates):
prev_mems = mems
updated_mems = self.mem_updater(mems, enc, mask = mem_mask, attend_self = True)
next_mems = self.gru(
rearrange(updated_mems, 'b n d -> (b n) d'),
rearrange(prev_mems, 'b n d -> (b n) d')
)
mems = rearrange(next_mems, '(b n) d -> b n d', b = b)
mems = self.mem_ff(mems)
if not exists(self.decoder):
return EncOnlyResults(enc, mems)
return Results(enc, mems, dec_out)
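# Illustrative forward-pass sketch (editor's addition, not part of the original file).
# The hyperparameters are placeholders. Each forward returns a Results namedtuple; the
# updated memory slots can be fed back in as `mems` for the next segment, and the decoder
# loss is returned as `dec_out` whenever `tgt` is given.
if __name__ == "__main__":
    model = Memformer(
        dim = 512,
        num_memory_slots = 128,
        enc_num_tokens = 256, enc_depth = 2, enc_max_seq_len = 1024,
        dec_num_tokens = 256, dec_depth = 2, dec_max_seq_len = 1024
    )
    src1 = torch.randint(0, 256, (1, 1024))
    src2 = torch.randint(0, 256, (1, 1024))
    tgt = torch.randint(0, 256, (1, 1024))
    _, mems1, _ = model(src1, tgt = tgt)
    _, mems2, loss = model(src2, tgt = tgt, mems = mems1)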
| memformer-main | memformer/memformer.py |
from setuptools import setup, find_packages
setup(
name = 'enformer-pytorch',
packages = find_packages(exclude=[]),
include_package_data = True,
version = '0.7.6',
license='MIT',
description = 'Enformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/enformer-pytorch',
keywords = [
'artificial intelligence',
'transformer',
'gene-expression'
],
install_requires=[
'discrete-key-value-bottleneck-pytorch>=0.0.8',
'einops>=0.3',
'numpy',
'torch>=1.6',
'torchmetrics',
'polars',
'pyfaidx',
'pyyaml',
'transformers[torch]',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| enformer-pytorch-main | setup.py |
import torch
from enformer_pytorch import Enformer
enformer = Enformer.from_pretrained('EleutherAI/enformer-official-rough').cuda()
enformer.eval()
data = torch.load('./data/test-sample.pt')
seq, target = data['sequence'].cuda(), data['target'].cuda()
with torch.no_grad():
corr_coef = enformer(
seq,
target = target,
return_corr_coef = True,
head = 'human'
)
print(corr_coef)
assert corr_coef > 0.1
| enformer-pytorch-main | test_pretrained.py |
from torchmetrics import Metric
from typing import Optional
import torch
class MeanPearsonCorrCoefPerChannel(Metric):
is_differentiable: Optional[bool] = False
full_state_update:bool = False
higher_is_better: Optional[bool] = True
def __init__(self, n_channels:int, dist_sync_on_step=False):
"""Calculates the mean pearson correlation across channels aggregated over regions"""
super().__init__(dist_sync_on_step=dist_sync_on_step, full_state_update=False)
self.reduce_dims=(0, 1)
self.add_state("product", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum", )
self.add_state("true", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum", )
self.add_state("true_squared", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum", )
self.add_state("pred", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum", )
self.add_state("pred_squared", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum", )
self.add_state("count", default=torch.zeros(n_channels, dtype=torch.float32), dist_reduce_fx="sum")
def update(self, preds: torch.Tensor, target: torch.Tensor):
assert preds.shape == target.shape
self.product += torch.sum(preds * target, dim=self.reduce_dims)
self.true += torch.sum(target, dim=self.reduce_dims)
self.true_squared += torch.sum(torch.square(target), dim=self.reduce_dims)
self.pred += torch.sum(preds, dim=self.reduce_dims)
self.pred_squared += torch.sum(torch.square(preds), dim=self.reduce_dims)
self.count += torch.sum(torch.ones_like(target), dim=self.reduce_dims)
def compute(self):
true_mean = self.true / self.count
pred_mean = self.pred / self.count
covariance = (self.product
- true_mean * self.pred
- pred_mean * self.true
+ self.count * true_mean * pred_mean)
true_var = self.true_squared - self.count * torch.square(true_mean)
pred_var = self.pred_squared - self.count * torch.square(pred_mean)
tp_var = torch.sqrt(true_var) * torch.sqrt(pred_var)
correlation = covariance / tp_var
return correlation
| enformer-pytorch-main | enformer_pytorch/metrics.py |
from enformer_pytorch.config_enformer import EnformerConfig
from enformer_pytorch.modeling_enformer import Enformer, SEQUENCE_LENGTH, AttentionPool
from enformer_pytorch.data import seq_indices_to_one_hot, str_to_one_hot, GenomeIntervalDataset, FastaInterval
| enformer-pytorch-main | enformer_pytorch/__init__.py |
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint_sequential
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
from enformer_pytorch.data import str_to_one_hot, seq_indices_to_one_hot
from enformer_pytorch.config_enformer import EnformerConfig
from transformers import PreTrainedModel
# constants
SEQUENCE_LENGTH = 196_608
TARGET_LENGTH = 896
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def map_values(fn, d):
return {key: fn(values) for key, values in d.items()}
def exponential_linspace_int(start, end, num, divisible_by = 1):
def _round(x):
return int(round(x / divisible_by) * divisible_by)
base = math.exp(math.log(end / start) / (num - 1))
return [_round(start * base**i) for i in range(num)]
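# Editor's note (illustrative, not part of the original file): with the default config
# (dim = 1536, num_downsamples = 7, dim_divisible_by = 128) the conv tower widths come out as
#   exponential_linspace_int(768, 1536, num = 6, divisible_by = 128)
#   == [768, 896, 1024, 1152, 1280, 1536]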
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
# losses and metrics
def poisson_loss(pred, target):
return (pred - target * log(pred)).mean()
def pearson_corr_coef(x, y, dim = 1, reduce_dims = (-1,)):
x_centered = x - x.mean(dim = dim, keepdim = True)
y_centered = y - y.mean(dim = dim, keepdim = True)
return F.cosine_similarity(x_centered, y_centered, dim = dim).mean(dim = reduce_dims)
# relative positional encoding functions
def get_positional_features_exponential(positions, features, seq_len, min_half_life = 3.):
max_range = math.log(seq_len) / math.log(2.)
half_life = 2 ** torch.linspace(min_half_life, max_range, features, device = positions.device)
half_life = half_life[None, ...]
positions = positions.abs()[..., None]
return torch.exp(-math.log(2.) / half_life * positions)
def get_positional_features_central_mask(positions, features, seq_len):
center_widths = 2 ** torch.arange(1, features + 1, device = positions.device).float()
center_widths = center_widths - 1
return (center_widths[None, ...] > positions.abs()[..., None]).float()
def gamma_pdf(x, concentration, rate):
log_unnormalized_prob = torch.xlogy(concentration - 1., x) - rate * x
log_normalization = (torch.lgamma(concentration) - concentration * torch.log(rate))
return torch.exp(log_unnormalized_prob - log_normalization)
def get_positional_features_gamma(positions, features, seq_len, stddev = None, start_mean = None, eps = 1e-8):
if not exists(stddev):
stddev = seq_len / (2 * features)
if not exists(start_mean):
start_mean = seq_len / features
mean = torch.linspace(start_mean, seq_len, features, device = positions.device)
mean = mean[None, ...]
concentration = (mean / stddev) ** 2
rate = mean / stddev ** 2
probabilities = gamma_pdf(positions.float().abs()[..., None], concentration, rate)
probabilities = probabilities + eps
outputs = probabilities / torch.amax(probabilities, dim = -1, keepdim = True)
return outputs
def get_positional_embed(seq_len, feature_size, device):
distances = torch.arange(-seq_len + 1, seq_len, device = device)
feature_functions = [
get_positional_features_exponential,
get_positional_features_central_mask,
get_positional_features_gamma
]
num_components = len(feature_functions) * 2
if (feature_size % num_components) != 0:
raise ValueError(f'feature size is not divisible by number of components ({num_components})')
num_basis_per_class = feature_size // num_components
embeddings = []
for fn in feature_functions:
embeddings.append(fn(distances, num_basis_per_class, seq_len))
embeddings = torch.cat(embeddings, dim = -1)
embeddings = torch.cat((embeddings, torch.sign(distances)[..., None] * embeddings), dim = -1)
return embeddings
def relative_shift(x):
to_pad = torch.zeros_like(x[..., :1])
x = torch.cat((to_pad, x), dim = -1)
_, h, t1, t2 = x.shape
x = x.reshape(-1, h, t2, t1)
x = x[:, :, 1:, :]
x = x.reshape(-1, h, t1, t2 - 1)
return x[..., :((t2 + 1) // 2)]
# classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class GELU(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
class AttentionPool(nn.Module):
def __init__(self, dim, pool_size = 2):
super().__init__()
self.pool_size = pool_size
self.pool_fn = Rearrange('b d (n p) -> b d n p', p = pool_size)
self.to_attn_logits = nn.Conv2d(dim, dim, 1, bias = False)
nn.init.dirac_(self.to_attn_logits.weight)
with torch.no_grad():
self.to_attn_logits.weight.mul_(2)
def forward(self, x):
b, _, n = x.shape
remainder = n % self.pool_size
needs_padding = remainder > 0
if needs_padding:
x = F.pad(x, (0, remainder), value = 0)
mask = torch.zeros((b, 1, n), dtype = torch.bool, device = x.device)
mask = F.pad(mask, (0, remainder), value = True)
x = self.pool_fn(x)
logits = self.to_attn_logits(x)
if needs_padding:
mask_value = -torch.finfo(logits.dtype).max
logits = logits.masked_fill(self.pool_fn(mask), mask_value)
attn = logits.softmax(dim = -1)
return (x * attn).sum(dim = -1)
class TargetLengthCrop(nn.Module):
def __init__(self, target_length):
super().__init__()
self.target_length = target_length
def forward(self, x):
seq_len, target_len = x.shape[-2], self.target_length
if target_len == -1:
return x
if seq_len < target_len:
raise ValueError(f'sequence length {seq_len} is less than target length {target_len}')
trim = (target_len - seq_len) // 2
if trim == 0:
return x
return x[:, -trim:trim]
def ConvBlock(dim, dim_out = None, kernel_size = 1):
return nn.Sequential(
nn.BatchNorm1d(dim),
GELU(),
nn.Conv1d(dim, default(dim_out, dim), kernel_size, padding = kernel_size // 2)
)
# attention classes
class Attention(nn.Module):
def __init__(
self,
dim,
*,
num_rel_pos_features,
heads = 8,
dim_key = 64,
dim_value = 64,
dropout = 0.,
pos_dropout = 0.
):
super().__init__()
self.scale = dim_key ** -0.5
self.heads = heads
self.to_q = nn.Linear(dim, dim_key * heads, bias = False)
self.to_k = nn.Linear(dim, dim_key * heads, bias = False)
self.to_v = nn.Linear(dim, dim_value * heads, bias = False)
self.to_out = nn.Linear(dim_value * heads, dim)
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
# relative positional encoding
self.num_rel_pos_features = num_rel_pos_features
self.to_rel_k = nn.Linear(num_rel_pos_features, dim_key * heads, bias = False)
self.rel_content_bias = nn.Parameter(torch.randn(1, heads, 1, dim_key))
self.rel_pos_bias = nn.Parameter(torch.randn(1, heads, 1, dim_key))
# dropouts
self.pos_dropout = nn.Dropout(pos_dropout)
self.attn_dropout = nn.Dropout(dropout)
def forward(self, x):
n, h, device = x.shape[-2], self.heads, x.device
q = self.to_q(x)
k = self.to_k(x)
v = self.to_v(x)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
content_logits = einsum('b h i d, b h j d -> b h i j', q + self.rel_content_bias, k)
positions = get_positional_embed(n, self.num_rel_pos_features, device)
positions = self.pos_dropout(positions)
rel_k = self.to_rel_k(positions)
rel_k = rearrange(rel_k, 'n (h d) -> h n d', h = h)
rel_logits = einsum('b h i d, h j d -> b h i j', q + self.rel_pos_bias, rel_k)
rel_logits = relative_shift(rel_logits)
logits = content_logits + rel_logits
attn = logits.softmax(dim = -1)
attn = self.attn_dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class Enformer(PreTrainedModel):
config_class = EnformerConfig
base_model_prefix = "enformer"
@staticmethod
def from_hparams(**kwargs):
return Enformer(EnformerConfig(**kwargs))
def __init__(self, config):
super().__init__(config)
self.dim = config.dim
half_dim = config.dim // 2
twice_dim = config.dim * 2
# create stem
self.stem = nn.Sequential(
nn.Conv1d(4, half_dim, 15, padding = 7),
Residual(ConvBlock(half_dim)),
AttentionPool(half_dim, pool_size = 2)
)
# create conv tower
filter_list = exponential_linspace_int(half_dim, config.dim, num = (config.num_downsamples - 1), divisible_by = config.dim_divisible_by)
filter_list = [half_dim, *filter_list]
conv_layers = []
for dim_in, dim_out in zip(filter_list[:-1], filter_list[1:]):
conv_layers.append(nn.Sequential(
ConvBlock(dim_in, dim_out, kernel_size = 5),
Residual(ConvBlock(dim_out, dim_out, 1)),
AttentionPool(dim_out, pool_size = 2)
))
self.conv_tower = nn.Sequential(*conv_layers)
# transformer
transformer = []
for _ in range(config.depth):
transformer.append(nn.Sequential(
Residual(nn.Sequential(
nn.LayerNorm(config.dim),
Attention(
config.dim,
heads = config.heads,
dim_key = config.attn_dim_key,
dim_value = config.dim // config.heads,
dropout = config.attn_dropout,
pos_dropout = config.pos_dropout,
num_rel_pos_features = config.dim // config.heads
),
nn.Dropout(config.dropout_rate)
)),
Residual(nn.Sequential(
nn.LayerNorm(config.dim),
nn.Linear(config.dim, config.dim * 2),
nn.Dropout(config.dropout_rate),
nn.ReLU(),
nn.Linear(config.dim * 2, config.dim),
nn.Dropout(config.dropout_rate)
))
))
self.transformer = nn.Sequential(*transformer)
# target cropping
self.target_length = config.target_length
self.crop_final = TargetLengthCrop(config.target_length)
# final pointwise
self.final_pointwise = nn.Sequential(
Rearrange('b n d -> b d n'),
ConvBlock(filter_list[-1], twice_dim, 1),
Rearrange('b d n -> b n d'),
nn.Dropout(config.dropout_rate / 8),
GELU()
)
# create trunk sequential module
self._trunk = nn.Sequential(
Rearrange('b n d -> b d n'),
self.stem,
self.conv_tower,
Rearrange('b d n -> b n d'),
self.transformer,
self.crop_final,
self.final_pointwise
)
# create final heads for human and mouse
self.add_heads(**config.output_heads)
# use checkpointing on transformer trunk
self.use_checkpointing = config.use_checkpointing
def add_heads(self, **kwargs):
self.output_heads = kwargs
self._heads = nn.ModuleDict(map_values(lambda features: nn.Sequential(
nn.Linear(self.dim * 2, features),
nn.Softplus()
), kwargs))
def set_target_length(self, target_length):
crop_module = self._trunk[-2]
crop_module.target_length = target_length
@property
def trunk(self):
return self._trunk
@property
def heads(self):
return self._heads
def trunk_checkpointed(self, x):
x = rearrange(x, 'b n d -> b d n')
x = self.stem(x)
x = self.conv_tower(x)
x = rearrange(x, 'b d n -> b n d')
x = checkpoint_sequential(self.transformer, len(self.transformer), x)
x = self.crop_final(x)
x = self.final_pointwise(x)
return x
def forward(
self,
x,
target = None,
return_corr_coef = False,
return_embeddings = False,
return_only_embeddings = False,
head = None,
target_length = None
):
if isinstance(x, list):
x = str_to_one_hot(x)
elif x.dtype == torch.long:
x = seq_indices_to_one_hot(x)
no_batch = x.ndim == 2
if no_batch:
x = rearrange(x, '... -> () ...')
if exists(target_length):
self.set_target_length(target_length)
trunk_fn = self.trunk_checkpointed if self.use_checkpointing else self._trunk
x = trunk_fn(x)
if no_batch:
x = rearrange(x, '() ... -> ...')
if return_only_embeddings:
return x
out = map_values(lambda fn: fn(x), self._heads)
if exists(head):
assert head in self._heads, f'head {head} not found'
out = out[head]
if exists(target):
assert exists(head), 'head must be passed in if one were to calculate loss directly with targets'
if return_corr_coef:
return pearson_corr_coef(out, target)
return poisson_loss(out, target)
if return_embeddings:
return out, x
return out
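# Illustrative usage sketch (editor's addition, not part of the original file).
# A deliberately small Enformer built via from_hparams (the published model uses
# dim = 1536, depth = 11); the input is a batch of sequence indices (0-3 = ACGT, 4 = N)
# and each head returns predictions of shape (batch, target_length, tracks).
if __name__ == "__main__":
    model = Enformer.from_hparams(
        dim = 384,
        depth = 2,
        heads = 8,
        output_heads = dict(human = 5313, mouse = 1643),
        target_length = 200
    )
    seq = torch.randint(0, 5, (1, 196_608 // 4))
    out = model(seq)
    print(out['human'].shape)    # torch.Size([1, 200, 5313])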
| enformer-pytorch-main | enformer_pytorch/modeling_enformer.py |
from transformers import PretrainedConfig
class EnformerConfig(PretrainedConfig):
model_type = "enformer"
def __init__(
self,
dim = 1536,
depth = 11,
heads = 8,
output_heads = dict(human = 5313, mouse= 1643),
target_length = 896,
attn_dim_key = 64,
dropout_rate = 0.4,
attn_dropout = 0.05,
pos_dropout = 0.01,
use_checkpointing = False,
use_convnext = False,
num_downsamples = 7, # genetic sequence is downsampled 2 ** 7 == 128x in default Enformer - can be changed for higher resolution
dim_divisible_by = 128,
**kwargs,
):
self.dim = dim
self.depth = depth
self.heads = heads
self.output_heads = output_heads
self.target_length = target_length
self.attn_dim_key = attn_dim_key
self.dropout_rate = dropout_rate
self.attn_dropout = attn_dropout
self.pos_dropout = pos_dropout
self.use_checkpointing = use_checkpointing
self.num_downsamples = num_downsamples
self.dim_divisible_by = dim_divisible_by
super().__init__(**kwargs)
| enformer-pytorch-main | enformer_pytorch/config_enformer.py |
import torch
from typing import Optional
from copy import deepcopy
from contextlib import contextmanager
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from enformer_pytorch.modeling_enformer import Enformer, poisson_loss
from discrete_key_value_bottleneck_pytorch import DiscreteKeyValueBottleneck
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
# better sequential
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# controlling freezing of layers
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
def freeze_batchnorms_(model):
bns = [m for m in model.modules() if isinstance(m, nn.BatchNorm1d)]
for bn in bns:
bn.eval()
bn.track_running_stats = False
set_module_requires_grad_(bn, False)
def freeze_all_but_layernorms_(model):
for m in model.modules():
set_module_requires_grad_(m, isinstance(m, nn.LayerNorm))
def freeze_all_but_last_n_layers_(enformer, n):
assert isinstance(enformer, Enformer)
freeze_all_layers_(enformer)
transformer_blocks = enformer.transformer
for module in transformer_blocks[-n:]:
set_module_requires_grad_(module, True)
# get enformer embeddings
def get_enformer_embeddings(
model,
seq,
freeze = False,
train_layernorms_only = False,
train_last_n_layers_only = None,
enformer_kwargs: dict = {}
):
freeze_batchnorms_(model)
if train_layernorms_only:
assert not freeze, 'you set the intent to train the layernorms of the enformer, yet also indicated you wanted to freeze the entire model'
freeze_all_but_layernorms_(model)
if exists(train_last_n_layers_only):
assert not freeze, 'you set the intent to train last N layers of enformer, but also indicated you wanted to freeze the entire network'
freeze_all_but_last_n_layers_(model, train_last_n_layers_only)
enformer_context = null_context() if not freeze else torch.no_grad()
with enformer_context:
embeddings = model(seq, return_only_embeddings = True, **enformer_kwargs)
if freeze:
embeddings.detach_()
return embeddings
# fine-tune wrapper classes
# extra head projection, akin to how human and mouse tracks were trained
class HeadAdapterWrapper(nn.Module):
def __init__(
self,
*,
enformer,
num_tracks,
post_transformer_embed = False, # whether to take the embeddings from right after the transformer, instead of after the final pointwise convolution - this would add another layernorm
discrete_key_value_bottleneck = False,
bottleneck_num_memories = 256,
bottleneck_num_codebooks = 4,
bottleneck_decay = 0.9,
transformer_embed_fn: nn.Module = nn.Identity(),
output_activation: Optional[nn.Module] = nn.Softplus(),
auto_set_target_length = True
):
super().__init__()
assert isinstance(enformer, Enformer)
enformer_hidden_dim = enformer.dim * (2 if not post_transformer_embed else 1)
self.discrete_key_value_bottleneck = discrete_key_value_bottleneck
if discrete_key_value_bottleneck:
enformer = DiscreteKeyValueBottleneck(
encoder = enformer,
dim = enformer_hidden_dim,
num_memory_codebooks = bottleneck_num_codebooks,
num_memories = bottleneck_num_memories,
dim_memory = enformer_hidden_dim // bottleneck_num_codebooks,
decay = bottleneck_decay,
)
self.post_transformer_embed = post_transformer_embed
self.enformer = enformer
self.auto_set_target_length = auto_set_target_length
if post_transformer_embed:
self.enformer = deepcopy(enformer)
self.enformer._trunk[-1] = nn.Identity()
self.enformer.final_pointwise = nn.Identity()
self.post_embed_transform = Sequential(
transformer_embed_fn,
nn.LayerNorm(enformer_hidden_dim) if post_transformer_embed else None
)
self.to_tracks = Sequential(
nn.Linear(enformer_hidden_dim, num_tracks),
output_activation
)
def forward(
self,
seq,
*,
target = None,
freeze_enformer = False,
finetune_enformer_ln_only = False,
finetune_last_n_layers_only = None
):
enformer_kwargs = dict()
if exists(target) and self.auto_set_target_length:
enformer_kwargs = dict(target_length = target.shape[-2])
if self.discrete_key_value_bottleneck:
embeddings = self.enformer(seq, return_only_embeddings = True, **enformer_kwargs)
else:
embeddings = get_enformer_embeddings(self.enformer, seq, freeze = freeze_enformer, train_layernorms_only = finetune_enformer_ln_only, train_last_n_layers_only = finetune_last_n_layers_only, enformer_kwargs = enformer_kwargs)
preds = self.to_tracks(embeddings)
if not exists(target):
return preds
return poisson_loss(preds, target)
# wrapper that allows one to supply each track with a context dimension
# the context embedding will be projected into the weights and biases of the head linear projection (hypernetwork)
class ContextAdapterWrapper(nn.Module):
def __init__(
self,
*,
enformer,
context_dim,
discrete_key_value_bottleneck = False,
bottleneck_num_memories = 256,
bottleneck_num_codebooks = 4,
bottleneck_decay = 0.9,
auto_set_target_length = True,
output_activation: Optional[nn.Module] = nn.Softplus()
):
super().__init__()
assert isinstance(enformer, Enformer)
enformer_hidden_dim = enformer.dim * 2
self.discrete_key_value_bottleneck = discrete_key_value_bottleneck
if discrete_key_value_bottleneck:
enformer = DiscreteKeyValueBottleneck(
encoder = enformer,
dim = enformer_hidden_dim,
num_memory_codebooks = bottleneck_num_codebooks,
num_memories = bottleneck_num_memories,
dim_memory = enformer_hidden_dim // bottleneck_num_codebooks,
decay = bottleneck_decay,
)
self.enformer = enformer
self.auto_set_target_length = auto_set_target_length
self.to_context_weights = nn.Parameter(torch.randn(context_dim, enformer_hidden_dim))
self.to_context_bias = nn.Parameter(torch.randn(context_dim))
self.activation = default(output_activation, nn.Identity())
def forward(
self,
seq,
*,
context,
target = None,
freeze_enformer = False,
finetune_enformer_ln_only = False,
finetune_last_n_layers_only = None
):
enformer_kwargs = dict()
if exists(target) and self.auto_set_target_length:
enformer_kwargs = dict(target_length = target.shape[-2])
if self.discrete_key_value_bottleneck:
embeddings = self.enformer(seq, return_only_embeddings = True, **enformer_kwargs)
else:
embeddings = get_enformer_embeddings(self.enformer, seq, freeze = freeze_enformer, train_layernorms_only = finetune_enformer_ln_only, train_last_n_layers_only = finetune_last_n_layers_only, enformer_kwargs = enformer_kwargs)
weights = einsum('t d, d e -> t e', context, self.to_context_weights)
bias = einsum('t d, d -> t', context, self.to_context_bias)
pred = einsum('b n d, t d -> b n t', embeddings, weights) + bias
pred = self.activation(pred)
if not exists(target):
return pred
return poisson_loss(pred, target)
# wrapper that does attention aggregation of the context, which can be a list of tokens (batch x seq x dim)
class ContextAttentionAdapterWrapper(nn.Module):
def __init__(
self,
*,
enformer,
context_dim,
heads = 8,
dim_head = 64,
discrete_key_value_bottleneck = False,
bottleneck_num_memories = 256,
bottleneck_num_codebooks = 4,
bottleneck_decay = 0.9,
auto_set_target_length = True,
output_activation: Optional[nn.Module] = nn.Softplus()
):
super().__init__()
assert isinstance(enformer, Enformer)
enformer_hidden_dim = enformer.dim * 2
self.discrete_key_value_bottleneck = discrete_key_value_bottleneck
if discrete_key_value_bottleneck:
enformer = DiscreteKeyValueBottleneck(
encoder = enformer,
dim = enformer_hidden_dim,
num_memory_codebooks = bottleneck_num_codebooks,
num_memories = bottleneck_num_memories,
dim_memory = enformer_hidden_dim // bottleneck_num_codebooks,
decay = bottleneck_decay,
)
self.enformer = enformer
self.auto_set_target_length = auto_set_target_length
self.query_norm = nn.LayerNorm(enformer_hidden_dim)
self.key_values_norm = nn.LayerNorm(context_dim)
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = heads * dim_head
self.to_queries = nn.Linear(enformer_hidden_dim, inner_dim, bias = False)
self.null_key = nn.Parameter(torch.randn(inner_dim))
self.null_value = nn.Parameter(torch.randn(inner_dim))
self.to_key_values = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, enformer_hidden_dim)
self.to_pred = Sequential(
nn.Linear(enformer_hidden_dim, 1),
Rearrange('b c ... 1 -> b ... c'),
output_activation
)
def forward(
self,
seq,
*,
context,
context_mask = None,
target = None,
freeze_enformer = False,
finetune_enformer_ln_only = False,
finetune_last_n_layers_only = None
):
"""
b - batch
n - sequence length
c - number of contexts (tracks)
d - dimension
i - sequence length (query embeddings)
j - sequence length (keys / values contexts)
h - attention heads
"""
h = self.heads
enformer_kwargs = dict()
if exists(target) and self.auto_set_target_length:
enformer_kwargs = dict(target_length = target.shape[-2])
if self.discrete_key_value_bottleneck:
embeddings = self.enformer(seq, return_only_embeddings = True, **enformer_kwargs)
else:
embeddings = get_enformer_embeddings(self.enformer, seq, freeze = freeze_enformer, train_layernorms_only = finetune_enformer_ln_only, train_last_n_layers_only = finetune_last_n_layers_only, enformer_kwargs = enformer_kwargs)
# perform cross attention from genetic -> context
if context.ndim == 2:
context = rearrange(context, 'b d -> b 1 d')
q = self.to_queries(self.query_norm(embeddings))
k, v = self.to_key_values(self.key_values_norm(context)).chunk(2, dim = -1)
null_k, null_v = map(lambda t: repeat(t, 'd -> b 1 d', b = context.shape[0]), (self.null_key, self.null_value))
k = torch.cat((null_k, k), dim = 1)
v = torch.cat((null_v, v), dim = 1)
# split out head
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, c h j d -> b c h i j', q, k) * self.scale
# masking
if exists(context_mask):
context_mask = F.pad(context_mask, (1, 0), value = True)
context_mask = rearrange(context_mask, 'b j -> b 1 1 1 j')
sim = sim.masked_fill(~context_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b c h i j, c h j d -> b c h i d', attn, v)
out = rearrange(out, 'b c h n d -> b c n (h d)', h = h)
# combine heads
branch_out = self.to_out(out)
# residual
embeddings = embeddings + branch_out
# to prediction
pred = self.to_pred(embeddings)
if not exists(target):
return pred
return poisson_loss(pred, target)
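# Illustrative fine-tuning sketch (editor's addition, not part of the original file).
# A randomly initialised Enformer is wrapped with a fresh head for a hypothetical set of
# 128 tracks; when `target` is supplied the wrapper returns the Poisson loss directly.
if __name__ == "__main__":
    enformer = Enformer.from_hparams(dim = 384, depth = 1, heads = 8, target_length = 200)
    wrapper = HeadAdapterWrapper(enformer = enformer, num_tracks = 128)
    seq = torch.randint(0, 5, (1, 196_608 // 4))
    target = torch.randn(1, 200, 128).abs()    # placeholder targets
    loss = wrapper(seq, target = target, freeze_enformer = True)
    loss.backward()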
| enformer-pytorch-main | enformer_pytorch/finetune.py |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
import polars as pl
import numpy as np
from random import randrange, random
from pathlib import Path
from pyfaidx import Fasta
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def cast_list(t):
return t if isinstance(t, list) else [t]
def coin_flip():
return random() > 0.5
# genomic function transforms
seq_indices_embed = torch.zeros(256).long()
seq_indices_embed[ord('a')] = 0
seq_indices_embed[ord('c')] = 1
seq_indices_embed[ord('g')] = 2
seq_indices_embed[ord('t')] = 3
seq_indices_embed[ord('n')] = 4
seq_indices_embed[ord('A')] = 0
seq_indices_embed[ord('C')] = 1
seq_indices_embed[ord('G')] = 2
seq_indices_embed[ord('T')] = 3
seq_indices_embed[ord('N')] = 4
seq_indices_embed[ord('.')] = -1
one_hot_embed = torch.zeros(256, 4)
one_hot_embed[ord('a')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('c')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('g')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('t')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('n')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('A')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('C')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('G')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('T')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('N')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('.')] = torch.Tensor([0.25, 0.25, 0.25, 0.25])
reverse_complement_map = torch.Tensor([3, 2, 1, 0, 4]).long()
def torch_fromstring(seq_strs):
batched = not isinstance(seq_strs, str)
seq_strs = cast_list(seq_strs)
np_seq_chrs = list(map(lambda t: np.fromstring(t, dtype = np.uint8), seq_strs))
seq_chrs = list(map(torch.from_numpy, np_seq_chrs))
return torch.stack(seq_chrs) if batched else seq_chrs[0]
def str_to_seq_indices(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return seq_indices_embed[seq_chrs.long()]
def str_to_one_hot(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return one_hot_embed[seq_chrs.long()]
def seq_indices_to_one_hot(t, padding = -1):
is_padding = t == padding
t = t.clamp(min = 0)
one_hot = F.one_hot(t, num_classes = 5)
out = one_hot[..., :4].float()
out = out.masked_fill(is_padding[..., None], 0.25)
return out
# augmentations
def seq_indices_reverse_complement(seq_indices):
complement = reverse_complement_map[seq_indices.long()]
return torch.flip(complement, dims = (-1,))
def one_hot_reverse_complement(one_hot):
*_, n, d = one_hot.shape
assert d == 4, 'must be one hot encoding with last dimension equal to 4'
return torch.flip(one_hot, (-1, -2))
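# Illustrative check (sketch): reverse-complementing the one-hot encoding matches encoding the
# reverse-complemented string directly, e.g.
#   torch.equal(one_hot_reverse_complement(str_to_one_hot('AACG')), str_to_one_hot('CGTT'))   # -> True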
# processing bed files
class FastaInterval():
def __init__(
self,
*,
fasta_file,
context_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
self.context_length = context_length
self.shift_augs = shift_augs
self.rc_aug = rc_aug
def __call__(self, chr_name, start, end, return_augs = False):
interval_length = end - start
chromosome = self.seqs[chr_name]
chromosome_length = len(chromosome)
        rand_shift = 0
        if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
if exists(self.context_length) and interval_length < self.context_length:
extra_seq = self.context_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
seq = ('.' * left_padding) + str(chromosome[start:end]) + ('.' * right_padding)
should_rc_aug = self.rc_aug and coin_flip()
if self.return_seq_indices:
seq = str_to_seq_indices(seq)
if should_rc_aug:
seq = seq_indices_reverse_complement(seq)
return seq
one_hot = str_to_one_hot(seq)
if should_rc_aug:
one_hot = one_hot_reverse_complement(one_hot)
if not return_augs:
return one_hot
# returns the shift integer as well as the bool (for whether reverse complement was activated)
# for this particular genomic sequence
rand_shift_tensor = torch.tensor([rand_shift])
rand_aug_bool_tensor = torch.tensor([should_rc_aug])
return one_hot, rand_shift_tensor, rand_aug_bool_tensor
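# Illustrative usage (sketch; 'hg38.fa' is a placeholder path for a local FASTA file):
#   fasta = FastaInterval(fasta_file = 'hg38.fa', context_length = 196_608, rc_aug = True)
#   one_hot = fasta('chr1', 1_000_000, 1_000_128)   # -> (196_608, 4) one-hot tensor, padded out to context_length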
class GenomeIntervalDataset(Dataset):
def __init__(
self,
bed_file,
fasta_file,
filter_df_fn = identity,
chr_bed_to_fasta_map = dict(),
context_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False,
return_augs = False
):
super().__init__()
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
df = pl.read_csv(str(bed_path), separator = '\t', has_header = False)
df = filter_df_fn(df)
self.df = df
# if the chromosome name in the bed file is different than the keyname in the fasta
# can remap on the fly
self.chr_bed_to_fasta_map = chr_bed_to_fasta_map
self.fasta = FastaInterval(
fasta_file = fasta_file,
context_length = context_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug
)
self.return_augs = return_augs
def __len__(self):
return len(self.df)
def __getitem__(self, ind):
interval = self.df.row(ind)
chr_name, start, end = (interval[0], interval[1], interval[2])
chr_name = self.chr_bed_to_fasta_map.get(chr_name, chr_name)
return self.fasta(chr_name, start, end, return_augs = self.return_augs)
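# Illustrative usage (sketch; 'sequences.bed' and 'hg38.fa' are placeholder paths):
#   from torch.utils.data import DataLoader
#   ds = GenomeIntervalDataset(bed_file = 'sequences.bed', fasta_file = 'hg38.fa', context_length = 196_608)
#   loader = DataLoader(ds, batch_size = 2, shuffle = True)
#   seqs = next(iter(loader))   # -> (2, 196_608, 4) float tensor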
| enformer-pytorch-main | enformer_pytorch/data.py |
import numpy as np
import torch
from einops import rearrange
def copy_bn(mod, vars, path):
bn_offset = vars[f'{path}offset:0']
bn_scale = vars[f'{path}scale:0']
ema_path = '/'.join(path.split('/')[:-1]) + '/'
bn_running_mean = vars[f'{ema_path}moving_mean/average:0']
bn_running_var = vars[f'{ema_path}moving_variance/average:0']
mod.weight.data.copy_(bn_scale)
mod.bias.data.copy_(bn_offset)
mod.running_var.data.copy_(rearrange(bn_running_var, '1 1 d -> d'))
mod.running_mean.data.copy_(rearrange(bn_running_mean, '1 1 d -> d'))
def copy_conv(mod, vars, path):
bias = vars[f'{path}b:0']
weight = vars[f'{path}w:0']
mod.weight.data.copy_(rearrange(weight, 'k i o -> o i k'))
mod.bias.data.copy_(bias)
def copy_attn_pool(mod, vars, path):
attn_pool_proj = vars[path]
mod.to_attn_logits.weight.data.copy_(rearrange(attn_pool_proj, 'i o -> o i 1 1'))
def copy_linear(mod, vars, path, has_bias = True):
weight = vars[f'{path}w:0']
mod.weight.data.copy_(rearrange(weight, 'i o -> o i'))
if not has_bias:
return
bias = vars[f'{path}b:0']
mod.bias.data.copy_(bias)
def copy_ln(mod, vars, path):
weight = vars[f'{path}scale:0']
bias = vars[f'{path}offset:0']
mod.weight.data.copy_(weight)
mod.bias.data.copy_(bias)
def get_tf_vars(tf_model):
return {v.name: (torch.from_numpy(v.numpy()) if isinstance(v.numpy(), np.ndarray) else None) for v in tf_model.variables}
def copy_tf_to_pytorch(tf_model, pytorch_model):
tf_vars = get_tf_vars(tf_model)
stem_conv = pytorch_model.stem[0]
stem_point_bn = pytorch_model.stem[1].fn[0]
stem_point_conv = pytorch_model.stem[1].fn[2]
stem_attn_pool = pytorch_model.stem[2]
copy_conv(stem_conv, tf_vars, 'enformer/trunk/stem/conv1_d/')
copy_bn(stem_point_bn, tf_vars, 'enformer/trunk/stem/pointwise_conv_block/cross_replica_batch_norm/')
copy_conv(stem_point_conv, tf_vars, 'enformer/trunk/stem/pointwise_conv_block/conv1_d/')
copy_attn_pool(stem_attn_pool, tf_vars, 'enformer/trunk/stem/softmax_pooling/linear/w:0')
for ind, tower_block in enumerate(pytorch_model.conv_tower):
tower_bn = tower_block[0][0]
tower_conv = tower_block[0][2]
tower_point_bn = tower_block[1].fn[0]
tower_point_conv = tower_block[1].fn[2]
tower_attn_pool = tower_block[2]
conv_path = f'enformer/trunk/conv_tower/conv_tower_block_{ind}/conv_block/conv1_d/'
bn_path = f'enformer/trunk/conv_tower/conv_tower_block_{ind}/conv_block/cross_replica_batch_norm/'
point_conv_path = f'enformer/trunk/conv_tower/conv_tower_block_{ind}/pointwise_conv_block/conv1_d/'
point_bn_path = f'enformer/trunk/conv_tower/conv_tower_block_{ind}/pointwise_conv_block/cross_replica_batch_norm/'
attn_pool_path = f'enformer/trunk/conv_tower/conv_tower_block_{ind}/softmax_pooling/linear/w:0'
copy_bn(tower_bn, tf_vars, bn_path)
copy_conv(tower_conv, tf_vars, conv_path)
copy_bn(tower_point_bn, tf_vars, point_bn_path)
copy_conv(tower_point_conv, tf_vars, point_conv_path)
copy_attn_pool(tower_attn_pool, tf_vars, attn_pool_path)
for ind, transformer_block in enumerate(pytorch_model.transformer):
attn_ln_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/layer_norm/'
attn_q_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/q_layer/'
attn_k_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/k_layer/'
attn_r_k_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/r_k_layer/'
attn_v_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/v_layer/'
attn_out_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/embedding_layer/'
attn_content_bias_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/r_w_bias:0'
attn_rel_bias_path = f'enformer/trunk/transformer/transformer_block_{ind}/mha/attention_{ind}/r_r_bias:0'
ff_ln_path = f'enformer/trunk/transformer/transformer_block_{ind}/mlp/layer_norm/'
# https://github.com/deepmind/deepmind-research/blob/master/enformer/enformer.py#L119
# needs to be edited to snt.Linear(channels * 2, name = 'project_in') and snt.Linear(channels, name = 'project_out') or variables are not accessible
ff_linear1_path = f'enformer/trunk/transformer/transformer_block_{ind}/mlp/project_in/'
ff_linear2_path = f'enformer/trunk/transformer/transformer_block_{ind}/mlp/project_out/'
attn = transformer_block[0]
attn_ln = attn.fn[0]
mha = attn.fn[1]
copy_linear(mha.to_q, tf_vars, attn_q_path, has_bias = False)
copy_linear(mha.to_k, tf_vars, attn_k_path, has_bias = False)
copy_linear(mha.to_rel_k, tf_vars, attn_r_k_path, has_bias = False)
copy_linear(mha.to_v, tf_vars, attn_v_path, has_bias = False)
copy_linear(mha.to_out, tf_vars, attn_out_path)
mha.rel_content_bias.data.copy_(tf_vars[attn_content_bias_path])
mha.rel_pos_bias.data.copy_(tf_vars[attn_rel_bias_path])
ff = transformer_block[-1]
ff_ln = ff.fn[0]
ff_linear1 = ff.fn[1]
ff_linear2 = ff.fn[4]
copy_ln(attn_ln, tf_vars, attn_ln_path)
copy_ln(ff_ln, tf_vars, ff_ln_path)
copy_linear(ff_linear1, tf_vars, ff_linear1_path)
copy_linear(ff_linear2, tf_vars, ff_linear2_path)
final_bn = pytorch_model.final_pointwise[1][0]
final_conv = pytorch_model.final_pointwise[1][2]
copy_bn(final_bn, tf_vars, 'enformer/trunk/final_pointwise/conv_block/cross_replica_batch_norm/')
copy_conv(final_conv, tf_vars, 'enformer/trunk/final_pointwise/conv_block/conv1_d/')
human_linear = pytorch_model._heads['human'][0]
mouse_linear = pytorch_model._heads['mouse'][0]
copy_linear(human_linear, tf_vars, 'enformer/heads/head_human/linear/')
copy_linear(mouse_linear, tf_vars, 'enformer/heads/head_mouse/linear/')
print('success') | enformer-pytorch-main | scripts/tf_to_torch.py |
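# Illustrative driver (sketch): `tf_model` is assumed to be the Sonnet/TF Enformer whose variable
# names match the paths above, and `pytorch_model` an Enformer instance from this package; the
# converted weights can then be persisted with torch.save, e.g.
#   copy_tf_to_pytorch(tf_model, pytorch_model)
#   torch.save(pytorch_model.state_dict(), 'enformer_converted.pt')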
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['big_sleep']
from version import __version__
setup(
name = 'big-sleep',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'dream = big_sleep.cli:main',
],
},
version = __version__,
license='MIT',
description = 'Big Sleep',
author = 'Ryan Murdock, Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/big-sleep',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'text to image',
'generative adversarial networks'
],
install_requires=[
'torch>=1.7.1',
'einops>=0.3',
'fire',
'ftfy',
'pytorch-pretrained-biggan',
'regex',
'torchvision>=0.8.2',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| big-sleep-main | setup.py |
import time
import shutil
import torch
from big_sleep import Imagine
terminate = False
def signal_handling(signum,frame):
global terminate
terminate = True
num_attempts = 4
for attempt in range(num_attempts):
dream = Imagine(
text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
text_min = "blur\\zoom",
lr = 7e-2,
image_size = 512,
gradient_accumulate_every = 1,
save_every = 50,
epochs = 5,
iterations = 50,
save_progress = False,
bilinear = False,
open_folder = False,
seed = None,
torch_deterministic = False,
max_classes = 20,
class_temperature = 2.,
save_date_time = False,
save_best = True,
experimental_resample = True,
ema_decay = 0.99
)
dream()
shutil.copy(dream.textpath + ".best.png", f"{attempt}.png")
try:
time.sleep(2)
del dream
time.sleep(2)
torch.cuda.empty_cache()
except Exception:
torch.cuda.empty_cache() | big-sleep-main | test/multi_prompt_minmax.py |
__version__ = '0.9.1'
| big-sleep-main | big_sleep/version.py |
"""Good differentiable image resampling for PyTorch."""
from functools import update_wrapper
import math
import torch
from torch.nn import functional as F
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
def odd(fn):
return update_wrapper(lambda x: torch.sign(x) * fn(abs(x)), fn)
def _to_linear_srgb(input):
cond = input <= 0.04045
a = input / 12.92
b = ((input + 0.055) / 1.055)**2.4
return torch.where(cond, a, b)
def _to_nonlinear_srgb(input):
cond = input <= 0.0031308
a = 12.92 * input
b = 1.055 * input**(1/2.4) - 0.055
return torch.where(cond, a, b)
to_linear_srgb = odd(_to_linear_srgb)
to_nonlinear_srgb = odd(_to_nonlinear_srgb)
def resample(input, size, align_corners=True, is_srgb=False):
n, c, h, w = input.shape
dh, dw = size
if is_srgb:
input = to_linear_srgb(input)
input = input.view([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 3), 3).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 3), 3).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.view([n, c, h, w])
input = F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
if is_srgb:
input = to_nonlinear_srgb(input)
return input
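# Minimal usage sketch (hypothetical toy input, shapes only): downsample a random image batch.
if __name__ == '__main__':
    x = torch.rand(2, 3, 64, 64)
    y = resample(x, (32, 32))
    print(y.shape)  # torch.Size([2, 3, 32, 32])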
| big-sleep-main | big_sleep/resample.py |
# Exponential Moving Average (from https://gist.github.com/crowsonkb/76b94d5238272722290734bf4725d204)
"""Exponential moving average for PyTorch. Adapted from
https://www.zijianhu.com/post/pytorch/ema/ by crowsonkb
"""
from copy import deepcopy
import torch
from torch import nn
class EMA(nn.Module):
def __init__(self, model, decay):
super().__init__()
self.model = model
self.decay = decay
self.register_buffer('accum', torch.tensor(1.))
self._biased = deepcopy(self.model)
self.average = deepcopy(self.model)
for param in self._biased.parameters():
param.detach_().zero_()
for param in self.average.parameters():
param.detach_().zero_()
self.update()
@torch.no_grad()
def update(self):
assert self.training, 'Update should only be called during training'
self.accum *= self.decay
model_params = dict(self.model.named_parameters())
biased_params = dict(self._biased.named_parameters())
average_params = dict(self.average.named_parameters())
assert model_params.keys() == biased_params.keys() == average_params.keys(), f'Model parameter keys incompatible with EMA stored parameter keys'
for name, param in model_params.items():
biased_params[name].mul_(self.decay)
biased_params[name].add_((1 - self.decay) * param)
average_params[name].copy_(biased_params[name])
average_params[name].div_(1 - self.accum)
model_buffers = dict(self.model.named_buffers())
biased_buffers = dict(self._biased.named_buffers())
average_buffers = dict(self.average.named_buffers())
assert model_buffers.keys() == biased_buffers.keys() == average_buffers.keys()
for name, buffer in model_buffers.items():
biased_buffers[name].copy_(buffer)
average_buffers[name].copy_(buffer)
def forward(self, *args, **kwargs):
if self.training:
return self.model(*args, **kwargs)
return self.average(*args, **kwargs)
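# Minimal usage sketch (hypothetical toy model): track an EMA of a tiny linear layer while
# training it, then evaluate with the averaged weights.
if __name__ == '__main__':
    net = nn.Linear(4, 2)
    ema = EMA(net, decay = 0.99)
    opt = torch.optim.SGD(net.parameters(), lr = 0.1)
    for _ in range(3):
        loss = net(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.update()            # refresh the debiased running average after each optimizer step
    ema.eval()                  # forward now routes through the averaged copy
    print(ema(torch.randn(1, 4)).shape)  # torch.Size([1, 2])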
| big-sleep-main | big_sleep/ema.py |
from big_sleep.big_sleep import BigSleep, Imagine
| big-sleep-main | big_sleep/__init__.py |
# this code is a copy from huggingface
# with some minor modifications
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import json
import copy
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
Path.home() / '.pytorch_pretrained_biggan'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BIGGAN_CACHE = os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_biggan'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PRETRAINED_MODEL_ARCHIVE_MAP = {
'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-pytorch_model.bin",
'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-pytorch_model.bin",
'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-config.json",
'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-config.json",
'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-config.json",
}
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.json'
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
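# Illustrative (sketch): cache filenames are derived from a hash of the URL, optionally suffixed
# with a hash of the ETag, e.g.
#   url_to_filename('https://example.com/model.bin')               # 64-char hex digest
#   url_to_filename('https://example.com/model.bin', etag='abc')   # '<url hash>.<etag hash>'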
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
class BigGANConfig(object):
""" Configuration class to store the configuration of a `BigGAN`.
Defaults are for the 128x128 model.
    Each entry in `layers` is a tuple of (up-sample in this layer?, input channels, output channels).
"""
def __init__(self,
output_dim=128,
z_dim=128,
class_embed_dim=128,
channel_width=128,
num_classes=1000,
layers=[(False, 16, 16),
(True, 16, 16),
(False, 16, 16),
(True, 16, 8),
(False, 8, 8),
(True, 8, 4),
(False, 4, 4),
(True, 4, 2),
(False, 2, 2),
(True, 2, 1)],
attention_layer_position=8,
eps=1e-4,
n_stats=51):
"""Constructs BigGANConfig. """
self.output_dim = output_dim
self.z_dim = z_dim
self.class_embed_dim = class_embed_dim
self.channel_width = channel_width
self.num_classes = num_classes
self.layers = layers
self.attention_layer_position = attention_layer_position
self.eps = eps
self.n_stats = n_stats
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BigGANConfig` from a Python dictionary of parameters."""
config = BigGANConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BigGANConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
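# Illustrative (sketch): configs round-trip through plain dicts and JSON strings, e.g.
#   cfg = BigGANConfig.from_dict({'output_dim': 256})
#   cfg.output_dim            # -> 256 (remaining fields keep the 128x128 defaults)
#   print(cfg.to_json_string())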
def snconv2d(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Conv2d(**kwargs), eps=eps)
def snlinear(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Linear(**kwargs), eps=eps)
def sn_embedding(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Embedding(**kwargs), eps=eps)
class SelfAttn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_channels, eps=1e-12):
super(SelfAttn, self).__init__()
self.in_channels = in_channels
self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2,
kernel_size=1, bias=False, eps=eps)
self.snconv1x1_o_conv = snconv2d(in_channels=in_channels//2, out_channels=in_channels,
kernel_size=1, bias=False, eps=eps)
self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
self.softmax = nn.Softmax(dim=-1)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
_, ch, h, w = x.size()
# Theta path
theta = self.snconv1x1_theta(x)
theta = theta.view(-1, ch//8, h*w)
# Phi path
phi = self.snconv1x1_phi(x)
phi = self.maxpool(phi)
phi = phi.view(-1, ch//8, h*w//4)
# Attn map
attn = torch.bmm(theta.permute(0, 2, 1), phi)
attn = self.softmax(attn)
# g path
g = self.snconv1x1_g(x)
g = self.maxpool(g)
g = g.view(-1, ch//2, h*w//4)
# Attn_g - o_conv
attn_g = torch.bmm(g, attn.permute(0, 2, 1))
attn_g = attn_g.view(-1, ch//2, h, w)
attn_g = self.snconv1x1_o_conv(attn_g)
# Out
out = x + self.gamma*attn_g
return out
class BigGANBatchNorm(nn.Module):
""" This is a batch norm module that can handle conditional input and can be provided with pre-computed
activation means and variances for various truncation parameters.
We cannot just rely on torch.batch_norm since it cannot handle
    batched weights (pytorch 1.0.1). We compute batch_norm ourselves without updating running means and variances.
If you want to train this model you should add running means and variance computation logic.
"""
def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
super(BigGANBatchNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.conditional = conditional
# We use pre-computed statistics for n_stats values of truncation between 0 and 1
self.register_buffer('running_means', torch.zeros(n_stats, num_features))
self.register_buffer('running_vars', torch.ones(n_stats, num_features))
self.step_size = 1.0 / (n_stats - 1)
if conditional:
assert condition_vector_dim is not None
self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
else:
self.weight = torch.nn.Parameter(torch.Tensor(num_features))
self.bias = torch.nn.Parameter(torch.Tensor(num_features))
def forward(self, x, truncation, condition_vector=None):
        # Retrieve the pre-computed statistics associated with this truncation
coef, start_idx = math.modf(truncation / self.step_size)
start_idx = int(start_idx)
if coef != 0.0: # Interpolate
running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
else:
running_mean = self.running_means[start_idx]
running_var = self.running_vars[start_idx]
if self.conditional:
running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)
out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
else:
out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
training=False, momentum=0.0, eps=self.eps)
return out
class GenBlock(nn.Module):
def __init__(self, in_size, out_size, condition_vector_dim, reduction_factor=4, up_sample=False,
n_stats=51, eps=1e-12):
super(GenBlock, self).__init__()
self.up_sample = up_sample
self.drop_channels = (in_size != out_size)
middle_size = in_size // reduction_factor
self.bn_0 = BigGANBatchNorm(in_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_0 = snconv2d(in_channels=in_size, out_channels=middle_size, kernel_size=1, eps=eps)
self.bn_1 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_1 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
self.bn_2 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_2 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
self.bn_3 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
self.conv_3 = snconv2d(in_channels=middle_size, out_channels=out_size, kernel_size=1, eps=eps)
self.relu = nn.ReLU()
def forward(self, x, cond_vector, truncation):
x0 = x
x = self.bn_0(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_0(x)
x = self.bn_1(x, truncation, cond_vector)
x = self.relu(x)
if self.up_sample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.conv_1(x)
x = self.bn_2(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_2(x)
x = self.bn_3(x, truncation, cond_vector)
x = self.relu(x)
x = self.conv_3(x)
if self.drop_channels:
new_channels = x0.shape[1] // 2
x0 = x0[:, :new_channels, ...]
if self.up_sample:
x0 = F.interpolate(x0, scale_factor=2, mode='nearest')
out = x + x0
return out
class Generator(nn.Module):
def __init__(self, config):
super(Generator, self).__init__()
self.config = config
ch = config.channel_width
condition_vector_dim = config.z_dim * 2
self.gen_z = snlinear(in_features=condition_vector_dim,
out_features=4 * 4 * 16 * ch, eps=config.eps)
layers = []
for i, layer in enumerate(config.layers):
if i == config.attention_layer_position:
layers.append(SelfAttn(ch*layer[1], eps=config.eps))
layers.append(GenBlock(ch*layer[1],
ch*layer[2],
condition_vector_dim,
up_sample=layer[0],
n_stats=config.n_stats,
eps=config.eps))
self.layers = nn.ModuleList(layers)
self.bn = BigGANBatchNorm(ch, n_stats=config.n_stats, eps=config.eps, conditional=False)
self.relu = nn.ReLU()
self.conv_to_rgb = snconv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, eps=config.eps)
self.tanh = nn.Tanh()
def forward(self, cond_vector, truncation):
z = self.gen_z(cond_vector[0].unsqueeze(0))
# We use this conversion step to be able to use TF weights:
# TF convention on shape is [batch, height, width, channels]
# PT convention on shape is [batch, channels, height, width]
z = z.view(-1, 4, 4, 16 * self.config.channel_width)
z = z.permute(0, 3, 1, 2).contiguous()
next_available_latent_index = 1
for layer in self.layers:
if isinstance(layer, GenBlock):
z = layer(z, cond_vector[next_available_latent_index].unsqueeze(0), truncation)
next_available_latent_index += 1
else:
z = layer(z)
z = self.bn(z, truncation)
z = self.relu(z)
z = self.conv_to_rgb(z)
z = z[:, :3, ...]
z = self.tanh(z)
return z
class BigGAN(nn.Module):
"""BigGAN Generator."""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
model_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
try:
resolved_model_file = cached_path(model_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error("Wrong model name, should be a valid path to a folder containing "
"a {} file and a {} file or a model name in {}".format(
WEIGHTS_NAME, CONFIG_NAME, PRETRAINED_MODEL_ARCHIVE_MAP.keys()))
raise
logger.info("loading model {} from cache at {}".format(pretrained_model_name_or_path, resolved_model_file))
# Load config
config = BigGANConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
state_dict = torch.load(resolved_model_file, map_location='cpu' if not torch.cuda.is_available() else None)
model.load_state_dict(state_dict, strict=False)
return model
def __init__(self, config):
super(BigGAN, self).__init__()
self.config = config
self.embeddings = nn.Linear(config.num_classes, config.z_dim, bias=False)
self.generator = Generator(config)
def forward(self, z, class_label, truncation):
assert 0 < truncation <= 1
embed = self.embeddings(class_label)
cond_vector = torch.cat((z, embed), dim=1)
z = self.generator(cond_vector, truncation)
return z
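# Illustrative usage (sketch): this is the big-sleep variant of BigGAN-deep, whose generator
# expects one latent / class row per layer rather than a single batch row; pretrained weights
# are fetched from the URLs above on first use.
#   gan = BigGAN.from_pretrained('biggan-deep-512').eval()
#   num_latents = len(gan.config.layers) + 1
#   z = torch.zeros(num_latents, gan.config.z_dim).normal_()
#   classes = torch.zeros(num_latents, gan.config.num_classes).uniform_()
#   with torch.no_grad():
#       img = gan(z, classes, truncation = 1.)   # -> (1, 3, 512, 512), values in [-1, 1]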
| big-sleep-main | big_sleep/biggan.py |
import fire
import random as rnd
from big_sleep import Imagine, version
from pathlib import Path
from .version import __version__
def train(
text=None,
img=None,
text_min="",
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
epochs = 20,
iterations = 1050,
save_every = 50,
overwrite = False,
save_progress = False,
save_date_time = False,
bilinear = False,
open_folder = True,
seed = 0,
append_seed = False,
random = False,
torch_deterministic = False,
max_classes = None,
class_temperature = 2.,
save_best = False,
experimental_resample = False,
ema_decay = 0.5,
num_cutouts = 128,
center_bias = False,
larger_model = False
):
print(f'Starting up... v{__version__}')
if random:
        seed = rnd.randint(0, int(1e6))
imagine = Imagine(
text=text,
img=img,
text_min=text_min,
lr = lr,
image_size = image_size,
gradient_accumulate_every = gradient_accumulate_every,
epochs = epochs,
iterations = iterations,
save_every = save_every,
save_progress = save_progress,
bilinear = bilinear,
seed = seed,
append_seed = append_seed,
torch_deterministic = torch_deterministic,
open_folder = open_folder,
max_classes = max_classes,
class_temperature = class_temperature,
save_date_time = save_date_time,
save_best = save_best,
experimental_resample = experimental_resample,
ema_decay = ema_decay,
num_cutouts = num_cutouts,
center_bias = center_bias,
larger_clip = larger_model
)
if not overwrite and imagine.filename.exists():
answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
if answer not in ('yes', 'y'):
exit()
imagine()
def main():
fire.Fire(train)
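# Illustrative CLI usage (sketch): setup.py registers this `main` as the `dream` console script,
# and Fire exposes the keyword arguments of `train` as flags, e.g.
#   $ dream "a pyramid made of ice" --image_size=256 --save_every=100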
| big-sleep-main | big_sleep/cli.py |
import os
import sys
import subprocess
import signal
import string
import re
from datetime import datetime
from pathlib import Path
import random
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from torchvision.utils import save_image
import torchvision.transforms as T
from PIL import Image
from tqdm import tqdm, trange
from big_sleep.ema import EMA
from big_sleep.resample import resample
from big_sleep.biggan import BigGAN
from big_sleep.clip import load, tokenize
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum,frame):
print('detecting keyboard interrupt, gracefully exiting')
global terminate
terminate = True
signal.signal(signal.SIGINT,signal_handling)
# helpers
def exists(val):
return val is not None
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/','\\')]
    if cmd_list is None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
def create_text_path(text=None, img=None, encoding=None):
input_name = ""
if text is not None:
input_name += text
if img is not None:
if isinstance(img, str):
img_name = "".join(img.split(".")[:-1]) # replace spaces by underscores, remove img extension
img_name = img_name.split("/")[-1] # only take img name, not path
else:
img_name = "PIL_img"
input_name += "_" + img_name
if encoding is not None:
input_name = "your_encoding"
return input_name.replace("-", "_").replace(",", "").replace(" ", "_").replace("|", "--").strip('-_')[:255]
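# Illustrative (sketch): the returned stem is what the output images get named after, e.g.
#   create_text_path(text = 'an armchair, in the form of pikachu')
#   # -> 'an_armchair_in_the_form_of_pikachu'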
# tensor helpers
def differentiable_topk(x, k, temperature=1.):
n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == (k - 1)
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x = x.scatter(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(n, k, dim).sum(dim = 1)
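# Illustrative (sketch): a soft, differentiable top-k over class logits, e.g.
#   x = torch.tensor([[1., 3., 2., 0.]])
#   differentiable_topk(x, k = 2)   # -> shape (1, 4), with mass concentrated on indices 1 and 2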
def create_clip_img_transform(image_width):
clip_mean = [0.48145466, 0.4578275, 0.40821073]
clip_std = [0.26862954, 0.26130258, 0.27577711]
transform = T.Compose([
#T.ToPILImage(),
T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor(),
T.Normalize(mean=clip_mean, std=clip_std)
])
return transform
def rand_cutout(image, size, center_bias=False, center_focus=2):
width = image.shape[-1]
min_offset = 0
max_offset = width - size
if center_bias:
# sample around image center
center = max_offset / 2
std = center / center_focus
offset_x = int(random.gauss(mu=center, sigma=std))
offset_y = int(random.gauss(mu=center, sigma=std))
# resample uniformly if over boundaries
offset_x = random.randint(min_offset, max_offset) if (offset_x > max_offset or offset_x < min_offset) else offset_x
offset_y = random.randint(min_offset, max_offset) if (offset_y > max_offset or offset_y < min_offset) else offset_y
else:
offset_x = random.randint(min_offset, max_offset)
offset_y = random.randint(min_offset, max_offset)
cutout = image[:, :, offset_x:offset_x + size, offset_y:offset_y + size]
return cutout
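# Illustrative (sketch): sample a random square crop from an image batch, e.g.
#   img = torch.rand(1, 3, 512, 512)
#   rand_cutout(img, 224).shape                  # -> torch.Size([1, 3, 224, 224])
#   rand_cutout(img, 224, center_bias = True)    # same, but biased toward the image centre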
# load biggan
class Latents(torch.nn.Module):
def __init__(
self,
num_latents = 15,
num_classes = 1000,
z_dim = 128,
max_classes = None,
class_temperature = 2.
):
super().__init__()
self.normu = torch.nn.Parameter(torch.zeros(num_latents, z_dim).normal_(std = 1))
self.cls = torch.nn.Parameter(torch.zeros(num_latents, num_classes).normal_(mean = -3.9, std = .3))
self.register_buffer('thresh_lat', torch.tensor(1))
assert not exists(max_classes) or max_classes > 0 and max_classes <= num_classes, f'max_classes must be between 0 and {num_classes}'
self.max_classes = max_classes
self.class_temperature = class_temperature
def forward(self):
if exists(self.max_classes):
classes = differentiable_topk(self.cls, self.max_classes, temperature = self.class_temperature)
else:
classes = torch.sigmoid(self.cls)
return self.normu, classes
class Model(nn.Module):
def __init__(
self,
image_size,
max_classes = None,
class_temperature = 2.,
ema_decay = 0.99
):
super().__init__()
assert image_size in (128, 256, 512), 'image size must be one of 128, 256, or 512'
self.biggan = BigGAN.from_pretrained(f'biggan-deep-{image_size}')
self.max_classes = max_classes
self.class_temperature = class_temperature
        self.ema_decay = ema_decay
self.init_latents()
def init_latents(self):
latents = Latents(
num_latents = len(self.biggan.config.layers) + 1,
num_classes = self.biggan.config.num_classes,
z_dim = self.biggan.config.z_dim,
max_classes = self.max_classes,
class_temperature = self.class_temperature
)
self.latents = EMA(latents, self.ema_decay)
def forward(self):
self.biggan.eval()
out = self.biggan(*self.latents(), 1)
return (out + 1) / 2
class BigSleep(nn.Module):
def __init__(
self,
num_cutouts = 128,
loss_coef = 100,
image_size = 512,
bilinear = False,
max_classes = None,
class_temperature = 2.,
experimental_resample = False,
ema_decay = 0.99,
center_bias = False,
larger_clip = False
):
super().__init__()
self.loss_coef = loss_coef
self.image_size = image_size
self.num_cutouts = num_cutouts
self.experimental_resample = experimental_resample
self.center_bias = center_bias
self.interpolation_settings = {'mode': 'bilinear', 'align_corners': False} if bilinear else {'mode': 'nearest'}
model_name = 'ViT-B/32' if not larger_clip else 'ViT-L/14'
self.perceptor, self.normalize_image = load(model_name, jit = False)
self.model = Model(
image_size = image_size,
max_classes = max_classes,
class_temperature = class_temperature,
ema_decay = ema_decay
)
def reset(self):
self.model.init_latents()
def sim_txt_to_img(self, text_embed, img_embed, text_type="max"):
sign = -1
if text_type == "min":
sign = 1
return sign * self.loss_coef * torch.cosine_similarity(text_embed, img_embed, dim = -1).mean()
def forward(self, text_embeds, text_min_embeds=[], return_loss = True):
width, num_cutouts = self.image_size, self.num_cutouts
out = self.model()
if not return_loss:
return out
pieces = []
for ch in range(num_cutouts):
# sample cutout size
size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
# get cutout
apper = rand_cutout(out, size, center_bias=self.center_bias)
if (self.experimental_resample):
apper = resample(apper, (224, 224))
else:
apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
pieces.append(apper)
into = torch.cat(pieces)
into = self.normalize_image(into)
image_embed = self.perceptor.encode_image(into)
latents, soft_one_hot_classes = self.model.latents()
num_latents = latents.shape[0]
latent_thres = self.model.latents.model.thresh_lat
lat_loss = torch.abs(1 - torch.std(latents, dim=1)).mean() + \
torch.abs(torch.mean(latents, dim = 1)).mean() + \
4 * torch.max(torch.square(latents).mean(), latent_thres)
for array in latents:
mean = torch.mean(array)
diffs = array - mean
var = torch.mean(torch.pow(diffs, 2.0))
std = torch.pow(var, 0.5)
zscores = diffs / std
skews = torch.mean(torch.pow(zscores, 3.0))
kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0
lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()
results = []
for txt_embed in text_embeds:
results.append(self.sim_txt_to_img(txt_embed, image_embed))
for txt_min_embed in text_min_embeds:
results.append(self.sim_txt_to_img(txt_min_embed, image_embed, "min"))
sim_loss = sum(results).mean()
return out, (lat_loss, cls_loss, sim_loss)
class Imagine(nn.Module):
def __init__(
self,
*,
text=None,
img=None,
encoding=None,
text_min = "",
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
save_every = 50,
epochs = 20,
iterations = 1050,
save_progress = False,
bilinear = False,
open_folder = True,
seed = None,
append_seed = False,
torch_deterministic = False,
max_classes = None,
class_temperature = 2.,
save_date_time = False,
save_best = False,
experimental_resample = False,
ema_decay = 0.99,
num_cutouts = 128,
center_bias = False,
larger_clip = False
):
super().__init__()
if torch_deterministic:
assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation (PyTorch 1.7.1)'
torch.set_deterministic(True)
self.seed = seed
self.append_seed = append_seed
if exists(seed):
print(f'setting seed of {seed}')
if seed == 0:
print('you can override this with --seed argument in the command line, or --random for a randomly chosen one')
torch.manual_seed(seed)
self.epochs = epochs
self.iterations = iterations
model = BigSleep(
image_size = image_size,
bilinear = bilinear,
max_classes = max_classes,
class_temperature = class_temperature,
experimental_resample = experimental_resample,
ema_decay = ema_decay,
num_cutouts = num_cutouts,
center_bias = center_bias,
larger_clip = larger_clip
).cuda()
self.model = model
self.lr = lr
self.optimizer = Adam(model.model.latents.model.parameters(), lr)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_progress = save_progress
self.save_date_time = save_date_time
self.save_best = save_best
self.current_best_score = 0
self.open_folder = open_folder
self.total_image_updates = (self.epochs * self.iterations) / self.save_every
self.encoded_texts = {
"max": [],
"min": []
}
# create img transform
self.clip_transform = create_clip_img_transform(224)
# create starting encoding
self.set_clip_encoding(text=text, img=img, encoding=encoding, text_min=text_min)
@property
def seed_suffix(self):
return f'.{self.seed}' if self.append_seed and exists(self.seed) else ''
def set_text(self, text):
self.set_clip_encoding(text = text)
def create_clip_encoding(self, text=None, img=None, encoding=None):
self.text = text
self.img = img
if encoding is not None:
encoding = encoding.cuda()
#elif self.create_story:
# encoding = self.update_story_encoding(epoch=0, iteration=1)
elif text is not None and img is not None:
encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
elif text is not None:
encoding = self.create_text_encoding(text)
elif img is not None:
encoding = self.create_img_encoding(img)
return encoding
def create_text_encoding(self, text):
tokenized_text = tokenize(text).cuda()
with torch.no_grad():
text_encoding = self.model.perceptor.encode_text(tokenized_text).detach()
return text_encoding
def create_img_encoding(self, img):
if isinstance(img, str):
img = Image.open(img)
normed_img = self.clip_transform(img).unsqueeze(0).cuda()
with torch.no_grad():
img_encoding = self.model.perceptor.encode_image(normed_img).detach()
return img_encoding
def encode_multiple_phrases(self, text, img=None, encoding=None, text_type="max"):
if text is not None and "|" in text:
self.encoded_texts[text_type] = [self.create_clip_encoding(text=prompt_min, img=img, encoding=encoding) for prompt_min in text.split("|")]
else:
self.encoded_texts[text_type] = [self.create_clip_encoding(text=text, img=img, encoding=encoding)]
def encode_max_and_min(self, text, img=None, encoding=None, text_min=""):
self.encode_multiple_phrases(text, img=img, encoding=encoding)
if text_min is not None and text_min != "":
self.encode_multiple_phrases(text_min, img=img, encoding=encoding, text_type="min")
def set_clip_encoding(self, text=None, img=None, encoding=None, text_min=""):
self.current_best_score = 0
self.text = text
self.text_min = text_min
if len(text_min) > 0:
text = text + "_wout_" + text_min[:255] if text is not None else "wout_" + text_min[:255]
text_path = create_text_path(text=text, img=img, encoding=encoding)
if self.save_date_time:
text_path = datetime.now().strftime("%y%m%d-%H%M%S-") + text_path
self.text_path = text_path
self.filename = Path(f'./{text_path}{self.seed_suffix}.png')
self.encode_max_and_min(text, img=img, encoding=encoding, text_min=text_min) # Tokenize and encode each prompt
def reset(self):
self.model.reset()
self.model = self.model.cuda()
        self.optimizer = Adam(self.model.model.latents.model.parameters(), self.lr) # match __init__: optimize the raw latents, not the EMA copies
def train_step(self, epoch, i, pbar=None):
total_loss = 0
for _ in range(self.gradient_accumulate_every):
out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
loss = sum(losses) / self.gradient_accumulate_every
total_loss += loss
loss.backward()
self.optimizer.step()
self.model.model.latents.update()
self.optimizer.zero_grad()
if (i + 1) % self.save_every == 0:
with torch.no_grad():
self.model.model.latents.eval()
out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
top_score, best = torch.topk(losses[2], k=1, largest=False)
image = self.model.model()[best].cpu()
self.model.model.latents.train()
save_image(image, str(self.filename))
if pbar is not None:
pbar.update(1)
else:
print(f'image updated at "./{str(self.filename)}"')
if self.save_progress:
total_iterations = epoch * self.iterations + i
num = total_iterations // self.save_every
save_image(image, Path(f'./{self.text_path}.{num}{self.seed_suffix}.png'))
if self.save_best and top_score.item() < self.current_best_score:
self.current_best_score = top_score.item()
save_image(image, Path(f'./{self.text_path}{self.seed_suffix}.best.png'))
return out, total_loss
def forward(self):
penalizing = ""
if len(self.text_min) > 0:
penalizing = f'penalizing "{self.text_min}"'
print(f'Imagining "{self.text_path}" {penalizing}...')
with torch.no_grad():
self.model(self.encoded_texts["max"][0]) # one warmup step due to issue with CLIP and CUDA
if self.open_folder:
open_folder('./')
self.open_folder = False
image_pbar = tqdm(total=self.total_image_updates, desc='image update', position=2, leave=True)
epoch_pbar = trange(self.epochs, desc = ' epochs', position=0, leave=True)
for epoch in (ep for ep in epoch_pbar if not terminate):
pbar = trange(self.iterations, desc=' iteration', position=1, leave=True)
image_pbar.update(0)
for i in (it for it in pbar if not terminate):
out, loss = self.train_step(epoch, i, image_pbar)
pbar.set_description(f'loss: {loss.item():04.2f}')
| big-sleep-main | big_sleep/big_sleep.py |
from collections import OrderedDict
from pathlib import Path
from typing import List, Tuple, Union
import hashlib
import os
import urllib
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform():
return Compose([
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform()
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
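# Illustrative usage (sketch; `load` downloads the checkpoint on first call, and `tokenize`
# expects a module-level byte-pair `_tokenizer` instance to be available):
#   perceptor, normalize_image = load('ViT-B/32', jit = False)
#   tokens = tokenize(['a running dog'])               # -> (1, 77) LongTensor
#   text_embed = perceptor.encode_text(tokens.cuda())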
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
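# Shape sketch (illustrative, not part of the original file; never called at import time): the
# attention pool collapses an NCHW feature map into one embedding per image by letting the mean
# token attend over all spatial positions.
def _example_attention_pool_shapes():
    pool = AttentionPool2d(spacial_dim=7, embed_dim=2048, num_heads=32, output_dim=1024)
    feats = torch.randn(2, 2048, 7, 7)
    pooled = pool(feats)
    assert pooled.shape == (2, 1024)
    return pooled.shape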
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
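# Construction sketch (illustrative, never called at import time; the numbers mirror an RN50-style
# configuration rather than anything read from a checkpoint): layers=(3, 4, 6, 3) with width=64
# produces a 2048-channel feature map that the attention pool projects to `output_dim`.
def _example_modified_resnet():
    visual = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
                            input_resolution=224, width=64)
    image = torch.randn(1, 3, 224, 224)
    embedding = visual(image)
    assert embedding.shape == (1, 1024)
    return embedding.shape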
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
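# Grid-size sketch (illustrative, not part of the original file; never called at import time): a
# 224px input with 32px patches yields a 7x7 grid, so the transformer sees 7*7 + 1 = 50 tokens
# (the patches plus one class token).
def _example_visual_transformer():
    vit = VisualTransformer(input_resolution=224, patch_size=32, width=768,
                            layers=12, heads=12, output_dim=512)
    image = torch.randn(1, 3, 224, 224)
    embedding = vit(image)
    assert embedding.shape == (1, 512)
    return embedding.shape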
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
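# Usage sketch (illustrative, not part of the original file; never called at import time): the two
# logit matrices returned by `forward` are transposes of each other, and a softmax over the image
# logits gives per-image probabilities across the candidate texts.
def _example_clip_forward_usage(model, images, texts):
    # `model` is a CLIP instance, `images` a preprocessed [N, 3, H, W] batch and `texts` an
    # [M, context_length] LongTensor produced by `tokenize`.
    logits_per_image, logits_per_text = model(images, texts)
    probs = logits_per_image.softmax(dim=-1)  # shape [N, M]
    return probs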
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
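# Usage sketch (illustrative; the checkpoint filename below is hypothetical and nothing is loaded
# at import time): `build_model` infers the architecture purely from tensor shapes in a CLIP state
# dict, so the same call works for both the ResNet and ViT variants.
def _example_build_model_usage(checkpoint_path="ViT-B-32.pt"):
    jit_archive = torch.jit.load(checkpoint_path, map_location="cpu")
    model = build_model(jit_archive.state_dict())
    return model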
import gzip
import html
import os
from pathlib import Path  # needed by SimpleTokenizer below, which reads the BPE vocab via Path
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
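# Sanity-check sketch (illustrative, never called at import time): the byte-to-unicode table is a
# bijection over all 256 byte values, and printable ASCII maps to itself, so the BPE merges never
# have to deal with raw whitespace or control bytes.
def _example_bytes_to_unicode():
    mapping = bytes_to_unicode()
    assert len(mapping) == 256 and len(set(mapping.values())) == 256
    assert mapping[ord("a")] == "a"
    return mapping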
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:  # `first` not found in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
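# Round-trip sketch (illustrative, not part of the original file; never called at import time):
# encoding then decoding recovers the cleaned, lower-cased text, with each word followed by a
# space because `decode` renders the </w> end-of-word marker as ' '.
def _example_tokenizer_round_trip():
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("Hello, world!")
    text = tokenizer.decode(ids)
    assert text.strip() == "hello , world !"
    return ids, text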
import gzip
_tokenizer = SimpleTokenizer()
| big-sleep-main | big_sleep/clip.py |
from setuptools import setup, find_packages
setup(
name = 'rotary-embedding-torch',
packages = find_packages(),
version = '0.3.0',
license='MIT',
description = 'Rotary Embedding - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/rotary-embedding-torch',
keywords = [
'artificial intelligence',
'deep learning',
'positional embedding'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| rotary-embedding-torch-main | setup.py |
from rotary_embedding_torch.rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding, broadcat, apply_learned_rotations
| rotary-embedding-torch-main | rotary_embedding_torch/__init__.py |
from math import pi, log
import torch
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
# rotary embedding helper functions
def rotate_half(x):
x = rearrange(x, '... (d r) -> ... d r', r = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d r -> ... (d r)')
def apply_rotary_emb(freqs, t, start_index = 0, scale = 1.):
rot_dim, seq_len = freqs.shape[-1], t.shape[-2]
freqs = freqs[-seq_len:, :]
freqs = freqs.to(t)
end_index = start_index + rot_dim
assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
return torch.cat((t_left, t, t_right), dim = -1)
# learned rotation helpers
def apply_learned_rotations(rotations, t, start_index = 0, freq_ranges = None):
if exists(freq_ranges):
rotations = einsum('..., f -> ... f', rotations, freq_ranges)
rotations = rearrange(rotations, '... r f -> ... (r f)')
rotations = repeat(rotations, '... n -> ... (n r)', r = 2)
return apply_rotary_emb(rotations, t, start_index = start_index)
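# Shape sketch (illustrative, not part of the original file; never called at import time):
# `apply_rotary_emb` rotates only the first `rot_dim` feature channels and passes the rest
# through, so the output always matches the input shape. The `freqs` tensor here is a dummy
# stand-in whose layout (seq, rot_dim) is the only thing being documented.
def _example_apply_rotary_emb_shapes():
    seq_len, dim, rot_dim = 16, 64, 32
    t = torch.randn(1, 8, seq_len, dim)        # (batch, heads, seq, head dim)
    freqs = torch.randn(seq_len, rot_dim)      # per-position angles, already repeated over pairs
    out = apply_rotary_emb(freqs, t)
    assert out.shape == t.shape
    return out.shape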
# classes
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
custom_freqs = None,
freqs_for = 'lang',
theta = 10000,
max_freq = 10,
num_freqs = 1,
learned_freq = False,
use_xpos = False,
xpos_scale_base = 512,
interpolate_factor = 1.,
theta_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
theta *= theta_rescale_factor ** (dim / (dim - 2))
if exists(custom_freqs):
freqs = custom_freqs
elif freqs_for == 'lang':
freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
elif freqs_for == 'pixel':
freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
elif freqs_for == 'constant':
freqs = torch.ones(num_freqs).float()
else:
raise ValueError(f'unknown modality {freqs_for}')
self.cache = dict()
self.cache_scale = dict()
self.freqs = nn.Parameter(freqs, requires_grad = learned_freq)
# interpolation factors
assert interpolate_factor >= 1.
self.interpolate_factor = interpolate_factor
# xpos
self.use_xpos = use_xpos
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = xpos_scale_base
self.register_buffer('scale', scale)
def get_seq_pos(self, seq_len, device, dtype, offset = 0):
return (torch.arange(seq_len, device = device, dtype = dtype) + offset) / self.interpolate_factor
def rotate_queries_or_keys(self, t, seq_dim = -2, offset = 0, freq_seq_len = None):
assert not self.use_xpos, 'you must use `.rotate_queries_and_keys` method instead and pass in both queries and keys, for length extrapolatable rotary embeddings'
device, dtype, seq_len = t.device, t.dtype, t.shape[seq_dim]
if exists(freq_seq_len):
assert freq_seq_len >= seq_len
seq_len = freq_seq_len
freqs = self.forward(lambda: self.get_seq_pos(seq_len, device = device, dtype = dtype, offset = offset), cache_key = f'freqs:{seq_len}|offset:{offset}')
return apply_rotary_emb(freqs, t)
def rotate_queries_with_cached_keys(self, q, k, seq_dim = -2):
q_len, k_len = q.shape[seq_dim], k.shape[seq_dim]
assert q_len <= k_len
q = self.rotate_queries_or_keys(q, seq_dim = seq_dim, freq_seq_len = k_len)
k = self.rotate_queries_or_keys(k, seq_dim = seq_dim)
return q, k
def rotate_queries_and_keys(self, q, k, seq_dim = -2):
assert self.use_xpos
device, dtype, seq_len = q.device, q.dtype, q.shape[seq_dim]
seq = self.get_seq_pos(seq_len, dtype = dtype, device = device)
freqs = self.forward(lambda: seq, cache_key = f'freqs:{seq_len}')
scale = self.get_scale(lambda: seq, cache_key = f'scale:{seq_len}').to(dtype)
rotated_q = apply_rotary_emb(freqs, q, scale = scale)
rotated_k = apply_rotary_emb(freqs, k, scale = scale ** -1)
return rotated_q, rotated_k
def get_scale(self, t, cache_key = None):
assert self.use_xpos
if exists(cache_key) and cache_key in self.cache:
return self.cache[cache_key]
if callable(t):
t = t()
scale = 1.
if self.use_xpos:
power = (t - len(t) // 2) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
if exists(cache_key):
self.cache[cache_key] = scale
return scale
def forward(self, t, cache_key = None):
if exists(cache_key) and cache_key in self.cache:
return self.cache[cache_key]
if callable(t):
t = t()
freqs = self.freqs
freqs = einsum('..., f -> ... f', t.type(freqs.dtype), freqs)
freqs = repeat(freqs, '... n -> ... (n r)', r = 2)
if exists(cache_key):
self.cache[cache_key] = freqs
return freqs
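# Usage sketch (illustrative, not part of the original file; never called at import time): rotate
# queries and keys before computing attention scores. Frequencies are cached per sequence length,
# so repeated calls with the same length reuse the cached tensor.
def _example_rotary_embedding_usage():
    rotary = RotaryEmbedding(dim = 32)
    q = torch.randn(1, 8, 128, 64)   # (batch, heads, seq, head dim)
    k = torch.randn(1, 8, 128, 64)
    q = rotary.rotate_queries_or_keys(q)
    k = rotary.rotate_queries_or_keys(k)
    assert q.shape == (1, 8, 128, 64)
    return q, k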
| rotary-embedding-torch-main | rotary_embedding_torch/rotary_embedding_torch.py |
from setuptools import setup, find_packages
setup(
name = 'lambda-networks',
packages = find_packages(),
version = '0.4.0',
license='MIT',
description = 'Lambda Networks - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/lambda-networks',
keywords = [
'artificial intelligence',
'attention mechanism',
'image recognition'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| lambda-networks-main | setup.py |
from lambda_networks.lambda_networks import LambdaLayer
λLayer = LambdaLayer
| lambda-networks-main | lambda_networks/__init__.py |
import torch
from torch import nn, einsum
from einops import rearrange
# helpers functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_rel_pos(n):
pos = torch.meshgrid(torch.arange(n), torch.arange(n))
pos = rearrange(torch.stack(pos), 'n i j -> (i j) n') # [n*n, 2] pos[n] = (i, j)
rel_pos = pos[None, :] - pos[:, None] # [n*n, n*n, 2] rel_pos[n, m] = (rel_i, rel_j)
rel_pos += n - 1 # shift value range from [-n+1, n-1] to [0, 2n-2]
return rel_pos
# lambda layer
class LambdaLayer(nn.Module):
def __init__(
self,
dim,
*,
dim_k,
n = None,
r = None,
heads = 4,
dim_out = None,
dim_u = 1):
super().__init__()
dim_out = default(dim_out, dim)
self.u = dim_u # intra-depth dimension
self.heads = heads
assert (dim_out % heads) == 0, 'values dimension must be divisible by number of heads for multi-head query'
dim_v = dim_out // heads
self.to_q = nn.Conv2d(dim, dim_k * heads, 1, bias = False)
self.to_k = nn.Conv2d(dim, dim_k * dim_u, 1, bias = False)
self.to_v = nn.Conv2d(dim, dim_v * dim_u, 1, bias = False)
self.norm_q = nn.BatchNorm2d(dim_k * heads)
self.norm_v = nn.BatchNorm2d(dim_v * dim_u)
self.local_contexts = exists(r)
if exists(r):
assert (r % 2) == 1, 'Receptive kernel size should be odd'
self.pos_conv = nn.Conv3d(dim_u, dim_k, (1, r, r), padding = (0, r // 2, r // 2))
else:
assert exists(n), 'You must specify the window size (n=h=w)'
rel_lengths = 2 * n - 1
self.rel_pos_emb = nn.Parameter(torch.randn(rel_lengths, rel_lengths, dim_k, dim_u))
self.rel_pos = calc_rel_pos(n)
def forward(self, x):
b, c, hh, ww, u, h = *x.shape, self.u, self.heads
q = self.to_q(x)
k = self.to_k(x)
v = self.to_v(x)
q = self.norm_q(q)
v = self.norm_v(v)
q = rearrange(q, 'b (h k) hh ww -> b h k (hh ww)', h = h)
k = rearrange(k, 'b (u k) hh ww -> b u k (hh ww)', u = u)
v = rearrange(v, 'b (u v) hh ww -> b u v (hh ww)', u = u)
k = k.softmax(dim=-1)
λc = einsum('b u k m, b u v m -> b k v', k, v)
Yc = einsum('b h k n, b k v -> b h v n', q, λc)
if self.local_contexts:
v = rearrange(v, 'b u v (hh ww) -> b u v hh ww', hh = hh, ww = ww)
λp = self.pos_conv(v)
Yp = einsum('b h k n, b k v n -> b h v n', q, λp.flatten(3))
else:
n, m = self.rel_pos.unbind(dim = -1)
rel_pos_emb = self.rel_pos_emb[n, m]
λp = einsum('n m k u, b u v m -> b n k v', rel_pos_emb, v)
Yp = einsum('b h k n, b n k v -> b h v n', q, λp)
Y = Yc + Yp
out = rearrange(Y, 'b h v (hh ww) -> b (h v) hh ww', hh = hh, ww = ww)
return out
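# Usage sketch (illustrative, not part of the original file; never called at import time): a
# global-context lambda layer whose positional lambdas come from learned relative position
# embeddings (pass `r` instead of `n` for the local-context variant). Input and output are
# ordinary NCHW feature maps of the same resolution.
def _example_lambda_layer_usage():
    layer = LambdaLayer(dim = 32, dim_out = 32, n = 32, dim_k = 16, heads = 4, dim_u = 1)
    x = torch.randn(1, 32, 32, 32)
    out = layer(x)
    assert out.shape == (1, 32, 32, 32)
    return out.shape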
| lambda-networks-main | lambda_networks/lambda_networks.py |
import tensorflow as tf
from einops.layers.tensorflow import Rearrange
from tensorflow.keras.layers import Conv2D, BatchNormalization, Conv3D, ZeroPadding3D, Softmax, Lambda, Add, Layer
from tensorflow.keras import initializers
from tensorflow import einsum, nn, meshgrid
# helpers functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_rel_pos(n):
pos = tf.stack(meshgrid(tf.range(n), tf.range(n), indexing = 'ij'))
pos = Rearrange('n i j -> (i j) n')(pos) # [n*n, 2] pos[n] = (i, j)
rel_pos = pos[None, :] - pos[:, None] # [n*n, n*n, 2] rel_pos[n, m] = (rel_i, rel_j)
rel_pos += n - 1 # shift value range from [-n+1, n-1] to [0, 2n-2]
return rel_pos
# lambda layer
class LambdaLayer(Layer):
def __init__(
self,
*,
dim_k,
n = None,
r = None,
heads = 4,
dim_out = None,
dim_u = 1):
super(LambdaLayer, self).__init__()
self.out_dim = dim_out
self.u = dim_u # intra-depth dimension
self.heads = heads
assert (dim_out % heads) == 0, 'values dimension must be divisible by number of heads for multi-head query'
self.dim_v = dim_out // heads
self.dim_k = dim_k
self.heads = heads
self.to_q = Conv2D(self.dim_k * heads, 1, use_bias=False)
self.to_k = Conv2D(self.dim_k * dim_u, 1, use_bias=False)
self.to_v = Conv2D(self.dim_v * dim_u, 1, use_bias=False)
self.norm_q = BatchNormalization()
self.norm_v = BatchNormalization()
self.local_contexts = exists(r)
if exists(r):
assert (r % 2) == 1, 'Receptive kernel size should be odd'
self.pos_conv = Conv3D(dim_k, (1, r, r), padding='same')
else:
assert exists(n), 'You must specify the window length (n = h = w)'
rel_length = 2 * n - 1
self.rel_pos_emb = self.add_weight(name='pos_emb',
shape=(rel_length, rel_length, dim_k, dim_u),
initializer=initializers.random_normal,
trainable=True)
self.rel_pos = calc_rel_pos(n)
def call(self, x, **kwargs):
b, hh, ww, c, u, h = *x.get_shape().as_list(), self.u, self.heads
q = self.to_q(x)
k = self.to_k(x)
v = self.to_v(x)
q = self.norm_q(q)
v = self.norm_v(v)
q = Rearrange('b hh ww (h k) -> b h k (hh ww)', h=h)(q)
k = Rearrange('b hh ww (u k) -> b u k (hh ww)', u=u)(k)
v = Rearrange('b hh ww (u v) -> b u v (hh ww)', u=u)(v)
k = nn.softmax(k)
Lc = einsum('b u k m, b u v m -> b k v', k, v)
Yc = einsum('b h k n, b k v -> b n h v', q, Lc)
if self.local_contexts:
v = Rearrange('b u v (hh ww) -> b v hh ww u', hh=hh, ww=ww)(v)
Lp = self.pos_conv(v)
Lp = Rearrange('b v h w k -> b v k (h w)')(Lp)
Yp = einsum('b h k n, b v k n -> b n h v', q, Lp)
else:
rel_pos_emb = tf.gather_nd(self.rel_pos_emb, self.rel_pos)
Lp = einsum('n m k u, b u v m -> b n k v', rel_pos_emb, v)
Yp = einsum('b h k n, b n k v -> b n h v', q, Lp)
Y = Yc + Yp
out = Rearrange('b (hh ww) h v -> b hh ww (h v)', hh = hh, ww = ww)(Y)
return out
def compute_output_shape(self, input_shape):
return (*input_shape[:2], self.out_dim)
def get_config(self):
config = {'output_dim': (*self.input_shape[:2], self.out_dim)}
base_config = super(LambdaLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| lambda-networks-main | lambda_networks/tfkeras.py |
from setuptools import setup, find_packages
setup(
name = 'speculative-decoding',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Speculative Decoding',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/speculative-decoding',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'efficient decoding'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'rotary-embedding-torch>=0.3.0',
'torch>=2.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| speculative-decoding-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import time
from functools import wraps
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from speculative_decoding import (
Decoder,
base_decoding,
speculative_decoding
)
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRAD_ACCUM_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
DEVICE_STR = 'cuda' if torch.cuda.is_available() else 'cpu'
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
def benchmark(fn):
@wraps(fn)
def inner(*args, **kwargs):
start_time = time.time()
out = fn(*args, **kwargs)
end_time = time.time()
return out, end_time - start_time
return inner
# instantiate transformer
device = torch.device(DEVICE_STR)
model = Decoder(
num_tokens = 256,
dim = 512,
depth = 8
).to(device)
# small model
small_model = Decoder(
num_tokens = 256,
dim = 256,
depth = 2
).to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
small_optim = Adam(small_model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
model.train()
small_model.train()
for _ in range(GRAD_ACCUM_EVERY):
data = next(train_loader)
loss = model(data, return_loss = True)
small_loss = small_model(data, return_loss = True)
(loss / GRAD_ACCUM_EVERY).backward()
(small_loss / GRAD_ACCUM_EVERY).backward()
print(f"training loss: {loss.item():.3f}")
print(f"training small loss: {small_loss.item():.3f}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
torch.nn.utils.clip_grad_norm_(small_model.parameters(), 0.5)
optim.step()
optim.zero_grad()
small_optim.step()
small_optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
valid_data = next(val_loader)
loss = model(valid_data, return_loss = True)
print(f"validation loss: {loss.item():.3f}")
small_loss = small_model(valid_data, return_loss = True)
print(f"validation small loss: {small_loss.item():.3f}")
if i % GENERATE_EVERY == 0:
model.eval()
small_model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
prompt = inp[None, ...]
sampled, base_decode_elapsed = benchmark(base_decoding)(model, prompt, GENERATE_LENGTH)
spec_decoding_sampled, spec_decoding_elapsed = benchmark(speculative_decoding)(model, small_model, prompt, GENERATE_LENGTH)
base_decode_output = decode_tokens(sampled[0])
spec_decode_output = decode_tokens(spec_decoding_sampled[0])
print("\nbase decoding:\n\n", base_decode_output, "\n")
print("\nspec decoding:\n\n", spec_decode_output, "\n")
print(f'base decoding in: {base_decode_elapsed:.3f}s\n')
print(f'spec decoding in: {spec_decoding_elapsed:.3f}s\n')
| speculative-decoding-main | train.py |
from speculative_decoding.speculative_decoding import (
Decoder,
base_decoding,
speculative_decoding
)
| speculative-decoding-main | speculative_decoding/__init__.py |
import math
import torch
from torch.nn import Module, ModuleList
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from rotary_embedding_torch import RotaryEmbedding
from beartype import beartype
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(-1, ind, val)
return probs
# different decoding strategies
@torch.no_grad()
def base_decoding(
net: Module,
prompt: Tensor,
seq_len: int,
temperature = 1.,
filter_thres = 0.9,
):
prompt_seq_len, out = prompt.shape[-1], prompt.clone()
sample_num_times = max(0, seq_len - prompt_seq_len)
cache = None
for _ in range(sample_num_times):
logits, cache = net(out, cache = cache, return_cache = True)
logits = logits[:, -1]
logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(logits, temperature = temperature, dim = -1)
out = torch.cat((out, sample[..., None]), dim = -1)
return out[..., prompt_seq_len:]
@torch.no_grad()
def speculative_decoding(
net: Module,
small_net: Module,
prompt: Tensor,
seq_len: int,
gamma: int = 5,
temperature = 1.,
filter_thres = 0.9,
lenience = 1.
):
"""
eq. algorithm 1 in paper https://arxiv.org/abs/2211.17192
"""
prompt_seq_len, out, device = prompt.shape[-1], prompt.clone(), prompt.device
sample_num_times = max(0, seq_len - prompt_seq_len)
assert prompt.shape[0] == 1, 'batched spec decoding not supported yet'
cache = None
small_cache = None
while out.shape[-1] < seq_len:
# predict with smaller network
all_small_logits = []
q_sampled_out = []
for _ in range(gamma):
small_logits, small_cache = small_net(out, cache = small_cache, return_cache = True)
small_logits = small_logits[:, -1]
small_logits = top_k(small_logits, thres = filter_thres)
all_small_logits.append(small_logits)
sample = gumbel_sample(small_logits, temperature = temperature, dim = -1)
out = torch.cat((out, sample[..., None]), dim = -1)
q_sampled_out.append(rearrange(sample, 'b -> b 1 1'))
q_sampled_out = torch.cat(q_sampled_out, dim = -2)
small_logits = torch.stack(all_small_logits, dim = -2)
# verify with larger network
logits, cache = net(out, cache = cache, return_cache = True)
logits = logits[..., -(gamma + 1):, :]
logits = top_k(logits, thres = filter_thres)
# prob and prob of small model (p(x) and q(x) in algorithm 1)
prob = (logits / temperature).softmax(dim = -1)
small_prob = (small_logits / temperature).softmax(dim = -1)
p = prob[:, :-1].gather(-1, q_sampled_out)
q = small_prob.gather(-1, q_sampled_out) * lenience
r = random_uniform = torch.zeros_like(q).float().uniform_(0, 1)
n = accepted = (((r > (p / q)).cumsum(dim = -1)) == 0).sum().item()
prob_next = prob[:, -1]
if n < gamma:
adjusted_prob = F.relu(prob[:, n] - small_prob[:, n])
prob_next = adjusted_prob / adjusted_prob.sum(dim = -1, keepdim = True)
out = out[:, :-(gamma - n)]
# adjust cache
next_seq_len = out.shape[-1]
cache = cache[..., :next_seq_len, :]
small_cache = small_cache[..., :next_seq_len, :]
# sample the additional token
next_token = torch.multinomial(prob_next, 1)
out = torch.cat((out, next_token), dim = -1)
return out[..., prompt_seq_len:]
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
# attention and feedforward
class CausalAttention(Module):
def __init__(
self,
dim,
*,
rotary_emb: RotaryEmbedding,
dim_head = 64,
heads = 8,
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
dim_inner = dim_head * heads
self.norm = RMSNorm(dim)
self.rotary_emb = rotary_emb
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(
self,
x,
cache = None
):
h, device = self.heads, x.device
x = self.norm(x)
q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = h)
if exists(cache):
ck, cv = cache
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
cached_kv = torch.stack((k, v))
q, k = self.rotary_emb.rotate_queries_with_cached_keys(q, k)
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out, cached_kv
def FeedForward(dim, mult = 4):
dim_inner = dim * mult
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner),
nn.GELU(),
nn.Linear(dim_inner, dim)
)
# main class
class Decoder(Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
heads = 8,
dim_head = 64,
ff_mult = 4,
weight_tie_layers = False,
ignore_index = -1
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = ModuleList([])
rotary_emb = RotaryEmbedding(dim = dim_head)
attn = None
ff = None
for _ in range(depth):
if not weight_tie_layers or not (exists(attn) and exists(ff)):
attn = CausalAttention(dim = dim, dim_head = dim_head, heads = heads, rotary_emb = rotary_emb)
ff = FeedForward(dim = dim, mult = ff_mult)
self.layers.append(ModuleList([attn, ff]))
self.to_logits = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, num_tokens, bias = False)
)
self.ignore_index = ignore_index
def forward(
self,
x,
return_loss = False,
return_cache = False,
cache = None
):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
x = self.token_emb(x)
# next cache
new_cached_kvs = []
# if cache passed in, just use the last token
if exists(cache):
assert not self.training
num_tokens_keep = x.shape[-2] - cache.shape[-2]
x = x[:, -num_tokens_keep:]
cache = default(cache, [])
iter_cache = iter(cache)
for attn, ff in self.layers:
residual = x
attn_out, cached_kv = attn(x, cache = next(iter_cache, None))
x = residual + attn_out
new_cached_kvs.append(cached_kv)
x = ff(x) + x
new_cached_kvs = torch.stack(new_cached_kvs)
logits = self.to_logits(x)
if not return_loss:
if not return_cache:
return logits
return logits, new_cached_kvs
return F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
labels,
ignore_index = self.ignore_index
)
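# Usage sketch (illustrative, not part of the original file; never called at import time): pair a
# large and a small Decoder, let the small one draft `gamma` tokens per round and the large one
# verify them. Both models below are randomly initialised, so the sampled text is meaningless; the
# point is only the call signature and output shape (the prompt itself is not returned).
def _example_speculative_decoding_usage():
    net = Decoder(num_tokens = 256, dim = 512, depth = 8)
    small_net = Decoder(num_tokens = 256, dim = 256, depth = 2)
    prompt = torch.randint(0, 256, (1, 128))
    generated = speculative_decoding(net, small_net, prompt, seq_len = 256, gamma = 5)
    assert generated.shape[0] == 1 and generated.shape[-1] >= 256 - 128
    return generated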
| speculative-decoding-main | speculative_decoding/speculative_decoding.py |
from setuptools import setup, find_packages
setup(
name = 'TPDNE-utils',
packages = find_packages(exclude=[]),
version = '0.0.11',
license='MIT',
description = 'TPDNE',
include_package_data = True,
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/TPDNE',
keywords = [
'thispersondoesnotexist'
],
install_requires = [
'beartype',
'einops>=0.6',
'jinja2',
'numpy',
'pillow'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| TPDNE-main | setup.py |
import os
import sys
import numpy as np
from time import time, sleep
from pathlib import Path
from functools import wraps
from PIL import Image
from beartype import beartype
from beartype.typing import Callable, Optional
from einops import rearrange, repeat
# templating
from jinja2 import Environment, FileSystemLoader
script_path = Path(__file__)
current_dir = script_path.parents[0]
environment = Environment(loader = FileSystemLoader(str(current_dir)))
nginx_template = environment.get_template('nginx.conf.tmpl')
systemd_service_template = environment.get_template('tpdne.service.tmpl')
# helper functions
def exists(val):
return val is not None
# handle everything that was confusing to me when first encountering image tensors
def auto_handle_image_tensor(t):
if t.ndim == 4:
# assume batch is first dimension and take first sample
t = t[0]
if t.ndim == 2:
# very rare case, but assume greyscale
t = rearrange(t, 'h w -> h w 1')
if t.shape[0] <= 3:
# channel first
t = rearrange(t, 'c h w -> h w c')
assert t.shape[-1] <= 3, 'image tensor must be returned in the shape (height, width, channels), where channels is either 3 or 1'
if t.shape[-1] == 1:
t = repeat(t, 'h w 1 -> h w c', c = 3)
# handle scale
if np.issubdtype(t.dtype, np.floating):  # the `np.float` alias was removed from NumPy; accept any float dtype
has_negatives = np.any(t < 0)
if has_negatives:
t = t * 127.5 + 128
else:
t = t * 255
t = t.astype(np.uint8)
return t.clip(0, 255)
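# Behaviour sketch (illustrative, not part of the original file; never called at import time):
# whatever layout the sampling function returns, the handler normalises it to an (H, W, 3) uint8
# array in [0, 255].
def _example_auto_handle_image_tensor():
    chw_float = np.random.uniform(-1., 1., size = (3, 64, 64))   # channel-first floats in [-1, 1]
    out = auto_handle_image_tensor(chw_float)
    assert out.shape == (64, 64, 3) and out.dtype == np.uint8
    return out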
# main function
@beartype
def sample_image_and_save_repeatedly(
fn: Callable[..., np.ndarray], # function that returns a ndarray of shape (3, <width>, <height>)
output_path: str = './out/random', # path to the output image, without extension (will be saved as webp)
*,
call_every_ms: int = 250, # how often to sample
tmp_dir: str = '/tmp', # to store temporary images, before symbolically linking to the output path
num_rotated_tmp_images: int = 10,
image_format: str = 'jpeg',
verbose: bool = True,
quality = 99,
resize_image_to: Optional[int] = None,
generate_favicon: bool = True,
favicon_size: int = 32,
generate_nginx_conf: bool = True,
symbolic_link_nginx_conf: bool = True,
nginx_sites_available_path: str = '/etc/nginx/sites-available',
nginx_conf_filename = 'default',
generate_systemd_service_conf: bool = False,
systemd_service_path: str = '/etc/systemd/system',
systemd_service_name = 'tpdne',
domain_name = '_'
):
assert 0 < quality <= 100
assert favicon_size in {16, 32}
assert image_format in {'jpeg', 'png', 'webp'}
tmp_dir = Path(tmp_dir)
output_path = Path(output_path)
assert output_path.suffix == '', 'output path suffix will be automatically determined by `image_format` keyword arg'
output_path = output_path.with_suffix(f'.{image_format}')
call_every_seconds = call_every_ms / 1000
assert tmp_dir.is_dir()
root = output_path.parents[0]
root.mkdir(parents = True, exist_ok = True)
tmp_image_index = 0
# linking nginx
if generate_nginx_conf:
nginx_sites_path = Path(nginx_sites_available_path)
nginx_sites_conf_path = nginx_sites_path / nginx_conf_filename
assert nginx_sites_path.is_dir()
nginx_conf_text = nginx_template.render(
root = str(root.resolve()),
index = output_path.name,
server_name = domain_name
)
tmp_conf_path = Path(tmp_dir / 'nginx.server.conf')
tmp_conf_path.write_text(nginx_conf_text)
print(f'nginx server conf generated at {str(tmp_conf_path)}')
if symbolic_link_nginx_conf:
os.system(f'ln -nfs {str(tmp_conf_path)} {nginx_sites_conf_path}')
print(f'nginx conf linked to {nginx_sites_conf_path}\nrun `systemctl reload nginx` for it to be in effect')
if generate_systemd_service_conf and not exists(os.getenv('LAUNCHED_FROM_SYSTEMD', None)):
systemd_service_path = Path(systemd_service_path)
systemd_service_conf_path = systemd_service_path / f'{systemd_service_name}.service'
assert systemd_service_path.is_dir()
systemd_conf_text = systemd_service_template.render(
working_directory = str(current_dir.resolve()),
python_executable = sys.executable,
script_path = str(script_path.resolve())
)
tmp_service_path = Path(tmp_dir / 'tpdne.services')
tmp_service_path.write_text(systemd_conf_text)
os.system(f'ln -nfs {str(tmp_service_path)} {str(systemd_service_conf_path)}')
print(f'service {systemd_service_name}.service created at {str(systemd_service_conf_path)}')
print(f'run `systemctl enable {systemd_service_name}.service` to start this script')
print(f'then run `systemctl status {systemd_service_name}.service` to see the status')
sys.exit()
# invoke `fn` in a while loop
while True:
start = time()
image_tensor = fn()
image_tensor = auto_handle_image_tensor(image_tensor)
tmp_image_index = (tmp_image_index + 1) % num_rotated_tmp_images
tmp_path = str(tmp_dir / f'{tmp_image_index}.{image_format}')
pil_image = Image.fromarray(image_tensor, 'RGB')
if exists(resize_image_to):
pil_image = pil_image.resize((resize_image_to, resize_image_to))
# depending on image format, pass in different kwargs on pillow image save
image_save_kwargs = dict()
if image_format == 'jpeg':
image_save_kwargs = dict(optimize = True, progressive = True)
elif image_format == 'webp':
image_save_kwargs = dict(format = 'webp')
# save image to tmp path
pil_image.save(tmp_path, quality = quality, **image_save_kwargs)
# symbolically link to the live output path
# if one tries to serve directly from the tmp path, client can receive incomplete images
os.system(f'ln -nfs {tmp_path} {output_path}')
if generate_favicon:
tmp_favicon_path = str(tmp_dir / f'favicon_{tmp_image_index}.png')
output_favicon_path = output_path.parents[0] / 'favicon.png'
small_pil_image = pil_image.resize((favicon_size, favicon_size))
small_pil_image.save(tmp_favicon_path)
os.system(f'ln -nfs {tmp_favicon_path} {output_favicon_path}')
elapsed = time() - start
if verbose:
print(f'{elapsed:.3f}s - tmp image at {tmp_path}, output image at {output_path}')
# make sure images are generated at least after `call_every_ms` milliseconds
if elapsed >= call_every_seconds:
continue
sleep(call_every_seconds - elapsed)
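# Usage sketch (illustrative; the generator below is a random stand-in for a real model, and this
# helper is never called at import time): any callable returning an ndarray image can be served.
# With nginx and systemd generation disabled, the call only writes rotated temp images and
# refreshes the symlink at the output path. Note that it loops forever by design.
def _example_serve_random_images():
    def sample():
        return np.random.rand(3, 256, 256).astype(np.float32)   # fake "model" output in [0, 1)
    sample_image_and_save_repeatedly(
        sample,
        './out/random',
        call_every_ms = 250,
        generate_nginx_conf = False,
        generate_systemd_service_conf = False
    )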
| TPDNE-main | TPDNE_utils/tpdne.py |
from TPDNE_utils.tpdne import sample_image_and_save_repeatedly
| TPDNE-main | TPDNE_utils/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'unet_stylegan2',
packages = find_packages(),
scripts=['bin/unet_stylegan2'],
version = '0.5.1',
license='GPLv3+',
description = 'StyleGan2 with UNet Discriminator, in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/unet-stylegan2',
keywords = ['generative adversarial networks', 'artificial intelligence'],
install_requires=[
'fire',
'numpy',
'retry',
'tqdm',
'torch',
'torchvision',
'pillow',
'linear_attention_transformer>=0.12.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| unet-stylegan2-master | setup.py |
import torch
import torch.nn.functional as F
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous(memory_format = torch.contiguous_format)
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)
return x
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
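# Usage sketch (illustrative, not part of the original file; never called at import time): the
# augmentations are plain tensor ops, so gradients flow through them and the same pipeline can be
# applied to both real and generated images before the discriminator.
def _example_diff_augment():
    images = torch.rand(8, 3, 64, 64)
    augmented = DiffAugment(images, types = ['color', 'translation', 'cutout'])
    assert augmented.shape == images.shape
    return augmented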
| unet-stylegan2-master | unet_stylegan2/diff_augment.py |
from unet_stylegan2.unet_stylegan2 import Trainer, StyleGAN2, NanException
| unet-stylegan2-master | unet_stylegan2/__init__.py |
import os
import sys
import math
import fire
import json
from tqdm import tqdm
from math import floor, log2
from random import random
from shutil import rmtree
from functools import partial
import multiprocessing
import numpy as np
import torch
from torch import nn
from torch.utils import data
import torch.nn.functional as F
from torch.optim import Adam
from torch.autograd import grad as torch_grad
import torchvision
from torchvision import transforms
from linear_attention_transformer import ImageLinearAttention
from PIL import Image
from pathlib import Path
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
from unet_stylegan2.diff_augment import DiffAugment
assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'
num_cores = multiprocessing.cpu_count()
# constants
EXTS = ['jpg', 'jpeg', 'png', 'webp']
EPS = 1e-8
# helper classes
class NanException(Exception):
pass
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
class RandomApply(nn.Module):
def __init__(self, prob, fn, fn_else = lambda x: x):
super().__init__()
self.fn = fn
self.fn_else = fn_else
self.prob = prob
def forward(self, x):
fn = self.fn if random() < self.prob else self.fn_else
return fn(x)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class Flatten(nn.Module):
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x.flatten(self.index)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x):
return self.fn(x) * self.g
# one layer of self-attention and feedforward, for images
attn_and_ff = lambda chan: nn.Sequential(*[
Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
# helpers
def default(value, d):
return d if value is None else value
def cycle(iterable):
while True:
for i in iterable:
yield i
def cast_list(el):
return el if isinstance(el, list) else [el]
def is_empty(t):
if isinstance(t, torch.Tensor):
return t.nelement() == 0
return t is None
def raise_if_nan(t):
if torch.isnan(t):
raise NanException
def loss_backwards(fp16, loss, optimizer, **kwargs):
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(**kwargs)
else:
loss.backward(**kwargs)
def gradient_penalty(images, outputs, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(outputs=outputs, inputs=images,
grad_outputs=list(map(lambda t: torch.ones(t.size()).cuda(), outputs)),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.reshape(batch_size, -1)
return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
def calc_pl_lengths(styles, images):
num_pixels = images.shape[2] * images.shape[3]
pl_noise = torch.randn(images.shape).cuda() / math.sqrt(num_pixels)
outputs = (images * pl_noise).sum()
pl_grads = torch_grad(outputs=outputs, inputs=styles,
grad_outputs=torch.ones(outputs.shape).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
return (pl_grads ** 2).sum(dim=2).mean(dim=1).sqrt()
def noise(n, latent_dim):
return torch.randn(n, latent_dim).cuda()
def noise_list(n, layers, latent_dim):
return [(noise(n, latent_dim), layers)]
def mixed_list(n, layers, latent_dim):
tt = int(torch.rand(()).numpy() * layers)
return noise_list(n, tt, latent_dim) + noise_list(n, layers - tt, latent_dim)
def latent_to_w(style_vectorizer, latent_descr):
return [(style_vectorizer(z), num_layers) for z, num_layers in latent_descr]
def image_noise(n, im_size):
return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).cuda()
def leaky_relu(p=0.2):
return nn.LeakyReLU(p)
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def styles_def_to_tensor(styles_def):
return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)
def set_requires_grad(model, bool):
for p in model.parameters():
p.requires_grad = bool
def slerp(val, low, high):
low_norm = low / torch.norm(low, dim=1, keepdim=True)
high_norm = high / torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(1))
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
return res
def warmup(start, end, max_steps, current_step):
if current_step > max_steps:
return end
return (end - start) * (current_step / max_steps) + start
def log(t, eps = 1e-6):
return torch.log(t + eps)
def cutmix_coordinates(height, width, alpha = 1.):
lam = np.random.beta(alpha, alpha)
cx = np.random.uniform(0, width)
cy = np.random.uniform(0, height)
w = width * np.sqrt(1 - lam)
h = height * np.sqrt(1 - lam)
x0 = int(np.round(max(cx - w / 2, 0)))
x1 = int(np.round(min(cx + w / 2, width)))
y0 = int(np.round(max(cy - h / 2, 0)))
y1 = int(np.round(min(cy + h / 2, height)))
return ((y0, y1), (x0, x1)), lam
def cutmix(source, target, coors, alpha = 1.):
source, target = map(torch.clone, (source, target))
((y0, y1), (x0, x1)), _ = coors
source[:, :, y0:y1, x0:x1] = target[:, :, y0:y1, x0:x1]
return source
def mask_src_tgt(source, target, mask):
return source * mask + (1 - mask) * target
# dataset
def convert_rgb_to_transparent(image):
if image.mode == 'RGB':
return image.convert('RGBA')
return image
def convert_transparent_to_rgb(image):
if image.mode == 'RGBA':
return image.convert('RGB')
return image
class expand_greyscale(object):
def __init__(self, num_channels):
self.num_channels = num_channels
def __call__(self, tensor):
return tensor.expand(self.num_channels, -1, -1)
def resize_to_minimum_size(min_size, image):
if max(*image.size) < min_size:
return torchvision.transforms.functional.resize(image, min_size)
return image
class Dataset(data.Dataset):
def __init__(self, folder, image_size, transparent = False, aug_prob = 0.):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent
num_channels = 3 if not transparent else 4
self.transform = transforms.Compose([
transforms.Lambda(convert_image_fn),
transforms.Lambda(partial(resize_to_minimum_size, image_size)),
transforms.Resize(image_size),
RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),
transforms.ToTensor(),
transforms.Lambda(expand_greyscale(num_channels))
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# augmentations
def random_float(lo, hi):
return lo + (hi - lo) * random()
def random_crop_and_resize(tensor, scale):
b, c, h, _ = tensor.shape
new_width = int(h * scale)
delta = h - new_width
h_delta = int(random() * delta)
w_delta = int(random() * delta)
cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()
return F.interpolate(cropped, size=(h, h), mode='bilinear')
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size, types):
super().__init__()
self.D = D
self.types = types
def forward(self, images, prob = 0., detach = False):
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=self.types)
if detach:
images.detach_()
return self.D(images), images
# stylegan2 classes
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul = 1, bias = True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)
class StyleVectorizer(nn.Module):
def __init__(self, emb, depth, lr_mul = 0.1):
super().__init__()
layers = []
for i in range(depth):
layers.extend([EqualLinear(emb, emb, lr_mul), leaky_relu()])
self.net = nn.Sequential(*layers)
def forward(self, x):
x = F.normalize(x, dim=1)
return self.net(x)
class RGBBlock(nn.Module):
def __init__(self, latent_dim, input_channel, upsample, rgba = False):
super().__init__()
self.input_channel = input_channel
self.to_style = nn.Linear(latent_dim, input_channel)
out_filters = 3 if not rgba else 4
self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)
self.upsample = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False) if upsample else None
def forward(self, x, prev_rgb, istyle):
b, c, h, w = x.shape
style = self.to_style(istyle)
x = self.conv(x, style)
if prev_rgb is not None:
x = x + prev_rgb
if self.upsample is not None:
x = self.upsample(x)
return x
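# modulated convolution from StyleGAN2 - the per-sample style scales the convolution weights (modulation),
# which are then optionally rescaled to unit norm (demodulation); the batch is folded into the group dimension so every sample gets its own kernel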
class Conv2DMod(nn.Module):
def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, **kwargs):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))
nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def _get_same_padding(self, size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
def forward(self, x, y):
b, c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + EPS)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
class GeneratorBlock(nn.Module):
def __init__(self, latent_dim, input_channels, filters, upsample = True, upsample_rgb = True, rgba = False):
super().__init__()
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None
self.to_style1 = nn.Linear(latent_dim, input_channels)
self.to_noise1 = nn.Linear(1, filters)
self.conv1 = Conv2DMod(input_channels, filters, 3)
self.to_style2 = nn.Linear(latent_dim, filters)
self.to_noise2 = nn.Linear(1, filters)
self.conv2 = Conv2DMod(filters, filters, 3)
self.activation = leaky_relu()
self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)
def forward(self, x, prev_rgb, istyle, inoise):
if self.upsample is not None:
x = self.upsample(x)
inoise = inoise[:, :x.shape[2], :x.shape[3], :]
noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))
noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))
style1 = self.to_style1(istyle)
x = self.conv1(x, style1)
x = self.activation(x + noise1)
style2 = self.to_style2(istyle)
x = self.conv2(x, style2)
x = self.activation(x + noise2)
rgb = self.to_rgb(x, prev_rgb, istyle)
return x, rgb
def double_conv(chan_in, chan_out):
return nn.Sequential(
nn.Conv2d(chan_in, chan_out, 3, padding=1),
leaky_relu(),
nn.Conv2d(chan_out, chan_out, 3, padding=1),
leaky_relu()
)
class DownBlock(nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = double_conv(input_channels, filters)
self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
unet_res = x
if self.down is not None:
x = self.down(x)
x = x + res
return x, unet_res
class UpBlock(nn.Module):
def __init__(self, input_channels, filters):
super().__init__()
self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)
self.net = double_conv(input_channels, filters)
self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)
self.input_channels = input_channels
self.filters = filters
def forward(self, x, res):
*_, h, w = x.shape
conv_res = self.conv_res(x, output_size = (h * 2, w * 2))
x = self.up(x)
x = torch.cat((x, res), dim=1)
x = self.net(x)
x = x + conv_res
return x
class Generator(nn.Module):
def __init__(self, image_size, latent_dim, network_capacity = 16, transparent = False, no_const = False, fmap_max = 512):
super().__init__()
self.image_size = image_size
self.latent_dim = latent_dim
self.num_layers = int(log2(image_size) - 1)
filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
init_channels = filters[0]
filters = [init_channels, *filters]
in_out_pairs = zip(filters[:-1], filters[1:])
self.no_const = no_const
if no_const:
self.to_initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)
else:
self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))
self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)
self.blocks = nn.ModuleList([])
self.attns = nn.ModuleList([])
for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
not_first = ind != 0
not_last = ind != (self.num_layers - 1)
num_layer = self.num_layers - ind
attn_fn = attn_and_ff(in_chan)
self.attns.append(attn_fn)
block = GeneratorBlock(
latent_dim,
in_chan,
out_chan,
upsample = not_first,
upsample_rgb = not_last,
rgba = transparent
)
self.blocks.append(block)
def forward(self, styles, input_noise):
batch_size = styles.shape[0]
image_size = self.image_size
if self.no_const:
avg_style = styles.mean(dim=1)[:, :, None, None]
x = self.to_initial_block(avg_style)
else:
x = self.initial_block.expand(batch_size, -1, -1, -1)
x = self.initial_conv(x)
styles = styles.transpose(0, 1)
rgb = None
for style, block, attn in zip(styles, self.blocks, self.attns):
if attn is not None:
x = attn(x)
x, rgb = block(x, rgb, style, input_noise)
return rgb
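# U-Net discriminator - the encoder path produces one real/fake logit per image (`enc_out`),
# while the decoder path upsamples back to full resolution and produces a per-pixel logit map (`dec_out`)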
class Discriminator(nn.Module):
def __init__(self, image_size, network_capacity = 16, transparent = False, fmap_max = 512):
super().__init__()
num_layers = int(log2(image_size) - 3)
num_init_filters = 3 if not transparent else 4
blocks = []
filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
filters[-1] = filters[-2]
chan_in_out = list(zip(filters[:-1], filters[1:]))
chan_in_out = list(map(list, chan_in_out))
down_blocks = []
attn_blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
num_layer = ind + 1
is_not_last = ind != (len(chan_in_out) - 1)
block = DownBlock(in_chan, out_chan, downsample = is_not_last)
down_blocks.append(block)
attn_fn = attn_and_ff(out_chan)
attn_blocks.append(attn_fn)
self.down_blocks = nn.ModuleList(down_blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
last_chan = filters[-1]
self.to_logit = nn.Sequential(
leaky_relu(),
nn.AvgPool2d(image_size // (2 ** num_layers)),
Flatten(1),
nn.Linear(last_chan, 1)
)
self.conv = double_conv(last_chan, last_chan)
dec_chan_in_out = chan_in_out[:-1][::-1]
self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))
self.conv_out = nn.Conv2d(3, 1, 1)
def forward(self, x):
b, *_ = x.shape
residuals = []
for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):
x, unet_res = down_block(x)
residuals.append(unet_res)
if attn_block is not None:
x = attn_block(x)
x = self.conv(x) + x
enc_out = self.to_logit(x)
for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):
x = up_block(x, res)
dec_out = self.conv_out(x)
return enc_out.squeeze(), dec_out
class StyleGAN2(nn.Module):
def __init__(self, image_size, latent_dim = 512, fmap_max = 512, style_depth = 8, network_capacity = 16, transparent = False, fp16 = False, steps = 1, lr = 1e-4, ttur_mult = 2, no_const = False, lr_mul = 0.1, aug_types = ['translation', 'cutout']):
super().__init__()
self.lr = lr
self.steps = steps
self.ema_updater = EMA(0.995)
self.S = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mul)
self.G = Generator(image_size, latent_dim, network_capacity, transparent = transparent, no_const = no_const, fmap_max = fmap_max)
self.D = Discriminator(image_size, network_capacity, transparent = transparent, fmap_max = fmap_max)
self.SE = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mul)
self.GE = Generator(image_size, latent_dim, network_capacity, transparent = transparent, no_const = no_const)
# wrapper for augmenting all images going into the discriminator
self.D_aug = AugWrapper(self.D, image_size, aug_types)
set_requires_grad(self.SE, False)
set_requires_grad(self.GE, False)
generator_params = list(self.G.parameters()) + list(self.S.parameters())
self.G_opt = Adam(generator_params, lr = self.lr, betas=(0.5, 0.9))
self.D_opt = Adam(self.D.parameters(), lr = self.lr * ttur_mult, betas=(0.5, 0.9))
self._init_weights()
self.reset_parameter_averaging()
self.cuda()
self.fp16 = fp16
if fp16:
(self.S, self.G, self.D, self.SE, self.GE), (self.G_opt, self.D_opt) = amp.initialize([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt, self.D_opt], opt_level='O1')
def _init_weights(self):
for m in self.modules():
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
for block in self.G.blocks:
nn.init.zeros_(block.to_noise1.weight)
nn.init.zeros_(block.to_noise2.weight)
nn.init.zeros_(block.to_noise1.bias)
nn.init.zeros_(block.to_noise2.bias)
def EMA(self):
def update_moving_average(ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
update_moving_average(self.SE, self.S)
update_moving_average(self.GE, self.G)
def reset_parameter_averaging(self):
self.SE.load_state_dict(self.S.state_dict())
self.GE.load_state_dict(self.G.state_dict())
def forward(self, x):
return x
class Trainer():
def __init__(self, name, results_dir, models_dir, image_size, network_capacity, transparent = False, batch_size = 4, mixed_prob = 0.9, gradient_accumulate_every=1, lr = 2e-4, ttur_mult = 2, num_workers = None, save_every = 1000, trunc_psi = 0.6, fp16 = False, no_const = False, aug_prob = 0., dataset_aug_prob = 0., cr_weight = 0.2, apply_pl_reg = False, lr_mul = 0.1, *args, **kwargs):
self.GAN_params = [args, kwargs]
self.GAN = None
self.name = name
self.results_dir = Path(results_dir)
self.models_dir = Path(models_dir)
self.config_path = self.models_dir / name / '.config.json'
assert log2(image_size).is_integer(), 'image size must be a power of 2 (64, 128, 256, 512, 1024)'
self.image_size = image_size
self.network_capacity = network_capacity
self.transparent = transparent
self.no_const = no_const
self.aug_prob = aug_prob
self.lr = lr
self.ttur_mult = ttur_mult
self.lr_mul = lr_mul
self.batch_size = batch_size
self.num_workers = num_workers
self.mixed_prob = mixed_prob
self.save_every = save_every
self.steps = 0
self.av = None
self.trunc_psi = trunc_psi
self.apply_pl_reg = apply_pl_reg
self.pl_mean = None
self.gradient_accumulate_every = gradient_accumulate_every
assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex is not available for you to use mixed precision training'
self.fp16 = fp16
self.d_loss = 0
self.g_loss = 0
self.last_gp_loss = 0
self.last_cr_loss = 0
self.pl_length_ma = EMA(0.99)
self.init_folders()
self.loader = None
self.dataset_aug_prob = dataset_aug_prob
self.cr_weight = cr_weight
def init_GAN(self):
args, kwargs = self.GAN_params
self.GAN = StyleGAN2(lr = self.lr, ttur_mult = self.ttur_mult, lr_mul = self.lr_mul, image_size = self.image_size, network_capacity = self.network_capacity, transparent = self.transparent, fp16 = self.fp16, no_const = self.no_const, *args, **kwargs)
def write_config(self):
self.config_path.write_text(json.dumps(self.config()))
def load_config(self):
config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text())
self.image_size = config['image_size']
self.network_capacity = config['network_capacity']
self.transparent = config['transparent']
self.no_const = config.pop('no_const', False)
del self.GAN
self.init_GAN()
def config(self):
return {'image_size': self.image_size, 'network_capacity': self.network_capacity, 'transparent': self.transparent, 'no_const': self.no_const}
def set_data_src(self, folder):
self.dataset = Dataset(folder, self.image_size, transparent = self.transparent, aug_prob = self.dataset_aug_prob)
self.loader = cycle(data.DataLoader(self.dataset, num_workers = default(self.num_workers, num_cores), batch_size = self.batch_size, drop_last = True, shuffle=True, pin_memory=True))
def train(self):
assert self.loader is not None, 'You must first initialize the data source with `.set_data_src(<folder of images>)`'
if self.GAN is None:
self.init_GAN()
self.GAN.train()
total_disc_loss = torch.tensor(0.).cuda()
total_gen_loss = torch.tensor(0.).cuda()
batch_size = self.batch_size
image_size = self.GAN.G.image_size
latent_dim = self.GAN.G.latent_dim
num_layers = self.GAN.G.num_layers
aug_prob = self.aug_prob
apply_gradient_penalty = self.steps < 4000 or self.steps % 4 == 0
apply_path_penalty = self.apply_pl_reg and self.steps % 32 == 0
dec_loss_coef = warmup(0, 1., 30000, self.steps)
cutmix_prob = warmup(0, 0.25, 30000, self.steps)
apply_cutmix = random() < cutmix_prob
backwards = partial(loss_backwards, self.fp16)
# train discriminator
avg_pl_length = self.pl_mean
self.GAN.D_opt.zero_grad()
for i in range(self.gradient_accumulate_every):
get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list
style = get_latents_fn(batch_size, num_layers, latent_dim)
noise = image_noise(batch_size, image_size)
w_space = latent_to_w(self.GAN.S, style)
w_styles = styles_def_to_tensor(w_space)
generated_images = self.GAN.G(w_styles, noise).clone().detach()
(fake_enc_out, fake_dec_out), fake_aug_images = self.GAN.D_aug(generated_images, detach = True, prob = aug_prob)
real_images = next(self.loader).cuda()
real_images.requires_grad_()
(real_enc_out, real_dec_out), real_aug_images = self.GAN.D_aug(real_images, prob = aug_prob)
enc_divergence = (F.relu(1 + real_enc_out) + F.relu(1 - fake_enc_out)).mean()
dec_divergence = (F.relu(1 + real_dec_out) + F.relu(1 - fake_dec_out)).mean()
divergence = enc_divergence + dec_divergence * dec_loss_coef
disc_loss = divergence
if apply_cutmix:
mask = cutmix(
torch.ones_like(real_dec_out),
torch.zeros_like(real_dec_out),
cutmix_coordinates(image_size, image_size)
)
if random() > 0.5:
mask = 1 - mask
cutmix_images = mask_src_tgt(real_aug_images, fake_aug_images, mask)
cutmix_enc_out, cutmix_dec_out = self.GAN.D(cutmix_images)
cutmix_enc_divergence = F.relu(1 - cutmix_enc_out).mean()
cutmix_dec_divergence = F.relu(1 + (mask * 2 - 1) * cutmix_dec_out).mean()
disc_loss = disc_loss + cutmix_enc_divergence + cutmix_dec_divergence
cr_cutmix_dec_out = mask_src_tgt(real_dec_out, fake_dec_out, mask)
cr_loss = F.mse_loss(cutmix_dec_out, cr_cutmix_dec_out) * self.cr_weight
self.last_cr_loss = cr_loss.clone().detach().item()
disc_loss = disc_loss + cr_loss * dec_loss_coef
if apply_gradient_penalty:
if random() < 0.5:
gp = gradient_penalty(real_images, (real_enc_out,))
else:
gp = gradient_penalty(real_images, (real_dec_out,)) * dec_loss_coef
self.last_gp_loss = gp.clone().detach().item()
disc_loss = disc_loss + gp
disc_loss = disc_loss / self.gradient_accumulate_every
disc_loss.register_hook(raise_if_nan)
backwards(disc_loss, self.GAN.D_opt)
total_disc_loss += divergence.detach().item() / self.gradient_accumulate_every
self.d_loss = float(total_disc_loss)
self.GAN.D_opt.step()
# train generator
self.GAN.G_opt.zero_grad()
for i in range(self.gradient_accumulate_every):
style = get_latents_fn(batch_size, num_layers, latent_dim)
noise = image_noise(batch_size, image_size)
w_space = latent_to_w(self.GAN.S, style)
w_styles = styles_def_to_tensor(w_space)
generated_images = self.GAN.G(w_styles, noise)
(fake_enc_output, fake_dec_output), _ = self.GAN.D_aug(generated_images, prob = aug_prob)
loss = fake_enc_output.mean() + F.relu(1 + fake_dec_output).mean()
gen_loss = loss
if apply_path_penalty:
pl_lengths = calc_pl_lengths(w_styles, generated_images)
avg_pl_length = np.mean(pl_lengths.detach().cpu().numpy())
if not is_empty(self.pl_mean):
pl_loss = ((pl_lengths - self.pl_mean) ** 2).mean()
if not torch.isnan(pl_loss):
gen_loss = gen_loss + pl_loss
gen_loss = gen_loss / self.gradient_accumulate_every
gen_loss.register_hook(raise_if_nan)
backwards(gen_loss, self.GAN.G_opt)
total_gen_loss += loss.detach().item() / self.gradient_accumulate_every
self.g_loss = float(total_gen_loss)
self.GAN.G_opt.step()
# calculate moving averages
if apply_path_penalty and not np.isnan(avg_pl_length):
self.pl_mean = self.pl_length_ma.update_average(self.pl_mean, avg_pl_length)
if self.steps % 10 == 0 and self.steps > 20000:
self.GAN.EMA()
if self.steps <= 25000 and self.steps % 1000 == 2:
self.GAN.reset_parameter_averaging()
# save from NaN errors
checkpoint_num = floor(self.steps / self.save_every)
if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)):
print(f'NaN detected for generator or discriminator. Loading from checkpoint #{checkpoint_num}')
self.load(checkpoint_num)
raise NanException
# periodically save results
if self.steps % self.save_every == 0:
self.save(checkpoint_num)
if self.steps % 1000 == 0 or (self.steps % 100 == 0 and self.steps < 2500):
self.evaluate(floor(self.steps / 1000))
self.steps += 1
self.av = None
@torch.no_grad()
def evaluate(self, num = 0, num_image_tiles = 8, trunc = 1.0):
self.GAN.eval()
ext = 'jpg' if not self.transparent else 'png'
num_rows = num_image_tiles
latent_dim = self.GAN.G.latent_dim
image_size = self.GAN.G.image_size
num_layers = self.GAN.G.num_layers
# latents and noise
latents = noise_list(num_rows ** 2, num_layers, latent_dim)
n = image_noise(num_rows ** 2, image_size)
# regular
generated_images = self.generate_truncated(self.GAN.S, self.GAN.G, latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows)
# moving averages
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows)
# mixing regularities
def tile(a, dim, n_tile):
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = n_tile
a = a.repeat(*(repeat_idx))
order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).cuda()
return torch.index_select(a, dim, order_index)
nn = noise(num_rows, latent_dim)
tmp1 = tile(nn, 0, num_rows)
tmp2 = nn.repeat(num_rows, 1)
tt = int(num_layers / 2)
mixed_latents = [(tmp1, tt), (tmp2, num_layers - tt)]
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, mixed_latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-mr.{ext}'), nrow=num_rows)
@torch.no_grad()
def generate_truncated(self, S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8):
latent_dim = G.latent_dim
if self.av is None:
z = noise(2000, latent_dim)
samples = evaluate_in_chunks(self.batch_size, S, z).cpu().numpy()
self.av = np.mean(samples, axis = 0)
self.av = np.expand_dims(self.av, axis = 0)
w_space = []
for tensor, num_layers in style:
tmp = S(tensor)
av_torch = torch.from_numpy(self.av).cuda()
tmp = trunc_psi * (tmp - av_torch) + av_torch
w_space.append((tmp, num_layers))
w_styles = styles_def_to_tensor(w_space)
generated_images = evaluate_in_chunks(self.batch_size, G, w_styles, noi)
return generated_images.clamp_(0., 1.)
@torch.no_grad()
def generate_interpolation(self, num = 0, num_image_tiles = 8, trunc = 1.0, save_frames = False):
self.GAN.eval()
ext = 'jpg' if not self.transparent else 'png'
num_rows = num_image_tiles
latent_dim = self.GAN.G.latent_dim
image_size = self.GAN.G.image_size
num_layers = self.GAN.G.num_layers
# latents and noise
latents_low = noise(num_rows ** 2, latent_dim)
latents_high = noise(num_rows ** 2, latent_dim)
n = image_noise(num_rows ** 2, image_size)
ratios = torch.linspace(0., 8., 100)
frames = []
for ratio in tqdm(ratios):
interp_latents = slerp(ratio, latents_low, latents_high)
latents = [(interp_latents, num_layers)]
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows)
pil_image = transforms.ToPILImage()(images_grid.cpu())
frames.append(pil_image)
frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True)
if save_frames:
folder_path = (self.results_dir / self.name / f'{str(num)}')
folder_path.mkdir(parents=True, exist_ok=True)
for ind, frame in enumerate(frames):
frame.save(str(folder_path / f'{str(ind)}.{ext}'))
def print_log(self):
pl_mean = default(self.pl_mean, 0)
print(f'G: {self.g_loss:.2f} | D: {self.d_loss:.2f} | GP: {self.last_gp_loss:.2f} | PL: {pl_mean:.2f} | CR: {self.last_cr_loss:.2f}')
def model_name(self, num):
return str(self.models_dir / self.name / f'model_{num}.pt')
def init_folders(self):
(self.results_dir / self.name).mkdir(parents=True, exist_ok=True)
(self.models_dir / self.name).mkdir(parents=True, exist_ok=True)
def clear(self):
rmtree(f'./models/{self.name}', True)
rmtree(f'./results/{self.name}', True)
rmtree(str(self.config_path), True)
self.init_folders()
def save(self, num):
save_data = {'GAN': self.GAN.state_dict()}
if self.GAN.fp16:
save_data['amp'] = amp.state_dict()
torch.save(save_data, self.model_name(num))
self.write_config()
def load(self, num = -1):
self.load_config()
name = num
if num == -1:
file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')]
saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))
if len(saved_nums) == 0:
return
name = saved_nums[-1]
print(f'continuing from previous epoch - {name}')
self.steps = name * self.save_every
load_data = torch.load(self.model_name(name))
self.GAN.load_state_dict(load_data['GAN'])
if self.GAN.fp16 and 'amp' in load_data:
amp.load_state_dict(load_data['amp'])
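# illustrative usage sketch (paths and hyperparameters are assumptions, not taken from this file; a CUDA device is required since the networks are moved to the GPU on construction):
#
#   trainer = Trainer('default', './results', './models', image_size = 128, network_capacity = 16)
#   trainer.set_data_src('./path/to/images')
#   for _ in range(100000):
#       trainer.train()
#       if trainer.steps % 50 == 0:
#           trainer.print_log()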
| unet-stylegan2-master | unet_stylegan2/unet_stylegan2.py |
from setuptools import setup, find_packages
setup(
name = 'transformer-in-transformer',
packages = find_packages(),
version = '0.1.2',
license='MIT',
description = 'Transformer in Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/transformer-in-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformer',
'image classification'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| transformer-in-transformer-main | setup.py |
from transformer_in_transformer.tnt import TNT
| transformer-in-transformer-main | transformer_in_transformer/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(val, divisor):
return (val % divisor) == 0
def unfold_output_size(image_size, kernel_size, stride, padding):
return int(((image_size - kernel_size + (2 * padding)) / stride) + 1)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.
):
super().__init__()
inner_dim = heads * dim_head
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
b, n, d, h = *x.shape, self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
return self.to_out(out)
# main class
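# Transformer in Transformer (TNT) - every image patch is further split into "pixel" sub-patches;
# an inner transformer attends over the pixel tokens within each patch, and its output is projected and
# added back into the corresponding patch token before the outer transformer attends across patches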
class TNT(nn.Module):
def __init__(
self,
*,
image_size,
patch_dim,
pixel_dim,
patch_size,
pixel_size,
depth,
num_classes,
channels = 3,
heads = 8,
dim_head = 64,
ff_dropout = 0.,
attn_dropout = 0.,
unfold_args = None
):
super().__init__()
assert divisible_by(image_size, patch_size), 'image size must be divisible by patch size'
assert divisible_by(patch_size, pixel_size), 'patch size must be divisible by pixel size for now'
num_patch_tokens = (image_size // patch_size) ** 2
self.image_size = image_size
self.patch_size = patch_size
self.patch_tokens = nn.Parameter(torch.randn(num_patch_tokens + 1, patch_dim))
unfold_args = default(unfold_args, (pixel_size, pixel_size, 0))
unfold_args = (*unfold_args, 0) if len(unfold_args) == 2 else unfold_args
kernel_size, stride, padding = unfold_args
pixel_width = unfold_output_size(patch_size, kernel_size, stride, padding)
num_pixels = pixel_width ** 2
self.to_pixel_tokens = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> (b h w) c p1 p2', p1 = patch_size, p2 = patch_size),
nn.Unfold(kernel_size = kernel_size, stride = stride, padding = padding),
Rearrange('... c n -> ... n c'),
nn.Linear(channels * kernel_size ** 2, pixel_dim)
)
self.patch_pos_emb = nn.Parameter(torch.randn(num_patch_tokens + 1, patch_dim))
self.pixel_pos_emb = nn.Parameter(torch.randn(num_pixels, pixel_dim))
layers = nn.ModuleList([])
for _ in range(depth):
pixel_to_patch = nn.Sequential(
nn.LayerNorm(pixel_dim),
Rearrange('... n d -> ... (n d)'),
nn.Linear(pixel_dim * num_pixels, patch_dim),
)
layers.append(nn.ModuleList([
PreNorm(pixel_dim, Attention(dim = pixel_dim, heads = heads, dim_head = dim_head, dropout = attn_dropout)),
PreNorm(pixel_dim, FeedForward(dim = pixel_dim, dropout = ff_dropout)),
pixel_to_patch,
PreNorm(patch_dim, Attention(dim = patch_dim, heads = heads, dim_head = dim_head, dropout = attn_dropout)),
PreNorm(patch_dim, FeedForward(dim = patch_dim, dropout = ff_dropout)),
]))
self.layers = layers
self.mlp_head = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, num_classes)
)
def forward(self, x):
b, _, h, w, patch_size, image_size = *x.shape, self.patch_size, self.image_size
assert divisible_by(h, patch_size) and divisible_by(w, patch_size), f'height {h} and width {w} of input must be divisible by the patch size'
num_patches_h = h // patch_size
num_patches_w = w // patch_size
n = num_patches_w * num_patches_h
pixels = self.to_pixel_tokens(x)
patches = repeat(self.patch_tokens[:(n + 1)], 'n d -> b n d', b = b)
patches += rearrange(self.patch_pos_emb[:(n + 1)], 'n d -> () n d')
pixels += rearrange(self.pixel_pos_emb, 'n d -> () n d')
for pixel_attn, pixel_ff, pixel_to_patch_residual, patch_attn, patch_ff in self.layers:
pixels = pixel_attn(pixels) + pixels
pixels = pixel_ff(pixels) + pixels
patches_residual = pixel_to_patch_residual(pixels)
patches_residual = rearrange(patches_residual, '(b h w) d -> b (h w) d', h = num_patches_h, w = num_patches_w)
patches_residual = F.pad(patches_residual, (0, 0, 1, 0), value = 0) # cls token gets residual of 0
patches = patches + patches_residual
patches = patch_attn(patches) + patches
patches = patch_ff(patches) + patches
cls_token = patches[:, 0]
return self.mlp_head(cls_token)
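if __name__ == '__main__':
    # minimal usage sketch - the hyperparameter values below are illustrative assumptions, not prescribed by this file
    tnt = TNT(
        image_size = 256,    # input image size
        patch_dim = 512,     # dimension of the outer (patch) tokens
        pixel_dim = 24,      # dimension of the inner (pixel) tokens
        patch_size = 16,     # patch size
        pixel_size = 4,      # pixel (sub-patch) size
        depth = 6,           # number of layers
        num_classes = 1000   # number of output classes
    )
    img = torch.randn(1, 3, 256, 256)
    logits = tnt(img)   # (1, 1000)
    print(logits.shape)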
| transformer-in-transformer-main | transformer_in_transformer/tnt.py |
from setuptools import setup, find_packages
setup(
name = 'learning-to-expire-pytorch',
packages = find_packages(exclude=['examples']),
version = '0.0.2',
license='MIT',
description = 'Learning to Expire - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/learning-to-expire-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'memory'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| learning-to-expire-pytorch-main | setup.py |
from learning_to_expire_pytorch.learning_to_expire_pytorch import ExpireSpanTransformerXL
| learning-to-expire-pytorch-main | learning_to_expire_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from collections import namedtuple
# constants
Memory = namedtuple('Memory', ['mems', 'elapsed_times'])
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def safe_cat(tensors, dim = -1):
tensors = list(filter(exists, tensors))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim = dim)
def safe_add(tensor, n):
if not exists(tensor):
return None
return tensor + n
# positional embedding
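# `rel_shift` implements the Transformer-XL relative position trick - padding with a zero column and reshaping
# shifts each query row so that the position logits line up with relative distances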
def rel_shift(t):
b, h, i, j, device, dtype = *t.shape, t.device, t.dtype
zero_pad = torch.zeros((b, h, i, 1), device = device, dtype = dtype)
concatted = torch.cat([zero_pad, t], dim = -1)
shifted = concatted.view(b, h, j + 1, i)[:, :, 1:]
return shifted.view_as(t)
class SinusoidalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x):
n, device = x.shape[1], x.device
t = torch.arange(n - 1, -1, -1, device = device).type_as(self.inv_freq)
sinusoid_inp = einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim = -1)
return emb
# expire span logic
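# each memory vector predicts its own expiration span (a scalar up to `max_mem_len`); the returned mask is 1 while a memory
# is younger than its predicted span, then ramps linearly down to 0 over `ramp_length` steps so the expiration stays differentiable;
# positions belonging to the current sequence are padded with 1s and therefore always attended to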
class ExpireSpan(nn.Module):
def __init__(self, dim, max_mem_len, ramp_length):
super().__init__()
self.max_mem_len = max_mem_len
self.ramp_length = ramp_length
self.to_expiration = nn.Linear(dim, 1)
nn.init.constant_(self.to_expiration.bias.data, val = -self.max_mem_len)
def forward(self, mem, time, seq_len):
exps = self.to_expiration(mem).squeeze(-1).sigmoid() * self.max_mem_len
exps = rearrange(exps, 'b j -> b () () j')
t = rearrange(time, 'b j -> b () () j')
r = F.pad(exps - t, (0, seq_len), value = 1.)
mask = torch.clamp((r / self.ramp_length) + 1, min = 0., max = 1.)
return exps, mask
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class CausalAttention(nn.Module):
def __init__(self, dim, heads = 8):
super().__init__()
dim_head = dim // heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_pos = nn.Linear(dim, dim_head)
self.to_q = nn.Linear(dim, dim)
self.to_kv = nn.Linear(dim, dim * 2)
self.to_out = nn.Linear(dim, dim)
def forward(self, x, pos_emb, mem = None, expire_mask = None):
n, h, scale, device = x.shape[1], self.heads, self.scale, x.device
q = self.to_q(x)
mem_len = mem.shape[1] if exists(mem) else 0
context = safe_cat((mem, x), dim = 1)
kv = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, *kv))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
# calculate relative positional contribution
pos = self.to_pos(pos_emb)
pos_dots = einsum('b h i d, j d -> b h i j', q, pos) * scale
pos_dots = rel_shift(pos_dots)
pos_dots = F.pad(pos_dots, (mem_len, 0), value = 0)
dots += pos_dots
# causal mask
mask = torch.ones(dots.shape[-2:], device = device).triu_(mem_len + 1).bool()
mask = rearrange(mask, 'i j -> () () i j')
dots.masked_fill_(mask, float('-inf'))
del mask
# attention
attn = dots.softmax(dim = -1)
if exists(expire_mask):
attn = attn * expire_mask
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class ExpireSpanTransformerXL(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
seq_len,
heads = 8,
num_memory_blocks = 10,
expire_loss_coef = 1e-6,
ramp_length = 128):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.sinusoidal_emb = SinusoidalEmbedding(dim)
self.dim = dim
self.depth = depth
self.seq_len = seq_len
self.max_mem_len = num_memory_blocks * seq_len
self.expire_loss_coef = expire_loss_coef
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
ExpireSpan(dim, self.max_mem_len, ramp_length),
PreNorm(dim, CausalAttention(dim, heads = heads)),
PreNorm(dim, FeedForward(dim)),
]))
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x, memory = None):
b, n, d, device = *x.shape, self.dim, x.device
x = self.token_emb(x)
pos_emb = self.sinusoidal_emb(x)
hidden_states = []
expire_masks_layers = []
mems_layers = memory.mems if exists(memory) else ((None,) * self.depth)
times_layers = memory.elapsed_times if exists(memory) else ((None,) * self.depth)
aux_loss = torch.tensor(0., requires_grad = True)
for (mem, time, (expire_span, attn, ff)) in zip(mems_layers, times_layers, self.layers):
hidden_states.append(x)
exps, expire_mask = expire_span(mem, time, seq_len = n) if exists(mem) else (None, None)
expire_masks_layers.append(expire_mask)
if self.training and exists(time):
forget_time_thres = torch.randint(0, self.max_mem_len, (b, 1), device = device)
forget_dropout_mask = (time < forget_time_thres).float()
forget_dropout_mask = rearrange(forget_dropout_mask, 'b n -> b () () n')
forget_dropout_mask = F.pad(forget_dropout_mask, (0, n), value = 1.)
expire_mask *= forget_dropout_mask
x = attn(x, pos_emb = pos_emb, mem = mem, expire_mask = expire_mask) + x
x = ff(x) + x
if exists(exps):
# unsure if this is implemented correctly
                # paper seems to suggest only adding the l1 auxiliary loss for expirations that yield a soft masking value on the ramp (between 0 and 1)
expiring_exps_mask = (expire_mask > 0) & (expire_mask < 1.)
expiring_exps = exps.masked_select(expiring_exps_mask[..., :-n])
aux_loss = aux_loss + (expiring_exps / self.seq_len).sum() * self.expire_loss_coef
logits = self.to_logits(x)
if self.seq_len == n:
if exists(expire_mask):
mems_layers_new = []
times_layers_new = []
for mems, times, expire_mask in zip(mems_layers, times_layers, expire_masks_layers):
expire_mask = rearrange(expire_mask, 'b () () i -> b i')
# discard expired memories
expired_exps_mask = (expire_mask <= 0)[..., :-n]
# it is not possible to expire different amounts of memories across batches
# for now, will just expire the minimum of the expired memories across batches
num_to_expire = min(expired_exps_mask.sum(dim = -1))
_, indices = expired_exps_mask.float().topk(k = num_to_expire, dim = -1)
even_expired_exps_mask = torch.zeros_like(expired_exps_mask, device = device).scatter(-1, indices, 1.).bool()
mems = mems.masked_select(~even_expired_exps_mask.unsqueeze(-1))
mems = mems.reshape(b, -1, d)
mems_layers_new.append(mems)
times = times.masked_select(~even_expired_exps_mask)
times = times.reshape(b, -1)
times_layers_new.append(times)
mems_layers = mems_layers_new
times_layers = times_layers_new
new_memories = map(lambda t: safe_cat(t, dim = 1), list(zip(mems_layers, hidden_states)))
new_memories = map(lambda t: t[:, -self.max_mem_len:].detach(), new_memories)
new_times = torch.arange(n - 1, -1, -1, device = device)
new_times = repeat(new_times, 'n -> b n', b = b)
new_elapsed_times = map(lambda t: safe_cat((safe_add(t, n), new_times), dim = 1), times_layers)
            new_elapsed_times = map(lambda t: t[:, -self.max_mem_len:], new_elapsed_times)
memory = Memory(list(new_memories), list(new_elapsed_times))
return logits, memory, aux_loss
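if __name__ == '__main__':
    # minimal usage sketch - the hyperparameter values below are illustrative assumptions, not prescribed by this file
    model = ExpireSpanTransformerXL(
        num_tokens = 256,
        dim = 512,
        depth = 4,
        seq_len = 128,
        num_memory_blocks = 4
    )
    x = torch.randint(0, 256, (1, 128))
    logits, memory, aux_loss = model(x)   # first segment, no memory passed in
    # on subsequent segments, feed the returned memory back in: logits, memory, aux_loss = model(next_x, memory)
    print(logits.shape)                   # (1, 128, 256)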
| learning-to-expire-pytorch-main | learning_to_expire_pytorch/learning_to_expire_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'simple-hierarchical-transformer',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Simple Hierarchical Transformer',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/simple-hierarchical-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'hierarchical'
],
install_requires=[
'accelerate',
'einops>=0.4',
'local-attention',
'torch>=1.6',
'vector-quantize-pytorch>=1.1.5'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| simple-hierarchical-transformer-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from simple_hierarchical_transformer import HierarchicalTransformer
from accelerate import Accelerator
# hf accelerator
accelerator = Accelerator()
device = accelerator.device
acc_print = accelerator.print
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 2
GRADIENT_ACCUMULATE_EVERY = 8
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
SEQ_LEN = 2048
GENERATE_LENGTH = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate transformer
model = HierarchicalTransformer(
num_tokens = 256,
dim = 1024,
depth = 8,
seq_len = SEQ_LEN,
hierarchies = (1, 2),
window_sizes = (32, 64),
use_flash_attn = True
).to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
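# each outer step accumulates gradients over GRADIENT_ACCUMULATE_EVERY mini-batches (scaling the loss down accordingly)
# before clipping gradients and taking a single optimizer step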
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss, (ce_loss, recon_loss, prophet_loss) = model(next(train_loader), return_loss = True)
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
acc_print(f"training loss: {ce_loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
_, (ce_loss, *_) = model(next(val_loader), return_loss = True)
acc_print(f"validation loss: {ce_loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
acc_print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
acc_print(output_str, "\n")
| simple-hierarchical-transformer-main | train.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
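# Attend dispatches between a manual attention implementation and pytorch 2.0's F.scaled_dot_product_attention;
# the sdp kernel config (flash / math / memory-efficient) is chosen depending on whether an A100 is detected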
class Attend(nn.Module):
def __init__(
self,
causal = False,
use_flash_attn = False
):
super().__init__()
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash_attn:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
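if __name__ == '__main__':
    # minimal sanity-check sketch (the shapes below are illustrative assumptions) - exercises the non-flash path on cpu
    attend = Attend(causal = True)
    q = torch.randn(1, 8, 32, 64)
    k = torch.randn(1, 8, 32, 64)
    v = torch.randn(1, 8, 32, 64)
    out = attend(q, k, v)
    print(out.shape)   # torch.Size([1, 8, 32, 64])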
| simple-hierarchical-transformer-main | simple_hierarchical_transformer/attention.py |
import math
from functools import partial
from itertools import zip_longest
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from simple_hierarchical_transformer.attention import Attend
from typing import Tuple
from local_attention import LocalMHA
from vector_quantize_pytorch import RandomProjectionQuantizer
# constants
mlist = nn.ModuleList
Linear = partial(nn.Linear, bias = False)
LocalMHA = partial(LocalMHA, causal = True, prenorm = True)
# helper functions
def exists(val):
return val is not None
def is_power_of_two(n):
return math.log2(n).is_integer()
def all_unique(arr):
return len(set(arr)) == len(arr)
def apply_fns(fns, tensors):
return [fn(tensor) for fn, tensor in zip(fns, tensors)]
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def default(*vals):
for val in vals:
if exists(val):
return val
return None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def log(t, eps = 1e-20):
return t.clamp(min = eps).log()
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, -torch.finfo(logits.dtype).max)
probs.scatter_(1, ind, val)
return probs
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
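# rotary embeddings encode relative positions by rotating pairs of query/key features;
# the xpos extension additionally applies a position-dependent scale to the queries (and its inverse to the keys) for better length extrapolation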
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
scale_base = 512,
use_xpos = True
):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.use_xpos = use_xpos
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale)
@property
def device(self):
return next(self.buffers()).device
def forward(self, seq_len):
device = self.device
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not self.use_xpos:
return freqs, torch.ones(1, device = device)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t, scale = 1.):
seq_len = t.shape[-2]
pos = pos[..., -seq_len:, :]
if not isinstance(scale, (int, float)):
scale = scale[..., -seq_len:, :]
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
def apply_rotary_pos_emb_qk(rotary_emb, q, k):
freqs, scale = rotary_emb
q = apply_rotary_pos_emb(freqs, q, scale)
k = apply_rotary_pos_emb(freqs, k, scale ** -1)
return q, k
# token shift, from Peng et al of RWKV
def token_shift(t):
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1))
return torch.cat((t, t_shift), dim = -1)
# hierarchy related classes
def pad_seq_to_multiple(t, mult):
seq_len = t.shape[-2]
next_seq_len_mult = math.ceil(seq_len / mult) * mult
remainder = next_seq_len_mult - seq_len
if remainder == 0:
return t, seq_len
t = F.pad(t, (0, 0, 0, remainder), value = 0.)
return t, seq_len
def curtail_seq_to_multiple(t, mult):
seq_len = t.shape[-2]
prev_seq_len_mult = (seq_len // mult) * mult
remainder = seq_len - prev_seq_len_mult
if remainder == 0:
return t
t = t[..., :prev_seq_len_mult, :]
return t
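# merge tokens coming from hierarchies that run at different temporal resolutions -
# coarser hierarchies are upsampled by simple repetition (according to their stride) before being concatenated along the feature dimension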
def hierarchical_cat(tokens, strides: Tuple[int, ...]):
assert len(tokens) == len(strides)
if all([s == 1 for s in strides]):
return torch.cat(tokens, dim = -1)
tokens = [repeat(t, 'b n d -> b (n s) d', s = s) for t, s in zip(tokens, strides)]
min_seq_len = min([t.shape[-2] for t in tokens])
tokens = [t[..., :min_seq_len, :] for t in tokens]
return torch.cat(tokens, dim = -1)
class CausalConv(nn.Module):
def __init__(
self,
dim_in,
dim_out,
kernel_size,
stride = 1
):
super().__init__()
self.causal_padding = kernel_size - 1
self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, stride = stride)
def forward(self, x):
x = F.pad(x, (self.causal_padding, 0))
return self.conv(x)
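# Compress downsamples the token sequence for a coarser hierarchy with a strided causal convolution, and optionally carries
# two auxiliary heads: `recon` (reconstruct the token ids that were compressed away) and `prophet` (predict upcoming token ids),
# both trained with cross entropy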
class Compress(nn.Module):
def __init__(
self,
*,
dim,
dim_out,
num_tokens = None,
stride = 1,
compress_factor = 1,
expansion_factor = 4,
dim_head = 64,
heads = 8,
ignore_index = 0,
should_recon = False,
should_prophet = False,
prophet_num_predictions = None
):
super().__init__()
assert compress_factor > 0 and is_power_of_two(compress_factor)
self.stride = stride
self.no_compress = compress_factor == 1
self.compress_factor = compress_factor
self.should_recon = should_recon
self.should_prophet = should_prophet
if self.no_compress:
self.compress_fn = Linear(dim, dim_out) if dim != dim_out else nn.Identity()
return
dim_inner = int(dim * expansion_factor)
self.compress_fn = nn.Sequential(
Rearrange('b n d -> b d n'),
CausalConv(dim, dim_inner, compress_factor, stride = stride),
nn.SiLU(),
nn.Conv1d(dim_inner, dim_out, 1),
Rearrange('b d n -> b n d')
)
if should_recon:
assert exists(num_tokens)
self.to_recon = Linear(dim_out, compress_factor * num_tokens)
if should_prophet:
assert exists(prophet_num_predictions)
self.to_prophet = Linear(dim_out, prophet_num_predictions)
self.ignore_index = ignore_index
def prophet(self, h, ids):
if not self.should_prophet:
return torch.zeros((), device = h.device).requires_grad_()
c = self.compress_factor
seq_len = ids.shape[-1]
prophet_logits = self.to_prophet(h)
prophet_logits = rearrange(prophet_logits, 'b n (c d) -> (b c) d n', c = c)
prophet_ids = F.pad(ids, (-1, c), value = self.ignore_index)
prophet_ids = tuple(prophet_ids[:, i:(seq_len + i)] for i in range(c))
prophet_ids = torch.stack(prophet_ids, dim = 1)
prophet_ids = rearrange(prophet_ids, 'b c n -> (b c) n')
if self.stride > 1:
prophet_ids = prophet_ids[..., ::self.stride]
prophet_loss = F.cross_entropy(prophet_logits, prophet_ids, ignore_index = self.ignore_index)
return prophet_loss
def recon(self, h, ids):
assert self.should_recon
if self.no_compress:
return torch.zeros((), device = h.device).requires_grad_()
c = self.compress_factor
seq_len = ids.shape[-1]
recon_logits = self.to_recon(h)
recon_logits = rearrange(recon_logits, 'b n (c d) -> (b c) d n', c = c)
recon_ids = F.pad(ids, (c - 1, 0), value = self.ignore_index)
recon_ids = tuple(recon_ids[:, i:(seq_len + i)] for i in range(c))
recon_ids = torch.stack(recon_ids, dim = 1)
recon_ids = rearrange(recon_ids, 'b c n -> (b c) n')
if self.stride > 1:
recon_ids = recon_ids[..., ::self.stride]
recon_loss = F.cross_entropy(recon_logits, recon_ids, ignore_index = self.ignore_index)
return recon_loss
def forward(self, x):
return self.compress_fn(x)
class HierarchicalMerge(nn.Module):
def __init__(
self,
dims: Tuple[int, ...],
dim_out,
h_strides = 1
):
super().__init__()
dim = sum(dims)
strides = cast_tuple(h_strides, len(dims))
assert len(strides) == len(dims)
self.strides = strides
self.net = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_out * 2),
nn.SiLU(),
nn.Linear(dim_out * 2, dim_out)
)
def forward(self, tokens):
x = hierarchical_cat(tokens, self.strides)
return self.net(x)
# classes
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
dim_inner = int(dim * mult)
self.net = nn.Sequential(
RMSNorm(dim),
Linear(dim, dim_inner),
nn.GELU(),
Linear(dim_inner, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
use_flash_attn = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
dim_inner = dim_head * heads
self.norm = RMSNorm(dim)
self.rotary_emb = RotaryEmbedding(dim_head)
self.attend = Attend(causal = True, use_flash_attn = use_flash_attn)
self.to_qkv = Linear(dim, dim_inner * 3)
self.to_out = Linear(dim_inner, dim)
def forward(self, x):
n = x.shape[-2]
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
rotary_emb = self.rotary_emb(n)
q, k = apply_rotary_pos_emb_qk(rotary_emb, q, k)
out = self.attend(q, k, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class HierarchicalBlock(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
window_size = None,
compress_factor = 1,
stride = 1,
ff_mult = 4
):
super().__init__()
self.stride = stride
assert is_power_of_two(compress_factor)
self.compress_factor = compress_factor
self.no_compress = compress_factor == 1
assert not exists(window_size) or window_size >= 0
self.has_attn = window_size != 0
self.attn = None
if self.has_attn:
attn_klass = Attention
if exists(window_size):
attn_klass = partial(LocalMHA, window_size = window_size)
self.attn = attn_klass(dim = dim, dim_head = dim_head, heads = heads)
self.ff = FeedForward(dim = dim, mult = ff_mult)
def forward(self, x):
c = self.compress_factor
axial_dim = c // self.stride
x, orig_seq_len = pad_seq_to_multiple(x, axial_dim)
# hierarchical attention is performed with a simple axial attention
# this, and using a convolution for compressing at the beginning
# is one of the improvements on top of hourglass transformer
# the downside is that the savings are only O(c) instead of O(c ** 2) as in hourglass transformer
# you can get the O(c ** 2) saving by setting the hierarchical stride == c, but you'll see that performance is much worse, as some tokens will have a c - 1 token gap to the last hierarchical token
if not self.no_compress:
x = rearrange(x, 'b (n c) d -> (b c) n d', c = axial_dim)
if exists(self.attn):
x = self.attn(token_shift(x)) + x
x = self.ff(token_shift(x)) + x
if not self.no_compress:
x = rearrange(x, '(b c) n d -> b (n c) d', c = axial_dim)
return x[:, :orig_seq_len]
class HierarchicalTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
seq_len = 2048,
dim_head = 64,
heads = 8,
ff_mult = 4,
hierarchies = 1,
window_sizes = None,
hierarchical_stride = 1,
hierarchy_merge_all = False, # whether to pass the pooled hierarchical information back to all hierarchies or just one doing the prediction
ignore_index = 0,
use_flash_attn = False,
recon_loss_weight = 0.1,
prophet_loss_weight = 0.,
prophet_loss_use_quantized = False, # for prophet, whether to use the next 1x token ids, or use the ids from random projection quantization
prophet_quantized_use_embed = False,
predict_hierarchy = None,
predict_use_all_hierarchy = False,
rq_num_codebooks = 4,
rq_codebook_dim = 256,
rq_codebook_size = 1024,
):
super().__init__()
self.seq_len = seq_len
hierarchies = cast_tuple(hierarchies)
assert all_unique(hierarchies), 'hierarchies compression factors must be all unique integers'
assert all([*map(is_power_of_two, hierarchies)]), 'only powers of two allowed for hierarchies'
self.hierarchies = hierarchies
# just use a simple tuple list per hyperparameter to customize each hierarchy
num_hierarchies = len(hierarchies)
dims = cast_tuple(dim, num_hierarchies)
assert len(dims) == num_hierarchies
window_sizes = cast_tuple(window_sizes, num_hierarchies)
assert len(window_sizes) == num_hierarchies
dim_head = cast_tuple(dim_head, num_hierarchies)
assert len(dim_head) == num_hierarchies
heads = cast_tuple(heads, num_hierarchies)
assert len(heads) == num_hierarchies
ff_mult = cast_tuple(ff_mult, num_hierarchies)
assert len(ff_mult) == num_hierarchies
hierarchical_stride = cast_tuple(hierarchical_stride, num_hierarchies)
assert all([*map(is_power_of_two, hierarchical_stride)]), 'all hierarchical strides must be power of two'
assert all([s <= h for s, h in zip(hierarchical_stride, hierarchies)]), 'all strides must be less than the compression factor of the hierarchy'
self.h_strides = hierarchical_stride
assert len(hierarchical_stride) == num_hierarchies
# this determines to which hierarchy is everything pooled into for final prediction
# however, final next token prediction can also use all hierarchies with `predict_use_all_hierarchy`
predict_hierarchy = default(predict_hierarchy, min(hierarchies))
self.predict_hierarchy_index = hierarchies.index(predict_hierarchy)
hierarchy_predict_dim = dims[self.predict_hierarchy_index]
self.hierarchy_merge_all = hierarchy_merge_all
assert hierarchy_merge_all or self.h_strides[self.predict_hierarchy_index] == 1, 'the hierarchy level being used for final next token prediction must have compression stride of 1'
# training related loss weights
self.recon_loss_weight = recon_loss_weight
self.prophet_loss_weight = prophet_loss_weight
should_recon = recon_loss_weight > 0
should_prophet = prophet_loss_weight > 0
self.should_recon = should_recon
self.should_prophet = should_prophet
self.prophet_loss_use_quantized = prophet_loss_use_quantized
self.prophet_quantized_use_embed = prophet_quantized_use_embed
# token embedding
dim_token_emb = max(dims)
self.token_emb = nn.Embedding(num_tokens, dim_token_emb)
# hierarchy compressions - 1x just uses the base token_emb weights
self.compressors = mlist([])
for dim, hierarchy, stride in zip(dims, hierarchies, hierarchical_stride):
self.compressors.append(Compress(
dim = dim_token_emb,
dim_out = dim,
num_tokens = num_tokens,
compress_factor = hierarchy,
stride = stride,
should_recon = should_recon,
should_prophet = should_prophet,
prophet_num_predictions = ((hierarchy * num_tokens) if not prophet_loss_use_quantized else (rq_num_codebooks * rq_codebook_size))
))
# post token embedding norms
self.post_token_emb_norms = mlist([nn.LayerNorm(dim) for dim in dims])
# layers
self.layers = mlist([])
self.dims = dims
self.hierarchical_merges = mlist([])
self.need_hierarchical_merge = num_hierarchies > 1
for _ in range(depth):
hierarchical_layer = mlist([])
# add a transformer block for each layer in the hierarchy
for hierarchy, h_stride, h_dim, h_window_size, h_dim_head, h_heads, h_ff_mult in zip(hierarchies, hierarchical_stride, dims, window_sizes, dim_head, heads, ff_mult):
# make sure the window size never exceeds the effective sequence length
effective_seq_len = seq_len // hierarchy
if exists(h_window_size) and h_window_size > effective_seq_len:
print(f'window size for hierarchy {hierarchy}x is greater than effective sequence length - setting window size to None (which would use normal full attention)')
h_window_size = None
# add attention and feedforward
hierarchical_layer.append(
HierarchicalBlock(
dim = h_dim,
dim_head = h_dim_head,
heads = h_heads,
window_size = h_window_size,
compress_factor = hierarchy,
stride = h_stride,
ff_mult = h_ff_mult
)
)
self.layers.append(hierarchical_layer)
# for merging the information across hierarchies
# for now, only one direction, from all hierarchies to the hierarchy that is being used to make predictions on, set by predict_hierarchy_index above
if not self.need_hierarchical_merge:
continue
merge = HierarchicalMerge(
dims = dims,
dim_out = hierarchy_predict_dim if not self.hierarchy_merge_all else sum(dims),
h_strides = hierarchical_stride
)
self.hierarchical_merges.append(merge)
# final post-transformer norms, for all hierarchies
self.norms = mlist([nn.LayerNorm(dim) for dim in dims])
# random projection quantizer, for another approach to hierarchical predictive coding
if self.prophet_loss_use_quantized:
rpq_klass = partial(
RandomProjectionQuantizer,
num_codebooks = rq_num_codebooks,
codebook_dim = rq_codebook_dim,
codebook_size = rq_codebook_size
)
self.rand_proj_quantizers = mlist([rpq_klass(dim = dim) for dim in dims])
self.rq_num_codebooks = rq_num_codebooks
# to logit, for hierarchy set at predict_hierarchy_index, or all hierarchies
self.predict_use_all_hierarchy = predict_use_all_hierarchy
logit_dim_in = sum(dims) if predict_use_all_hierarchy else hierarchy_predict_dim
self.to_logits = Linear(logit_dim_in, num_tokens)
# training related loss parameters
self.ignore_index = ignore_index
@torch.no_grad()
@eval_decorator
def generate(
self,
prompt,
seq_len,
temperature = 1.0,
filter_thres = 0.9,
**kwargs
):
b, t, device = *prompt.shape, prompt.device
out = prompt
for _ in range(seq_len):
logits = self.forward(out[:, -self.seq_len:], **kwargs)[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(filtered_logits, temperature = temperature)
sample = rearrange(sample, 'b -> b 1')
out = torch.cat((out, sample), dim = -1)
return out[:, t:]
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
ids,
return_loss = False,
return_hierarchical_token_embeds = False,
return_hierarchical_embeds = False,
ablate_hierarchical_merge = False,
return_random_proj_quantize_ids = False
):
"""
einops notation:
b - batch
n - sequence length
c - compression factor
d - dimension
"""
# if training, predict next token in sequence
if return_loss:
ids, labels = ids[:, :-1], ids[:, 1:]
# assert seq len
assert ids.shape[-1] <= self.seq_len
# get token embeddings, and pad to multiple of compression factor
x = self.token_emb(ids)
# for every hierarchy, compress token embeddings appropriately to the hierarchical embeddings
tokens = []
for compress in self.compressors:
tokens.append(compress(x))
# save hierarchical tokens right before norm for random projection quantization, if needed
post_compressed_tokens = tokens
# post embedding norms
tokens = apply_fns(self.post_token_emb_norms, tokens)
# if one wants all the compressed token embeds
# just to investigate the space
if return_hierarchical_token_embeds:
return tokens
# layers
for layer, merge in zip_longest(self.layers, self.hierarchical_merges):
tokens = apply_fns(layer, tokens)
            # pool the information across all hierarchies
# and then update the tokens that will be used to make the final autoregressive prediction
if not self.need_hierarchical_merge or ablate_hierarchical_merge:
continue
pooled = merge(tokens)
if self.hierarchy_merge_all:
tokens = [(t + p[..., ::s, :]) for t, p, s in zip(tokens, pooled.split(self.dims, dim = -1), self.h_strides)]
else:
predict_tokens = tokens[self.predict_hierarchy_index]
predict_tokens = predict_tokens + pooled
tokens[self.predict_hierarchy_index] = predict_tokens
# final normalized embeddings
embeds = apply_fns(self.norms, tokens)
# if the researcher wants the randomly projected ids of either compressed tokens or embeddings of the hierarchies
if return_random_proj_quantize_ids:
assert self.prophet_loss_use_quantized
quantize_input = embeds if self.prophet_quantized_use_embed else post_compressed_tokens
hierarchical_ids = apply_fns(self.rand_proj_quantizers, quantize_input)
return hierarchical_ids
# if one wants all the normalized hierarchical embeds
if return_hierarchical_embeds:
return embeds
# select the hierarchical embeddings that will be doing the predicting
if self.predict_use_all_hierarchy:
predict_embed = hierarchical_cat(embeds, self.h_strides)
else:
predict_embed = embeds[self.predict_hierarchy_index]
# logits for predicting next token
logits = self.to_logits(predict_embed)
if not return_loss:
return logits
ce_loss_fn = partial(F.cross_entropy, ignore_index = self.ignore_index)
# autoregressive loss (predictive coding)
logits = rearrange(logits, 'b n c -> b c n')
ce_loss = ce_loss_fn(logits, labels)
# reconstruction losses for hierarchy tokens
recon_losses = prophet_losses = torch.zeros((), device = self.device).requires_grad_()
if self.should_recon:
for compress, t in zip(self.compressors, embeds):
recon_loss = compress.recon(t, ids)
recon_losses = recon_losses + recon_loss
# prophet losses for hierarchy tokens
if self.should_prophet:
if self.prophet_loss_use_quantized:
# using random projected quantizer of the next hierarchical token
quantize_input = embeds if self.prophet_quantized_use_embed else post_compressed_tokens
hierarchical_ids = apply_fns(self.rand_proj_quantizers, quantize_input)
for hierarchy, stride, compress, embed, pred_ids in zip(self.hierarchies, self.h_strides, self.compressors, embeds, hierarchical_ids):
if hierarchy == 1:
continue
prophet_logits = compress.to_prophet(embed)
axial_dim = hierarchy // stride
prophet_logits = curtail_seq_to_multiple(prophet_logits, axial_dim)
pred_ids = curtail_seq_to_multiple(pred_ids, axial_dim)
prophet_logits, pred_ids = map(lambda t: rearrange(t, 'b (n c) ... -> (b c) n ...', c = axial_dim), (prophet_logits, pred_ids))
prophet_logits = rearrange(prophet_logits[:, :-1], 'b n (q c) -> (b q) c n', q = self.rq_num_codebooks)
pred_ids = rearrange(pred_ids[:, 1:], 'b n q -> (b q) n')
prophet_loss = ce_loss_fn(prophet_logits, pred_ids)
prophet_losses = prophet_losses + prophet_loss
else:
# or predicting the next N 1x base token ids
# like prophetnet paper
for compress, t in zip(self.compressors, embeds):
prophet_loss = compress.prophet(t, ids)
prophet_losses = prophet_losses + prophet_loss
# total loss
total_loss = ce_loss + recon_losses * self.recon_loss_weight + prophet_losses * self.prophet_loss_weight
return total_loss, (ce_loss, recon_losses, prophet_losses)
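# illustrative usage sketch (not part of the original file) - relies only on the definitions above
# all hyperparameter values, tensor shapes, and the helper name below are made-up examples, not recommended settings
def _example_hierarchical_transformer_usage():
    # three hierarchies: the base 1x tokens plus 2x and 8x compressed streams
    # local attention windows on the finer hierarchies, full attention on the coarsest
    model = HierarchicalTransformer(
        num_tokens = 256,
        dim = 512,
        depth = 4,
        seq_len = 1024,
        hierarchies = (1, 2, 8),
        window_sizes = (32, 64, None)
    )
    ids = torch.randint(0, 256, (1, 1024))
    # training returns the total loss plus the individual terms it is composed of
    total_loss, (ce_loss, recon_losses, prophet_losses) = model(ids, return_loss = True)
    total_loss.backward()
    # sampling a continuation from a prompt
    sampled = model.generate(ids[:, :128], seq_len = 64)
    return sampled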
| simple-hierarchical-transformer-main | simple_hierarchical_transformer/simple_hierarchical_transformer.py |
from simple_hierarchical_transformer.simple_hierarchical_transformer import HierarchicalTransformer
| simple-hierarchical-transformer-main | simple_hierarchical_transformer/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'flamingo-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Flamingo - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/flamingo-pytorch',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'visual question answering'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| flamingo-pytorch-main | setup.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops_exts import rearrange_many, repeat_many
def exists(val):
return val is not None
def FeedForward(dim, mult = 4):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias = False),
nn.GELU(),
nn.Linear(inner_dim, dim, bias = False)
)
class PerceiverAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm_media = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, latents):
"""
einstein notation
b - batch
t - time
n - sequence
d - dimension
"""
x = self.norm_media(x)
latents = self.norm_latents(latents)
b, m, h = *x.shape[:2], self.heads
q = self.to_q(latents)
        # the paper differs from Perceiver in that they also concat the key / values derived from the latents to be attended to
kv_input = torch.cat((x, latents), dim = -2)
k, v = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = rearrange_many((q, k, v), 'b t n (h d) -> b h t n d', h = h)
q = q * self.scale
# attention
sim = einsum('... i d, ... j d -> ... i j', q, k)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h t n d -> b t n (h d)', h = h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 4,
ff_mult = 4
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.media_pos_emb = nn.Parameter(torch.randn(num_media_embeds, 1, dim))
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = nn.LayerNorm(dim)
def forward(self, x):
if x.ndim == 3:
x = rearrange(x, 'b n d -> b 1 n d')
times = x.shape[1]
x = x + self.media_pos_emb[:times]
latents = repeat(self.latents, 'n d -> b m n d', b = x.shape[0], m = x.shape[1])
for attn, ff in self.layers:
latents = attn(x, latents) + latents
latents = ff(latents) + latents
return self.norm(latents)
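# illustrative usage sketch (not part of the original file) - the shapes, hyperparameters and helper name are arbitrary examples
def _example_perceiver_resampler_usage():
    resampler = PerceiverResampler(
        dim = 1024,
        depth = 2,
        num_latents = 64,       # number of learned latents the media is distilled into
        num_media_embeds = 4    # maximum number of media (images / frames) per sample
    )
    # (batch, media, visual tokens, dim) - e.g. 2 images with 256 patch embeddings each
    image_embeds = torch.randn(1, 2, 256, 1024)
    latents = resampler(image_embeds)  # (1, 2, 64, 1024)
    return latents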
# gated cross attention
class MaskedCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
only_attend_immediate_media = True
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# whether for text to only attend to immediate preceding image, or all images
self.only_attend_immediate_media = only_attend_immediate_media
def forward(
self,
x,
media,
media_locations = None
):
b, t, m = media.shape[:3]
h = self.heads
x = self.norm(x)
q = self.to_q(x)
media = rearrange(media, 'b t n d -> b (t n) d')
k, v = self.to_kv(media).chunk(2, dim = -1)
q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)
q = q * self.scale
sim = einsum('... i d, ... j d -> ... i j', q, k)
if exists(media_locations):
text_time = media_locations.cumsum(dim = -1) # at each boolean of True, increment the time counter (relative to media time)
media_time = torch.arange(t, device = x.device) + 1
# text time must equal media time if only attending to most immediate image
# otherwise, as long as text time is greater than media time (if attending to all previous images / media)
mask_op = torch.eq if self.only_attend_immediate_media else torch.ge
text_to_media_mask = mask_op(rearrange(text_time, 'b i -> b 1 i 1'), repeat(media_time, 'j -> 1 1 1 (j m)', m = m))
sim = sim.masked_fill(~text_to_media_mask, -torch.finfo(sim.dtype).max)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
if exists(media_locations) and self.only_attend_immediate_media:
# any text without a preceding media needs to have attention zeroed out
text_without_media_mask = text_time == 0
text_without_media_mask = rearrange(text_without_media_mask, 'b i -> b 1 i 1')
attn = attn.masked_fill(text_without_media_mask, 0.)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
ff_mult = 4,
only_attend_immediate_media = True
):
super().__init__()
self.attn = MaskedCrossAttention(dim = dim, dim_head = dim_head, heads = heads, only_attend_immediate_media = only_attend_immediate_media)
self.attn_gate = nn.Parameter(torch.tensor([0.]))
self.ff = FeedForward(dim, mult = ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.]))
def forward(
self,
x,
media, # media tensor, encoded by perceiver resample - (batch, time, latents, dim)
media_locations = None # boolean tensor indicating positions of media - (batch, sequence)
):
x = self.attn(x, media, media_locations = media_locations) * self.attn_gate.tanh() + x
x = self.ff(x) * self.ff_gate.tanh() + x
return x
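# illustrative usage sketch (not part of the original file) - all shapes, values and the helper name are made up
# note the tanh gates are initialized at zero, so at the start of training the block leaves the text stream untouched
def _example_gated_cross_attention_usage():
    block = GatedCrossAttentionBlock(dim = 512)
    text_tokens = torch.randn(1, 128, 512)        # (batch, sequence, dim)
    media = torch.randn(1, 2, 64, 512)            # (batch, media time, latents, dim), e.g. from the perceiver resampler
    # boolean positions of the media tokens within the text sequence
    media_locations = torch.zeros(1, 128, dtype = torch.bool)
    media_locations[:, [0, 60]] = True
    out = block(text_tokens, media, media_locations = media_locations)  # (1, 128, 512)
    return out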
| flamingo-pytorch-main | flamingo_pytorch/flamingo_pytorch.py |
from flamingo_pytorch.flamingo_pytorch import PerceiverResampler, GatedCrossAttentionBlock
from flamingo_pytorch.flamingo_palm import FlamingoPaLM
| flamingo-pytorch-main | flamingo_pytorch/__init__.py |
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import einsum, nn
from flamingo_pytorch.flamingo_pytorch import GatedCrossAttentionBlock, PerceiverResampler
# helper functions
def exists(val):
return val is not None
# for controlling freezing during training of flamingo
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
def freeze_model_and_make_eval_(model):
model.eval()
freeze_all_layers_(model)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("mask", None, persistent=False)
self.register_buffer("pos_emb", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
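# illustrative usage sketch (not part of the original file) - shapes and the helper name are arbitrary examples
def _example_parallel_block_usage():
    block = ParallelTransformerBlock(dim = 512, dim_head = 64, heads = 8)
    x = torch.randn(2, 256, 512)   # (batch, sequence, dim)
    out = block(x)                 # (2, 256, 512) - causal attention and feedforward branches summed
    return out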
# transformer
class FlamingoPaLM(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
dim_head=64,
heads=8,
ff_mult=4,
media_token_id=3,
cross_attn_every=3,
img_encoder=None,
perceiver_num_latents=64,
perceiver_depth=2,
max_video_frames = None,
only_attend_immediate_media=True
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.media_token_id = media_token_id # you need to reserve a special token id for media
self.video_frame_pos_emb = nn.Parameter(torch.randn(max_video_frames, dim)) if exists(max_video_frames) else None
self.img_encoder = img_encoder
        if exists(self.img_encoder):
            freeze_model_and_make_eval_(self.img_encoder)
self.perceiver_resampler = PerceiverResampler(
dim=dim,
depth=perceiver_depth,
dim_head=dim_head,
heads=heads,
num_latents=perceiver_num_latents
)
self.layers = nn.ModuleList([])
for ind in range(depth):
self.layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
GatedCrossAttentionBlock(dim=dim, dim_head=dim_head, heads=heads, only_attend_immediate_media=only_attend_immediate_media) if not (ind % cross_attn_every) else None
]))
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
def forward(
self,
text,
*,
images=None,
videos=None,
embeds=None
):
batch, device = text.shape[0], text.device
flamingo_mode = any([exists(t) for t in (images, videos, embeds)])
# automatically take care of freezing or unfreezing depending on what is passed in
if flamingo_mode:
# in flamingo mode, freeze everything but perceiver and gated cross attention
freeze_all_layers_(self)
unfreeze_all_layers_(self.perceiver_resampler)
[unfreeze_all_layers_(cross_attn) for _, cross_attn in self.layers if exists(cross_attn)]
else:
unfreeze_all_layers_(self)
# derive the media token ids (as a boolean tensor), for calculating the masked cross attention
if flamingo_mode:
media_locations = text == self.media_token_id
text_tokens = self.token_emb(text)
        assert not (exists(embeds) and (exists(images) or exists(videos)))
# encode videos or images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image embeddings
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
images = rearrange(images, 'b t ... -> (b t) ...')
with torch.no_grad():
embeds = self.img_encoder(images)
embeds = rearrange(embeds, '(b t) ... -> b t ...', b = batch)
if exists(videos):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic video encoding'
batch, media, num_times, *_ = videos.shape
videos = rearrange(videos, '... c h w -> (...) c h w')
with torch.no_grad():
embeds = self.img_encoder(videos)
embeds = rearrange(embeds, '(b m t) ... -> b m t ...', b = batch, m = media, t = num_times)
video_time_pos_emb = repeat(self.video_frame_pos_emb[:num_times], 't d -> b m t n d', b = batch, m = media, n = embeds.shape[-2])
embeds = embeds + video_time_pos_emb
embeds = rearrange(embeds, 'b m t n d -> b m (t n) d')
if exists(embeds):
embeds = self.perceiver_resampler(embeds)
# go through layers
for attn_ff, flamingo_cross_attn in self.layers:
text_tokens = attn_ff(text_tokens)
# if image embeds exist and flamingo cross attention set for the layer
# do the cross attention
if exists(flamingo_cross_attn) and exists(embeds):
text_tokens = flamingo_cross_attn(
text_tokens,
embeds,
media_locations = media_locations
)
return self.to_logits(text_tokens)
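# illustrative usage sketch (not part of the original file) - hyperparameters, shapes and the helper name are made up
# nn.Identity() is only a placeholder for the frozen vision encoder; normally an image encoder returning
# (batch, sequence, dim) features would be passed and raw images given via `images=`, but this sketch
# feeds precomputed visual embeddings directly through `embeds=`
def _example_flamingo_palm_usage():
    model = FlamingoPaLM(
        num_tokens = 20000,
        dim = 512,
        depth = 6,
        media_token_id = 3,     # reserved token id marking where media appears in the text
        cross_attn_every = 3,
        img_encoder = nn.Identity()
    )
    text = torch.randint(0, 20000, (1, 256))
    text[:, 0] = 3              # first media token
    text[:, 128] = 3            # second media token
    # precomputed visual features - (batch, media, visual tokens, dim)
    image_embeds = torch.randn(1, 2, 64, 512)
    logits = model(text, embeds = image_embeds)  # (1, 256, 20000)
    return logits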
| flamingo-pytorch-main | flamingo_pytorch/flamingo_palm.py |
from setuptools import setup, find_packages
setup(
name = 'cross-transformers-pytorch',
packages = find_packages(),
version = '0.0.2',
license='MIT',
description = 'Cross Transformers - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/cross-transformers-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'cross attention',
'few shot learning'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| cross-transformers-pytorch-main | setup.py |
from cross_transformers_pytorch.cross_transformers_pytorch import CrossTransformer
| cross-transformers-pytorch-main | cross_transformers_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
class CrossTransformer(nn.Module):
def __init__(
self,
dim = 512,
dim_key = 128,
dim_value = 128
):
super().__init__()
self.scale = dim_key ** -0.5
self.to_qk = nn.Conv2d(dim, dim_key, 1, bias = False)
self.to_v = nn.Conv2d(dim, dim_value, 1, bias = False)
def forward(self, model, img_query, img_supports):
"""
dimensions names:
b - batch
k - num classes
n - num images in a support class
c - channels
h, i - height
w, j - width
"""
b, k, *_ = img_supports.shape
query_repr = model(img_query)
*_, h, w = query_repr.shape
img_supports = rearrange(img_supports, 'b k n c h w -> (b k n) c h w', b = b)
supports_repr = model(img_supports)
query_q, query_v = self.to_qk(query_repr), self.to_v(query_repr)
supports_k, supports_v = self.to_qk(supports_repr), self.to_v(supports_repr)
supports_k, supports_v = map(lambda t: rearrange(t, '(b k n) c h w -> b k n c h w', b = b, k = k), (supports_k, supports_v))
sim = einsum('b c h w, b k n c i j -> b k h w n i j', query_q, supports_k) * self.scale
sim = rearrange(sim, 'b k h w n i j -> b k h w (n i j)')
attn = sim.softmax(dim = -1)
attn = rearrange(attn, 'b k h w (n i j) -> b k h w n i j', i = h, j = w)
out = einsum('b k h w n i j, b k n c i j -> b k c h w', attn, supports_v)
out = rearrange(out, 'b k c h w -> b k (c h w)')
query_v = rearrange(query_v, 'b c h w -> b () (c h w)')
euclidean_dist = ((query_v - out) ** 2).sum(dim = -1) / (h * w)
return -euclidean_dist
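# illustrative usage sketch (not part of the original file) - the single-conv backbone is only a stand-in
# for a real feature extractor (e.g. a resnet trunk) producing (batch, 512, height, width) feature maps;
# image sizes, shot counts and the helper name are arbitrary examples
def _example_cross_transformer_usage():
    cross_transformer = CrossTransformer(dim = 512, dim_key = 128, dim_value = 128)
    backbone = nn.Conv2d(3, 512, kernel_size = 16, stride = 16)   # stand-in feature extractor
    img_query = torch.randn(1, 3, 64, 64)             # (batch, channels, height, width)
    img_supports = torch.randn(1, 2, 4, 3, 64, 64)    # (batch, classes, shots, channels, height, width)
    # returns negative squared distances usable as class logits - shape (batch, classes)
    logits = cross_transformer(backbone, img_query, img_supports)
    return logits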
| cross-transformers-pytorch-main | cross_transformers_pytorch/cross_transformers_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'spear-tts-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='MIT',
description = 'Spear-TTS - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/spear-tts-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'text-to-speech'
],
install_requires=[
'audiolm-pytorch>=1.2.8',
'beartype',
'einops>=0.6.1',
'rotary-embedding-torch>=0.3.0',
'torch>=1.6',
'tqdm',
'x-clip>=0.12.2'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| spear-tts-pytorch-main | setup.py |
from spear_tts_pytorch.spear_tts_pytorch import (
TextToSemantic,
SpeechSpeechPretrainWrapper,
SemanticToTextWrapper,
TextToSemanticWrapper,
SemanticToTextDatasetGenerator
)
from spear_tts_pytorch.trainer import (
SpeechSpeechPretrainer,
SemanticToTextTrainer,
TextToSemanticTrainer
)
from spear_tts_pytorch.data import (
GeneratedAudioTextDataset,
MockDataset
)
| spear-tts-pytorch-main | spear_tts_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, i, j, device):
n = max(i, j)
if exists(self.mask) and self.mask.shape[-1] >= n:
mask = self.mask[:n, :n]
else:
mask = torch.ones((n, n), device = device, dtype = torch.bool).triu(1)
self.register_buffer("mask", mask, persistent = False)
return mask[-i:, :]
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, causal, is_cuda, device = *q.shape, k.shape[-2], self.causal, q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# if q and k lengths differ (caching of key/values), and causal, manually construct causal attn mask as float, as not supported (flash attn 2 will support this eventually)
if causal and q_len != k_len:
causal_mask = self.get_mask(q_len, k_len, device = device)
if exists(mask):
mask = mask & ~causal_mask
else:
mask = ~causal_mask
causal = False
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = self.get_mask(i, j, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
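# illustrative usage sketch (not part of the original file) - shapes and the helper name are arbitrary,
# and flash attention is left disabled so the example does not require pytorch 2.0
def _example_attend_usage():
    attend = Attend(causal = True, flash = False)
    q = torch.randn(2, 8, 1024, 64)   # (batch, heads, sequence, head dimension)
    k = torch.randn(2, 8, 1024, 64)
    v = torch.randn(2, 8, 1024, 64)
    out = attend(q, k, v)             # (2, 8, 1024, 64)
    return out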
| spear-tts-pytorch-main | spear_tts_pytorch/attend.py |
import math
from pathlib import Path
from functools import partial
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import Tensor, nn, einsum, FloatTensor, IntTensor, LongTensor
from torch.nn import Module, ModuleList
from torch.utils.data import Dataset
from einops import rearrange, repeat, pack
from audiolm_pytorch import FairseqVQWav2Vec, HubertWithKmeans
from audiolm_pytorch.data import get_dataloader
from rotary_embedding_torch import RotaryEmbedding
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Optional, Union, Callable, Literal, Tuple, List
from x_clip.tokenizer import tokenizer
from spear_tts_pytorch.attend import Attend
from tqdm import tqdm
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def empty(t: Tensor):
return t.numel() == 0
def set_eos_id(t: Tensor, eos_id: int, pad_id: int):
eos_indices = ((t == pad_id).cumsum(dim = -1) == 0).sum(dim = -1, keepdim = True).long()
batch_range = torch.arange(t.shape[0], device = t.device, dtype = torch.long)
batch_range = rearrange(batch_range, '... -> ... 1')
t = F.pad(t, (0, 1), value = pad_id)
t[batch_range, eos_indices] = eos_id
return t
def batch_unique_consecutive(t, pad_value = 0.):
unique_arr = [torch.unique_consecutive(el) for el in t.unbind(dim = 0)]
return pad_sequence(unique_arr, batch_first = True, padding_value = pad_value)
def mask_after_eos(target, eos_id, pad_id):
mask = (target == eos_id).cumsum(dim = -1) > 0
mask = F.pad(mask, (1, -1), value = False)
return target.masked_fill(mask, pad_id)
# freezing and unfreezing helpers
def set_requires_grad_(module: Module, requires_grad: bool):
for p in module.parameters():
p.requires_grad = requires_grad
def freeze(module: Module):
set_requires_grad_(module, False)
def unfreeze(module: Module):
set_requires_grad_(module, True)
# sampling helpers
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
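# illustrative sketch (not part of the original file) of how the filtering and gumbel sampling helpers
# above are combined during decoding - the vocabulary size, temperature and helper name are arbitrary
def _example_filtered_sampling():
    logits = torch.randn(2, 500)                          # (batch, vocabulary)
    filtered = top_k(logits, thres = 0.9)                 # keep only the top ~10% of logits
    sampled = gumbel_sample(filtered, temperature = 0.8)  # (batch,) sampled token ids
    return sampled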
# rmsnorm
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.):
dim_inner = int(dim * mult * 2 / 3)
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim_inner, dim)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
causal = False,
dim_context = None,
dropout = 0.,
rotary_emb: Optional[RotaryEmbedding] = None,
flash = False
):
super().__init__()
dim_context = default(dim_context, dim)
self.heads = heads
self.scale = dim_head ** -0.5
dim_inner = heads * dim_head
self.rotary_emb = rotary_emb
self.attend = Attend(
causal = causal,
flash = flash,
dropout = dropout
)
self.norm = RMSNorm(dim)
self.attn_dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, dim_inner, bias = False)
self.to_kv = nn.Linear(dim_context, dim_inner * 2, bias = False)
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None
):
has_context = exists(context)
h = self.heads
x = self.norm(x)
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
if exists(self.rotary_emb):
assert not has_context
q, k = self.rotary_emb.rotate_queries_with_cached_keys(q, k)
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
causal = False,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
cross_attend = False,
attn_flash = False
):
super().__init__()
rotary_emb = RotaryEmbedding(dim_head)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, causal = causal, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_emb = rotary_emb, flash = attn_flash),
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = attn_flash) if cross_attend else None,
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.final_norm = RMSNorm(dim)
def forward(
self,
x,
mask = None,
context = None,
context_mask = None
):
has_context = exists(context)
for attn, maybe_cross_attn, ff in self.layers:
x = attn(x, mask = mask) + x
if exists(maybe_cross_attn):
assert has_context
x = maybe_cross_attn(x, context = context, mask = context_mask) + x
x = ff(x) + x
return self.final_norm(x)
# class
SpeechOrTextLiteral = Union[
Literal['speech'],
Literal['text']
]
SemanticModelType = Union[
FairseqVQWav2Vec,
HubertWithKmeans
]
class TextToSemantic(Module):
@beartype
def __init__(
self,
dim,
*,
num_text_token_ids,
source_depth,
target_depth,
tokenizer_encode: Optional[Callable] = None,
use_openai_tokenizer = False,
wav2vec: Optional[SemanticModelType] = None,
num_semantic_token_ids = None,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
semantic_pad_id = -1,
text_pad_id = 0,
autoset_semantic_eos_id = True,
autoset_text_eos_id = True,
attn_flash = False
):
super().__init__()
self.dim = dim
self.wav2vec = wav2vec
self.tokenizer_encode = tokenizer_encode
if use_openai_tokenizer:
assert not exists(tokenizer_encode)
self.tokenizer_encode = tokenizer.tokenize
num_semantic_token_ids = wav2vec.codebook_size if exists(wav2vec) else num_semantic_token_ids
assert exists(num_semantic_token_ids), 'you need to either pass in a wav2vec model from audiolm-pytorch, or specify the number of semantic token ids with num_semantic_token_ids'
self.num_semantic_token_ids = num_semantic_token_ids
self.num_text_token_ids = num_text_token_ids
# padding id, for deriving attention mask automatically if not passed in
self.semantic_pad_id = semantic_pad_id
self.text_pad_id = text_pad_id
self.pad_id = dict(
speech = semantic_pad_id,
text = text_pad_id
)
# eos id
self.autoset_eos_id = dict(
speech = autoset_semantic_eos_id,
text = autoset_text_eos_id
)
self.eos_id = dict(
speech = num_semantic_token_ids,
text = num_text_token_ids
)
# embedding
semantic_token_emb = nn.Embedding(num_semantic_token_ids + int(autoset_semantic_eos_id), dim)
text_token_emb = nn.Embedding(num_text_token_ids + int(autoset_text_eos_id), dim)
self.semantic_token_emb = semantic_token_emb
self.token_emb = nn.ModuleDict(dict(
speech = semantic_token_emb,
text = text_token_emb
))
# respective start tokens
self.start_token = nn.ParameterDict(dict(
speech = nn.Parameter(torch.randn(dim)),
text = nn.Parameter(torch.randn(dim))
))
# projection to logits
to_semantic_logit = nn.Linear(dim, num_semantic_token_ids, bias = False)
to_text_logit = nn.Linear(dim, num_text_token_ids, bias = False)
to_semantic_logit.weight = semantic_token_emb.weight
to_text_logit.weight = text_token_emb.weight
self.to_logits = nn.ModuleDict(dict(
speech = to_semantic_logit,
text = to_text_logit
))
# source and target attention layers
self.source_transformer = Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
depth = source_depth,
attn_dropout = attn_dropout,
ff_mult = ff_mult,
ff_dropout = ff_dropout,
causal = False,
attn_flash = attn_flash
)
self.target_transformer = Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
depth = target_depth,
attn_dropout = attn_dropout,
ff_mult = ff_mult,
ff_dropout = ff_dropout,
causal = True,
cross_attend = True,
attn_flash = attn_flash
)
@property
def device(self):
return next(self.parameters()).device
def load(self, path, strict = True):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
self.load_state_dict(pkg['model'], strict = strict)
return pkg
# a set of freezing / unfreezing utils
# then rely on get_optimizer to filter out the parameters that do not require grad from being exposed to optimizer
def unfreeze_all(self):
unfreeze(self)
def freeze_encoder(self):
freeze(self.source_transformer)
def freeze_encoder_below_layer(self, layer: int):
"""
for the final training of text-to-semantic on pseudo-labelled dataset
they freeze the encoder part way up to a certain layer
"""
unfreeze(self.source_transformer)
for ind, module in enumerate(self.source_transformer.layers):
current_layer = ind + 1
if current_layer <= layer:
freeze(module)
def freeze_decoder(self):
freeze(self.target_transformer)
def freeze_speech_emb(self):
freeze(self.token_emb['speech'])
self.start_token['speech'].requires_grad = False
def freeze_text_emb(self):
freeze(self.token_emb['text'])
self.start_token['text'].requires_grad = False
# sampling function
@torch.no_grad()
@eval_decorator
@beartype
def generate(
self,
source: Union[List[str], Tensor],
*,
source_type: SpeechOrTextLiteral,
target_type: SpeechOrTextLiteral,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
source_mask: Optional[Tensor] = None,
max_length = 2048,
beam_search_decode = False,
beam_size = 4,
return_source = False
):
if isinstance(source, (FloatTensor)) and source_type == 'speech':
assert exists(self.wav2vec), 'wav2vec should be passed in, if generating with source as raw soundwave'
source = self.wav2vec(source)
if is_bearable(source, List[str]):
assert exists(self.tokenizer_encode)
source = self.tokenizer_encode(source)
source = source.to(self.device)
batch = source.shape[0]
source_token_emb = self.token_emb[source_type]
source_pad_id = self.pad_id[source_type]
# all target modules and parameters
target_token_emb = self.token_emb[target_type]
target_start_token = self.start_token[target_type]
target_to_logit = self.to_logits[target_type]
target_pad_id = self.pad_id[target_type]
target_eos_id = self.eos_id[target_type]
# auto set eos id
if self.autoset_eos_id[source_type]:
source_eos_id = self.eos_id[source_type]
source = set_eos_id(source, source_eos_id, pad_id = source_pad_id)
# if source mask is not passed in
# automatically derive by the padding id of the modality
if not exists(source_mask) and source.dtype == torch.long:
source_mask = source != source_pad_id
# source embedding
source_emb = source_token_emb(source)
source_emb = self.source_transformer(source_emb, mask = source_mask)
# decode target
target = torch.empty((batch, 0), dtype = torch.long, device = self.device)
start_token = repeat(target_start_token, 'd -> b 1 d', b = batch)
# loop to decode
if not beam_search_decode:
for _ in tqdm(range(max_length)):
target_emb = target_token_emb(target)
target_emb = torch.cat((start_token, target_emb), dim = 1)
# target attention
target_emb = self.target_transformer(target_emb, context = source_emb, context_mask = source_mask)
# decoder logits
logits = target_to_logit(target_emb)
logits = logits[:, -1]
logits = filter_logits_fn(logits, thres = filter_thres)
sampled = gumbel_sample(logits, temperature = temperature)
target, _ = pack((target, sampled), 'b *')
if not self.autoset_eos_id[target_type]:
continue
is_eos = target == target_eos_id
all_eos = is_eos.any(dim = -1).all()
if not all_eos:
continue
target = mask_after_eos(target, target_eos_id, target_pad_id)
break
else:
beam = [(target, 0.0)]
batch_range = torch.arange(batch, device = self.device, dtype = torch.long)
batch_range = rearrange(batch_range, 'b -> b 1')
for _ in tqdm(range(max_length)):
all_candidates = []
for sentence, sentence_prob in beam:
target_emb = target_token_emb(sentence)
target_emb = torch.cat((start_token, target_emb), dim = 1)
# target attention
target_emb = self.target_transformer(target_emb, context = source_emb, context_mask = source_mask)
# decoder logits
logits = target_to_logit(target_emb)
logits = logits[:, -1]
log_probs = torch.log_softmax(logits / max(temperature, 1e-10), dim = -1)
topk_log_probs, topk_ids = log_probs.topk(beam_size, dim = -1)
for i in range(beam_size):
candidate = torch.cat([sentence, topk_ids[..., i:i + 1]], dim = -1)
candidate_prob = sentence_prob + topk_log_probs[..., i]
all_candidates.append((candidate, candidate_prob))
# concat into shape (beam, batch, seq), (beam, batch)
candidates, candidate_probs = map(partial(torch.stack, dim = 1), zip(*all_candidates))
# sort by candidate scores across beams
sorted_indices = candidate_probs.sort(dim = 1, descending = True).indices
sorted_candidates = candidates[batch_range, sorted_indices]
sorted_candidate_probs = candidate_probs[batch_range, sorted_indices]
# reconstitute ordered List[Tuple[Tensor, Tensor]]
ordered = list(zip(*map(partial(torch.unbind, dim = 1), (sorted_candidates, sorted_candidate_probs))))
beam = ordered[:beam_size]
# check if we've hit eos for all sequences
all_eos = all([((sentence == target_eos_id).any(dim = -1)).all() for sentence, _ in beam])
if all_eos:
break
target = beam[0][0]
if exists(target_eos_id):
target = mask_after_eos(target, target_eos_id, target_pad_id)
if not return_source:
return target
return source, target
@beartype
def forward(
self,
source: Union[List[str], Tensor],
target: Union[List[str], Tensor],
*,
source_type: SpeechOrTextLiteral,
target_type: SpeechOrTextLiteral,
source_mask: Optional[Tensor] = None,
target_mask: Optional[Tensor] = None,
return_loss = False,
return_logits = False
):
if isinstance(source, FloatTensor) and source_type == 'speech':
assert exists(self.wav2vec), 'wav2vec should be passed in, if generating with source as raw soundwave'
source = self.wav2vec(source)
if is_bearable(source, List[str]):
assert exists(self.tokenizer_encode)
source = self.tokenizer_encode(source)
source = source.to(self.device)
if is_bearable(target, List[str]):
assert exists(self.tokenizer_encode)
target = self.tokenizer_encode(target)
target = target.to(self.device)
assert source.shape[0] == target.shape[0]
batch = source.shape[0]
source_token_emb = self.token_emb[source_type]
source_pad_id = self.pad_id[source_type]
# all target modules and parameters
target_token_emb = self.token_emb[target_type]
target_start_token = self.start_token[target_type]
target_to_logit = self.to_logits[target_type]
target_pad_id = self.pad_id[target_type]
# auto set eos id
if self.autoset_eos_id[source_type]:
source_eos_id = self.eos_id[source_type]
source = set_eos_id(source, source_eos_id, pad_id = source_pad_id)
if self.autoset_eos_id[target_type] and return_loss:
target_eos_id = self.eos_id[target_type]
target = set_eos_id(target, target_eos_id, pad_id = target_pad_id)
# if source/target mask is not passed in
# automatically derive by the padding id of the modality
if not exists(source_mask) and source.dtype == torch.long:
source_mask = source != source_pad_id
if not exists(target_mask) and target.dtype == torch.long:
target_mask = target != target_pad_id
# attend to bos
target_mask = F.pad(target_mask, (1, 0), value = True)
# embedding
source_emb = source_token_emb(source)
target_emb = target_token_emb(target)
start_token = repeat(target_start_token, 'd -> b 1 d', b = batch)
target_emb = torch.cat((start_token, target_emb), dim = 1)
# source attention
source_emb = self.source_transformer(source_emb, source_mask)
# target attention
target_emb = self.target_transformer(target_emb, mask = target_mask, context = source_emb, context_mask = source_mask)
# decoder logits
logits = target_to_logit(target_emb)
if not return_loss:
return logits
assert not empty(target)
logits = rearrange(logits[:, :-1], 'b n c -> b c n')
loss = F.cross_entropy(
logits,
target,
ignore_index = target_pad_id
)
if return_logits:
return loss, logits
else:
return loss
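# illustrative usage sketch (not part of the original file) - vocabulary sizes, depths, sequence lengths
# and the helper name are arbitrary; no wav2vec model or text tokenizer is attached, so token ids are
# passed in directly as tensors
def _example_text_to_semantic_usage():
    model = TextToSemantic(
        dim = 512,
        num_text_token_ids = 256,
        num_semantic_token_ids = 500,
        source_depth = 6,
        target_depth = 6
    )
    text_ids = torch.randint(1, 256, (2, 64))        # text pad id defaults to 0
    semantic_ids = torch.randint(0, 500, (2, 128))   # semantic pad id defaults to -1
    loss = model(
        text_ids,
        semantic_ids,
        source_type = 'text',
        target_type = 'speech',
        return_loss = True
    )
    loss.backward()
    # inference - translate text token ids into semantic token ids
    generated_semantic = model.generate(
        text_ids,
        source_type = 'text',
        target_type = 'speech',
        max_length = 32
    )
    return generated_semantic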
# pretraining modules
def get_mask_subset_prob(mask, prob, min_mask = 0):
batch, seq, device = *mask.shape, mask.device
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
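# illustrative sketch (not part of the original file) - shapes, probability and the helper name are arbitrary
def _example_mask_subset():
    mask = torch.ones(2, 10, dtype = torch.bool)             # all positions valid (non padding)
    delete_mask = get_mask_subset_prob(mask, prob = 0.6)     # ~60% of valid positions per row marked for deletion
    return delete_mask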
class SpeechSpeechPretrainWrapper(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic,
wav2vec: Optional[SemanticModelType] = None,
deletion_prob: float = 0.6,
reconstruct_seq: bool = False,
mask_id = None
):
super().__init__()
self.model = model
self.wav2vec = default(wav2vec, model.wav2vec)
self.deletion_prob = deletion_prob
self.reconstruct_seq = reconstruct_seq # whether to reconstruct the entire sequence, or just output the deleted ones in order
self.mask_id = mask_id
def forward(
self,
x
):
is_raw_audio = x.dtype == torch.float
if is_raw_audio:
assert exists(self.wav2vec)
with torch.no_grad():
self.wav2vec.eval()
x = self.wav2vec(x, flatten = False)
batch = x.shape[0]
mask = torch.ones_like(x, dtype = torch.bool, device = self.model.device)
if exists(self.mask_id):
assert self.reconstruct_seq, 'reconstruct_seq must be true if mask id is provided'
mask = mask.masked_fill(x == self.model.semantic_pad_id, False)
delete_mask = get_mask_subset_prob(mask, self.deletion_prob)
source = x.masked_fill(delete_mask, self.mask_id)
else:
delete_mask = get_mask_subset_prob(mask, self.deletion_prob)
source = rearrange(x[~delete_mask], '(b n) -> b n', b = batch)
if self.reconstruct_seq:
target = x
else:
target = rearrange(x[delete_mask], '(b n) -> b n', b = batch)
loss, logits = self.model(
source, target,
source_type = 'speech',
target_type = 'speech',
return_loss = True,
return_logits = True
)
return loss, logits
# wrapper for backtranslation task
class SemanticToTextWrapper(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic
):
super().__init__()
self.model = model
def forward(
self,
semantic_token_ids,
grapheme_token_ids,
):
source = semantic_token_ids
target = grapheme_token_ids
loss, logits = self.model(
source, target,
source_type = 'speech',
target_type = 'text',
return_loss = True,
return_logits = True
)
return loss, logits
# wrapper for text to semantic task
class TextToSemanticWrapper(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic
):
super().__init__()
self.model = model
def forward(
self,
grapheme_token_ids,
semantic_token_ids,
):
source = grapheme_token_ids
target = semantic_token_ids
loss, logits = self.model(
source, target,
source_type = 'text',
target_type = 'speech',
return_loss = True,
return_logits = True
)
return loss, logits
# wrapper for generating the pseudo-labelled audio to text dataset
class SemanticToTextDatasetGenerator(nn.Module):
@beartype
def __init__(
self,
model,
*,
dataset: Dataset,
folder = './generated-audio-text-pairs',
batch_size = 4,
delimiter_id: int = -1,
audio_pad_id = None,
text_pad_id = 0
):
super().__init__()
self.model = model
self.dataset = dataset
self.dl = get_dataloader(dataset, batch_size = batch_size)
self.delimiter_id = delimiter_id
self.audio_pad_id = audio_pad_id
self.text_pad_id = text_pad_id
self.folder = Path(folder)
self.folder.mkdir(exist_ok = True, parents = True)
def forward(
self,
max_length = 2048,
beam_search_decode = False,
**generate_kwargs
):
delimiter = torch.tensor([self.delimiter_id], device = self.model.device)
counter = 0
for audio, in self.dl:
audio_semantic_ids, text_ids = self.model.generate(
source = audio,
source_type = 'speech',
target_type = 'text',
return_source = True,
max_length = max_length,
beam_search_decode = beam_search_decode,
**generate_kwargs
)
for audio_semantic_id, text_id in zip(audio_semantic_ids, text_ids):
if exists(self.audio_pad_id):
audio_pad_mask = audio_semantic_id == self.audio_pad_id
audio_semantic_id = audio_semantic_id[~audio_pad_mask]
if exists(self.text_pad_id):
text_pad_mask = text_id == self.text_pad_id
text_id = text_id[~text_pad_mask]
row, _ = pack([audio_semantic_id, delimiter, text_id], '*')
path = str(self.folder / f'{counter}.pt')
torch.save(row, path)
counter += 1
| spear-tts-pytorch-main | spear_tts_pytorch/spear_tts_pytorch.py |
import re
from pathlib import Path
from shutil import rmtree
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Union, Optional, Tuple
import torch
from torch import nn, LongTensor, IntTensor
from torch.utils.data import ConcatDataset
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import Dataset, random_split
from audiolm_pytorch import FairseqVQWav2Vec, HubertWithKmeans
from audiolm_pytorch.data import get_dataloader
from audiolm_pytorch.optimizer import get_optimizer
from spear_tts_pytorch.spear_tts_pytorch import SpeechSpeechPretrainWrapper, TextToSemantic, SemanticToTextWrapper, TextToSemanticWrapper
from spear_tts_pytorch.data import GeneratedAudioTextDataset
from accelerate import Accelerator, DistributedType
# constants
IndicesTensor = Union[LongTensor, IntTensor]
# make sure only one trainer is instantiated
ONE_TRAINER_INSTANTIATED = False
def check_one_trainer():
global ONE_TRAINER_INSTANTIATED
assert not ONE_TRAINER_INSTANTIATED, 'only one Trainer can be instantiated at a time for training'
ONE_TRAINER_INSTANTIATED = True
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
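# e.g. accum_log({}, {'loss': 0.5}) -> {'loss': 0.5}; calling it again with {'loss': 0.25}
# on the result yields {'loss': 0.75}, which is how the gradient accumulation loops below
# sum per-microbatch losses into a single log entry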
def checkpoint_num_steps(checkpoint_path):
"""Returns the number of steps trained from a checkpoint based on the filename.
Filename format assumed to be something like "/path/to/speech.speech.20000.pt" which is
for 20k train steps. Returns 20000 in that case.
"""
results = re.findall(r'\d+', str(checkpoint_path))
if len(results) == 0:
return 0
return int(results[-1])
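# e.g. checkpoint_num_steps('results/speech.speech.20000.pt') -> 20000,
# and a path containing no digits falls back to 0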
class SpeechSpeechPretrainer(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
*,
num_train_steps,
num_warmup_steps,
batch_size,
dataset: Optional[Dataset] = None,
deletion_prob: float = 0.6,
reconstruct_seq: bool = False,
mask_id = None,
lr = 3e-4,
initial_lr = 1e-5,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
log_every = 10,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.model = model
self.wav2vec = wav2vec
self.train_wrapper = SpeechSpeechPretrainWrapper(
model = model,
wav2vec = wav2vec,
deletion_prob = deletion_prob,
reconstruct_seq = reconstruct_seq,
mask_id = mask_id
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.num_warmup_steps = num_warmup_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.lr = lr
self.initial_lr = initial_lr
self.optim = get_optimizer(model.parameters(), lr = lr, wd = wd)
self.scheduler = CosineAnnealingLR(self.optim, T_max = num_train_steps)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.log_every = log_every
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "num_warmup_steps": num_warmup_steps, "learning_rate": lr, "initial_learning_rate": lr}
self.accelerator.init_trackers("speechspeech", config=hps)
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.model),
optim = self.optim.state_dict(),
scheduler = self.scheduler.state_dict()
)
torch.save(pkg, path)
def load(self, path):
model = self.accelerator.unwrap_model(self.model)
pkg = model.load(path)
self.optim.load_state_dict(pkg['optim'])
self.scheduler.load_state_dict(pkg['scheduler'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def warmup(self, step):
if step < self.num_warmup_steps:
return self.initial_lr + (self.lr - self.initial_lr) * step / self.num_warmup_steps
else:
return self.lr
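# e.g. with initial_lr = 1e-5, lr = 3e-4 and num_warmup_steps = 1000, warmup(0) returns 1e-5,
# warmup(500) returns 1.55e-4 and warmup(1000) onwards returns 3e-4, after which the cosine
# annealing scheduler takes over in train_step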
def train_step(self):
steps = int(self.steps.item())
self.model.train()
# adjust the lr according to the schedule
if steps < self.num_warmup_steps:
# Apply warmup
lr = self.warmup(steps)
for param_group in self.optim.param_groups:
param_group['lr'] = lr
else:
# After warmup period, start to apply CosineAnnealingLR
self.scheduler.step()
# logs
logs = {}
# forward and backward over gradient accumulation microbatches
for _ in range(self.grad_accum_every):
x, = next(self.dl_iter)
loss, _ = self.train_wrapper(x)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
if not (steps % self.log_every):
self.print(f"{steps}: loss: {logs['loss']:0.3f}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
x, = next(self.valid_dl_iter)
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss, _ = self.train_wrapper(x)
self.print(f'{steps}: valid loss {valid_loss:0.3f}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'speech.speech.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
class SemanticToTextTrainer(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic,
*,
num_train_steps,
num_warmup_steps,
batch_size,
dataset: Optional[Dataset] = None,
lr = 3e-4,
initial_lr = 1e-5,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
log_every = 10,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.model = model
self.train_wrapper = SemanticToTextWrapper(model = model)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.num_warmup_steps = num_warmup_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# when doing backtranslation
# encoder is frozen (and presumably all the speech embeddings)
model.unfreeze_all()
model.freeze_speech_emb()
model.freeze_encoder()
# optimizers
# get_optimizer should filter out frozen parameters (ones with requires_grad set to False)
# https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/optimizer.py#L24
self.optim = get_optimizer(
model.parameters(),
lr = lr,
wd = wd,
filter_by_requires_grad = True
)
self.lr = lr
self.initial_lr = initial_lr
self.scheduler = CosineAnnealingLR(self.optim, T_max = num_train_steps)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.log_every = log_every
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "num_warmup_steps": num_warmup_steps, "learning_rate": lr, "initial_learning_rate": lr}
self.accelerator.init_trackers("semantictext", config=hps)
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.model),
optim = self.optim.state_dict(),
scheduler = self.scheduler.state_dict()
)
torch.save(pkg, path)
def load(self, path, restore_optimizer = True):
model = self.accelerator.unwrap_model(self.model)
pkg = model.load(path)
if restore_optimizer:
self.optim.load_state_dict(pkg['optim'])
self.scheduler.load_state_dict(pkg['scheduler'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def warmup(self, step):
if step < self.num_warmup_steps:
return self.initial_lr + (self.lr - self.initial_lr) * step / self.num_warmup_steps
else:
return self.lr
def train_step(self):
steps = int(self.steps.item())
self.model.train()
# adjust the lr according to the schedule
if steps < self.num_warmup_steps:
# Apply warmup
lr = self.warmup(steps)
for param_group in self.optim.param_groups:
param_group['lr'] = lr
else:
# After warmup period, start to apply CosineAnnealingLR
self.scheduler.step()
# logs
logs = {}
# forward and backward over gradient accumulation microbatches
for _ in range(self.grad_accum_every):
semantic_token_ids, grapheme_token_ids = next(self.dl_iter)
loss, _ = self.train_wrapper(semantic_token_ids = semantic_token_ids, grapheme_token_ids = grapheme_token_ids)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
if not (steps % self.log_every):
self.print(f"{steps}: loss: {logs['loss']:0.3f}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
semantic_token_ids, grapheme_token_ids = next(self.valid_dl_iter)
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss, _ = self.train_wrapper(semantic_token_ids = semantic_token_ids, grapheme_token_ids = grapheme_token_ids)
self.print(f'{steps}: valid loss {valid_loss:0.3f}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'semantic.text.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
class TextToSemanticTrainer(nn.Module):
@beartype
def __init__(
self,
model: TextToSemantic,
*,
num_train_steps,
num_warmup_steps,
batch_size,
dataset: Optional[Dataset] = None,
generated_audio_text_dataset_folder = None,
dataset_delimiter_id = -1,
lr = 3e-4,
initial_lr = 1e-5,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
log_every = 10,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
freeze_encoder_layers_below = 2
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.model = model
self.train_wrapper = TextToSemanticWrapper(model = model)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.num_warmup_steps = num_warmup_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# when doing text to semantic generation
# encoder is partially frozen and decoder is frozen
model.unfreeze_all()
model.freeze_speech_emb()
model.freeze_encoder_below_layer(freeze_encoder_layers_below)
model.freeze_decoder()
# optimizers
# get_optimizer should filter out frozen parameters (ones with requires_grad set to False)
# https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/optimizer.py#L24
self.optim = get_optimizer(
model.parameters(),
lr = lr,
wd = wd,
filter_by_requires_grad = True
)
self.lr = lr
self.initial_lr = initial_lr
self.scheduler = CosineAnnealingLR(self.optim, T_max = num_train_steps)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
datasets = []
if exists(dataset):
assert len(dataset) > 0 and is_bearable(dataset[0], Tuple[IndicesTensor, IndicesTensor]), 'audio-text dataset must return text and semantic token ids as a tuple of two tensors'
datasets.append(dataset)
if exists(generated_audio_text_dataset_folder):
pseudo_labelled_dataset = GeneratedAudioTextDataset(
folder = generated_audio_text_dataset_folder,
delimiter_id = dataset_delimiter_id
)
datasets.append(pseudo_labelled_dataset)
# concat the small labelled dataset with the pseudo-labelled dataset at the folder designated
assert len(datasets) > 0
self.ds = ConcatDataset(datasets)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.log_every = log_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "num_warmup_steps": num_warmup_steps, "learning_rate": lr, "initial_learning_rate": lr}
self.accelerator.init_trackers("textsemantic", config=hps)
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.model),
optim = self.optim.state_dict(),
scheduler = self.scheduler.state_dict()
)
torch.save(pkg, path)
def load(self, path, restore_optimizer = True):
model = self.accelerator.unwrap_model(self.model)
pkg = model.load(path)
if restore_optimizer:
self.optim.load_state_dict(pkg['optim'])
self.scheduler.load_state_dict(pkg['scheduler'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def warmup(self, step):
if step < self.num_warmup_steps:
return self.initial_lr + (self.lr - self.initial_lr) * step / self.num_warmup_steps
else:
return self.lr
def train_step(self):
steps = int(self.steps.item())
self.model.train()
# adjust the lr according to the schedule
if steps < self.num_warmup_steps:
# Apply warmup
lr = self.warmup(steps)
for param_group in self.optim.param_groups:
param_group['lr'] = lr
else:
# After warmup period, start to apply CosineAnnealingLR
self.scheduler.step()
# logs
logs = {}
# forward and backward over gradient accumulation microbatches
for _ in range(self.grad_accum_every):
semantic_token_ids, grapheme_token_ids = next(self.dl_iter)
loss, _ = self.train_wrapper(semantic_token_ids = semantic_token_ids, grapheme_token_ids = grapheme_token_ids)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
if not (steps % self.log_every):
self.print(f"{steps}: loss: {logs['loss']:0.3f}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
semantic_token_ids, grapheme_token_ids = next(self.valid_dl_iter)
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss, _ = self.train_wrapper(semantic_token_ids = semantic_token_ids, grapheme_token_ids = grapheme_token_ids)
self.print(f'{steps}: valid loss {valid_loss:0.3f}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'text.semantic.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
| spear-tts-pytorch-main | spear_tts_pytorch/trainer.py |
from pathlib import Path
import torch
from torch.utils.data import Dataset
from beartype import beartype
# mock dataset
class MockDataset(Dataset):
def __init__(self, length: int):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, ind):
return torch.randn(1024)
# generated audio-text dataset
class GeneratedAudioTextDataset(Dataset):
@beartype
def __init__(
self,
folder: str,
delimiter_id: int = -1
):
self.folder = Path(folder)
assert self.folder.exists() and self.folder.is_dir()
self.paths = list(self.folder.glob('*.pt'))
self.delimiter_id = delimiter_id
def __len__(self):
return len(self.paths)
def __getitem__(self, ind):
path = self.paths[ind]
tensor = torch.load(str(path))
delimiter_mask = tensor == self.delimiter_id
assert delimiter_mask.any(), 'delimiter (<audio> <delimiter> <text>) not found'
ind = (delimiter_mask.cumsum(dim = -1) == 0).sum().item()
return tensor[:ind], tensor[(ind + 1):]
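# Usage sketch (hypothetical folder, e.g. one produced by SemanticToTextDatasetGenerator):
#
#   ds = GeneratedAudioTextDataset('./generated-audio-text-pairs', delimiter_id = -1)
#   semantic_ids, text_ids = ds[0]   # each saved row is split at the delimiter token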
| spear-tts-pytorch-main | spear_tts_pytorch/data.py |
from setuptools import setup, find_packages
setup(
name = 'coordinate-descent-attention',
packages = find_packages(exclude=[]),
version = '0.0.11',
license='MIT',
description = 'Coordinate Descent Attention - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/coodinate-descent-attention',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
'colt5-attention>=0.9.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| coordinate-descent-attention-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from coordinate_descent_attention import Transformer, AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate transformer
model = Transformer(
num_tokens = 256,
dim = 512,
depth = 8,
seq_len = SEQ_LEN,
attn_use_coor_descent = True,
ff_use_coor_descent = True,
attn_coor_descent_sparsity_k = 2,
ff_coor_descent_sparsity_k = 128,
coor_descent_iters = 25
)
model = AutoregressiveWrapper(model).cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f"training loss: {loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str, "\n")
| coordinate-descent-attention-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, -torch.finfo(logits.dtype).max)
probs.scatter_(1, ind, val)
return probs
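# e.g. with thres = 0.9 and a vocab of 256 logits, k = int(0.1 * 256) = 25, so all but the
# 25 highest logits are set to the most negative float and can never be sampled after softmax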
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
pad_value = 0
):
super().__init__()
self.seq_len = net.seq_len
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(
self,
prompt,
seq_len,
temperature=1.0,
filter_thres=0.9,
**kwargs
):
b, t, device = *prompt.shape, prompt.device
out = prompt
for _ in range(seq_len):
logits = self.net(out[:, -self.seq_len:], **kwargs)[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim = -1)
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x, labels = x[:, :-1], x[:, 1:]
logits = self.net(x, **kwargs)
logits = rearrange(logits, "b n c -> b c n")
return F.cross_entropy(logits, labels)
| coordinate-descent-attention-main | coordinate_descent_attention/autoregressive_wrapper.py |
from coordinate_descent_attention.coordinate_descent_attention import Transformer, Attention
from coordinate_descent_attention.autoregressive_wrapper import AutoregressiveWrapper
| coordinate-descent-attention-main | coordinate_descent_attention/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from colt5_attention import coor_descent
from colt5_attention.triton_coor_descent import triton_coor_descent
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
use_coor_descent = False,
coor_descent_iters = 20,
coor_descent_sparsity_k = None,
coor_descent_eps = 1e-1,
coor_descent_eps_init = 4.,
coor_descent_eps_decay = 0.7,
):
super().__init__()
dim_hidden = int(dim * mult)
self.use_coor_descent = use_coor_descent
self.coor_descent_iters = coor_descent_iters
self.coor_descent_sparsity_k = default(coor_descent_sparsity_k, dim_hidden // 10)
self.coor_descent_eps = coor_descent_eps
self.coor_descent_eps_init = coor_descent_eps_init
self.coor_descent_eps_decay = coor_descent_eps_decay
self.proj_in = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim_hidden),
)
self.proj_out = nn.Linear(dim_hidden, dim)
def forward(self, x):
x = self.proj_in(x)
if self.use_coor_descent:
x = triton_coor_descent(
x,
n_iters = self.coor_descent_iters,
k = self.coor_descent_sparsity_k,
eps = self.coor_descent_eps,
eps_init = self.coor_descent_eps_init,
eps_decay = self.coor_descent_eps_decay,
checkpoint_segments = self.coor_descent_iters // 5
)
else:
x = F.gelu(x)
return self.proj_out(x)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
use_coor_descent = False,
coor_descent_iters = 20,
coor_descent_sparsity_k = 1,
coor_descent_eps = 1e-1,
coor_descent_eps_init = 4.,
coor_descent_eps_decay = 0.7,
attn_null_kv = 0,
learned_sparsity_k = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
dim_inner = dim_head * heads
self.use_coor_descent = use_coor_descent
self.coor_descent_iters = coor_descent_iters
self.coor_descent_sparsity_k = coor_descent_sparsity_k
self.coor_descent_eps = coor_descent_eps
self.coor_descent_eps_init = coor_descent_eps_init
self.coor_descent_eps_decay = coor_descent_eps_decay
self.to_learned_k = None
if learned_sparsity_k:
self.to_learned_k = nn.Linear(dim, heads)
nn.init.constant_(self.to_learned_k.bias, -10)
self.norm = nn.LayerNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, heads, attn_null_kv, dim_head))
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(self, x):
b, n, h, device, dtype = *x.shape[:2], self.heads, x.device, x.dtype
x = self.norm(x)
# get queries, keys, values, and split heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# add null key value if needed
if self.null_kv.numel() > 0:
nk, nv = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# measure similarity
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
# whether to use coordinate descent or not
if self.use_coor_descent:
if exists(self.to_learned_k):
sparsity_k = self.to_learned_k(x).sigmoid() * (self.coor_descent_sparsity_k - 1) + 1
sparsity_k = rearrange(sparsity_k, 'b i h -> (b h i)')
else:
sparsity_k = torch.ones(i, device = device, dtype = dtype) * self.coor_descent_sparsity_k
causal_mask = repeat(causal_mask, 'i j -> b h i j', b = sim.shape[0], h = sim.shape[1])
attn = triton_coor_descent(
sim,
n_iters = self.coor_descent_iters,
k = sparsity_k,
eps = self.coor_descent_eps,
eps_decay = self.coor_descent_eps_decay,
eps_init = self.coor_descent_eps_init,
mask = ~causal_mask,
checkpoint_segments = self.coor_descent_iters // 5
)
else:
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
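# Usage sketch (vanilla softmax path; the coordinate descent path additionally requires
# the triton kernels from colt5-attention and a CUDA device):
#
#   attn = Attention(dim = 512, dim_head = 64, heads = 8, use_coor_descent = False)
#   out = attn(torch.randn(1, 1024, 512))   # (1, 1024, 512), causally masked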
# transformer
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
seq_len,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_use_coor_descent = False,
ff_use_coor_descent = False,
attn_coor_descent_sparsity_k = 2,
ff_coor_descent_sparsity_k = 2,
coor_descent_iters = 15,
coor_descent_eps = 1e-1,
attn_null_kv = 0,
learned_sparsity_k = False
):
super().__init__()
self.seq_len = seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(seq_len, dim)
self.layers = nn.ModuleList([])
coor_kwargs = dict(
coor_descent_iters = coor_descent_iters,
coor_descent_eps = coor_descent_eps,
)
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(
dim,
dim_head = dim_head,
heads = heads,
use_coor_descent = attn_use_coor_descent,
coor_descent_sparsity_k = attn_coor_descent_sparsity_k,
attn_null_kv = attn_null_kv,
learned_sparsity_k = learned_sparsity_k,
**coor_kwargs
),
FeedForward(
dim,
ff_mult,
use_coor_descent = ff_use_coor_descent,
coor_descent_sparsity_k = ff_coor_descent_sparsity_k,
**coor_kwargs
)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
n, device = x.shape[-1], x.device
assert n <= self.seq_len
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(n, device = device))
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.to_logits(x)
| coordinate-descent-attention-main | coordinate_descent_attention/coordinate_descent_attention.py |
# -*- coding: utf-8 -*-
"""HyenaDNA training & inference example (Public)
This code is adapted from the original colab tutorial on HyenaDNA. Check that out for an easier entry point into the code.
We provide the code here as an example for those who want something outside of Colab, with Hugging Face integration.
Original file is located at
https://colab.research.google.com/drive/1wyVEQd4R3HYLTUOXEEQmp_I8aNC_aLhL
"""
#@title Imports
# for HyenaDNA specifically
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange
from typing import Optional
from torch import Tensor
from torchvision.ops import StochasticDepth
from collections import namedtuple
import numpy as np
import os
import json
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
"""# HyenaDNA
"""
#@title Hyena layer
def fftconv(u, k, D):
"""
We apply a convolution through the fourier domain (from the Convolution Theorem)
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return out.to(dtype=u.dtype)
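# Optional sanity check (a minimal, hypothetical sketch): with fft_size = 2 * seqlen the FFT
# path computes an exact causal convolution, so it should match a direct O(L^2) evaluation of
# y[t] = sum_{s<=t} u[s] * k[t-s] plus the skip term u * D, up to float tolerance.
_u, _k, _D = torch.randn(2, 4, 8), torch.randn(4, 8), torch.randn(4)
_ref = torch.zeros_like(_u)
for _t in range(8):
    for _s in range(_t + 1):
        _ref[..., _t] += _u[..., _s] * _k[:, _t - _s]
_ref = _ref + _u * _D.unsqueeze(-1)
assert torch.allclose(fftconv(_u, _k, _D), _ref, atol=1e-4)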
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
class Sin(nn.Module):
"""The Sin activation function for the Hyena Filter function."""
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = nn.Parameter(w * torch.ones(1, dim)) if train_freq else w * torch.ones(1, dim)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float=1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
# The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
class ExponentialModulation(OptimModule):
"""The window function applied to the output of the (MLP) filter function."""
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
modulate: bool=True,
shift: float = 0.05,
**kwargs
):
super().__init__()
self.modulate = modulate
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
if self.modulate:
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
normalized=False,
**kwargs
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
self.d_model = d_model
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
self.emb_dim = emb_dim
assert emb_dim % 2 != 0 and emb_dim >= 3, "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.seq_len = seq_len
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
h = self.modulation(t, h)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None: k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
y = fftconv(x, k, bias)
return y
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
dropout=0.0,
filter_dropout=0.0,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
l_max: (int): Maximum input sequence length. Defaults to None
order: (int): Depth of the Hyena recurrence. Defaults to 2
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
"""
super().__init__()
self.d_model = d_model
self.l_max = l_max
self.order = order
inner_width = d_model * (order + 1)
self.dropout = nn.Dropout(dropout)
self.in_proj = nn.Linear(d_model, inner_width)
self.out_proj = nn.Linear(d_model, d_model)
self.short_filter = nn.Conv1d(
inner_width,
inner_width,
3,
padding=2,
groups=inner_width
)
self.filter_fn = HyenaFilter(
d_model * (order - 1),
order=filter_order,
seq_len=l_max,
channels=1,
dropout=filter_dropout,
**filter_args
)
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, 'b l d -> b d l')
uc = self.short_filter(u)[...,:l_filter]
*x, v = uc.split(self.d_model, dim=1)
k = self.filter_fn.filter(l_filter)[0]
k = rearrange(k, 'l (o d) -> o d l', o=self.order - 1)
bias = rearrange(self.filter_fn.bias, '(o d) -> o d', o=self.order - 1)
for o, x_i in enumerate(reversed(x[1:])):
v = self.dropout(v * x_i)
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o])
y = rearrange(v * x[0], 'b d l -> b l d')
y = self.out_proj(y)
return y
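#@title HyenaOperator shape check (added example)
# A minimal usage sketch with hypothetical sizes: the operator maps
# (batch, seq_len, d_model) -> (batch, seq_len, d_model) for any seq_len <= l_max.
_hyena = HyenaOperator(d_model=64, l_max=128, order=2, filter_order=16)
assert _hyena(torch.randn(1, 128, 64)).shape == (1, 128, 64)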
#@title Self-Attention (alternative)
"""
If you'd like to try the HyenaDNA model using attention instead, you can. ie,
use a regular decoder only Transformer.
"""
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
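# e.g. for qkv of shape (batch, seqlen, 3, heads, head_dim) such as torch.randn(2, 16, 3, 4, 32),
# SelfAttention(causal=True)(qkv) returns the attended values with shape (2, 16, 4, 32),
# i.e. (batch, seqlen, heads, head_dim)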
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
# output projection always have the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
is the is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
#@title MLP layer
"""
The MLP layer after the mixer layer (HyenaOperator).
"""
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
#@title Block layer (Hyena + MLP layers)
"""
A block consists of a Mixer layer (Hyena or attention), and a MLP layer.
"""
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls()
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
# mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
mixer_cls = partial(HyenaOperator, **layer)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
#@title Backbone model (stack of blocks)
"""
A backbone model consists of a stack of blocks. If you use attention, then
positional embeddings are included. When using Hyena, then the pos emb
revert to doing nothing.
"""
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
If max_position_embeddings <= 0, there's no position embeddings
If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
then project up to embed_dim
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
# note max_position_embeddings is 0 for Hyena, and therefore isn't used
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
#@title Decoder head layer
"""
A simple decoder head (a linear layer) to predict a sequence-level classification.
You have the option to average across all tokens in a sequence ("pool") or to use the
"last" token to classify. Those two modes worked best for us, but we provide
other modes as well (a small usage sketch follows the class).
We only need this head for classification; otherwise we'll use the hidden
states of the backbone directly as embeddings.
"""
class SequenceDecoder(nn.Module):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
                assert isinstance(l_output, int)  # overridden by the value passed in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
            # Running mean over the sequence, evaluated at the last l_output positions;
            # implemented by the cumulative-sum function defined just below.
def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
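# A usage sketch (not executed) for the decoder head above; d_model=128, 2 classes, and a
# batch of 4 sequences of length 100 are made-up numbers for illustration only.
def _demo_sequence_decoder():
    x = torch.randn(4, 100, 128)                   # (batch, seqlen, d_model) hidden states
    pool_head = SequenceDecoder(d_model=128, d_output=2, l_output=0, mode='pool')
    last_head = SequenceDecoder(d_model=128, d_output=2, l_output=0, mode='last')
    logits_pool = pool_head(x)                     # mean over all tokens, then linear -> (4, 2)
    logits_last = last_head(x)                     # final token only, then linear -> (4, 2)
    return logits_pool.shape, logits_last.shape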
#@title Model (backbone + head)
"""
Putting it all together, the model consists of a backbone model
and a decoder head (you can turn the head off to get embeddings only).
Here we use a simple head for multi-class classification, but you
can also swap the head to do next-token prediction. We defer to the main
HyenaDNA repo for that code, since pretraining with next-token prediction isn't quite
feasible on Colab. (A small usage sketch follows the class.)
"""
class HyenaDNAModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None, attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, use_head=False, n_classes: int = 2,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.use_head = use_head
# check if layer (config) has d_model (HF code differs from main Safari code)
if 'd_model' not in layer:
layer['d_model'] = d_model
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
# we only need a head if doing classification, otherwise we'll use the
# hidden states as embeddings
if self.use_head:
self.head = SequenceDecoder(d_model=d_model, d_output=n_classes, l_output=0, mode='pool')
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
# if self.use_head:
# self.tie_weights()
# def tie_weights(self):
# self.head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
if self.use_head:
return self.head(hidden_states)
else:
return hidden_states
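# A minimal sketch (not executed) of the two ways the model above is typically used.
# `config` is assumed to be a dict of backbone hyperparameters, e.g. the config.json
# shipped with a pretrained checkpoint; the input ids below are illustrative only.
def _demo_hyenadna_model(config):
    ids = torch.randint(7, 11, (1, 1024))  # A/C/G/T ids under the character tokenizer below
    embed_model = HyenaDNAModel(**config, use_head=False)
    embeddings = embed_model(ids)           # (1, 1024, d_model) hidden states as embeddings
    clf_model = HyenaDNAModel(**config, use_head=True, n_classes=2)
    logits = clf_model(ids)                 # (1, 2) sequence-level classification logits
    return embeddings.shape, logits.shape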
"""# Data pipeline
"""
#@title Tokenizer
"""
Just a simple character-level tokenizer.
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenizer for Hugging Face Transformers.
This is heavily inspired by CanineTokenizer in the transformers package.
"""
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
[UNK] with id=6. The following is a list of all the special tokens and
their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
An id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
return cls.from_config(cfg)
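# A short usage sketch (not executed). With characters=['A','C','G','T','N'], the special
# tokens take ids 0-6 as documented above and the nucleotides map to 7-11; the expected
# output noted below follows directly from that scheme.
def _demo_character_tokenizer():
    tok = CharacterTokenizer(characters=['A', 'C', 'G', 'T', 'N'], model_max_length=16)
    ids = tok("ACGTN", add_special_tokens=False)["input_ids"]
    return ids  # expected: [7, 8, 9, 10, 11]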
| hyena-dna-main | standalone_hyenadna.py |
#@title Huggingface Pretrained Wrapper
"""
This script is a simple HuggingFace wrapper around a HyenaDNA model, to enable a one-click example
of how to load the pretrained weights and get embeddings.
It will instantiate a HyenaDNA model (the model class is in `standalone_hyenadna.py`) and handle downloading the pretrained weights from HuggingFace.
Check out the colab notebook for a simpler and more complete walk-through of how to use HyenaDNA with pretrained weights.
"""
import json
import os
import subprocess
import torch
# import transformers
from transformers import PreTrainedModel
import re
from standalone_hyenadna import HyenaDNAModel
from standalone_hyenadna import CharacterTokenizer
# helper 1
def inject_substring(orig_str):
"""Hack to handle matching keys between models trained with and without
gradient checkpointing."""
# modify for mixer keys
pattern = r"\.mixer"
injection = ".mixer.layer"
modified_string = re.sub(pattern, injection, orig_str)
# modify for mlp keys
pattern = r"\.mlp"
injection = ".mlp.layer"
modified_string = re.sub(pattern, injection, modified_string)
return modified_string
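# A tiny sanity check (not executed) of the key surgery above; the key name is a
# hypothetical example, not one guaranteed to appear in a real checkpoint.
def _demo_inject_substring():
    key = 'model.backbone.layers.0.mixer.filter_fn.bias'
    assert inject_substring(key) == 'model.backbone.layers.0.mixer.layer.filter_fn.bias'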
# helper 2
def load_weights(scratch_dict, pretrained_dict, checkpointing=False):
"""Loads pretrained (backbone only) weights into the scratch state dict."""
# loop thru state dict of scratch
# find the corresponding weights in the loaded model, and set it
# need to do some state dict "surgery"
for key, value in scratch_dict.items():
if 'backbone' in key:
# the state dicts differ by one prefix, '.model', so we add that
key_loaded = 'model.' + key
# breakpoint()
# need to add an extra ".layer" in key
if checkpointing:
key_loaded = inject_substring(key_loaded)
try:
scratch_dict[key] = pretrained_dict[key_loaded]
            except KeyError:
                raise Exception(f'key mismatch in the state dicts: {key_loaded} not found in the pretrained weights!')
# scratch_dict has been updated
return scratch_dict
class HyenaDNAPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
base_model_prefix = "hyenadna"
def __init__(self, config):
pass
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
@classmethod
def from_pretrained(cls,
path,
model_name,
download=False,
config=None,
device='cpu',
use_head=False,
n_classes=2,
):
# first check if it is a local path
pretrained_model_name_or_path = os.path.join(path, model_name)
if os.path.isdir(pretrained_model_name_or_path) and download == False:
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
else:
hf_url = f'https://huggingface.co/LongSafari/{model_name}'
subprocess.run(f'rm -rf {pretrained_model_name_or_path}', shell=True)
command = f'mkdir -p {path} && cd {path} && git lfs install && git clone {hf_url}'
subprocess.run(command, shell=True)
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
scratch_model = HyenaDNAModel(**config, use_head=use_head, n_classes=n_classes) # the new model format
loaded_ckpt = torch.load(
os.path.join(pretrained_model_name_or_path, 'weights.ckpt'),
map_location=torch.device(device)
)
        # need to load weights slightly differently if using gradient checkpointing
        checkpointing = config.get("checkpoint_mixer", False) == True
# grab state dict from both and load weights
state_dict = load_weights(scratch_model.state_dict(), loaded_ckpt['state_dict'], checkpointing=checkpointing)
# scratch model has now been updated
scratch_model.load_state_dict(state_dict)
print("Loaded pretrained weights ok!")
return scratch_model
####################################################################################################
"""# Inference (450k to 1M tokens)!
If all you're interested in is getting embeddings on long DNA sequences
(inference), then we can do that right here in Colab!
* We provide an example of how to load the weights from HuggingFace.
* On the free tier, which uses a
T4 GPU w/16GB of memory, we can process 450k tokens / nucleotides.
* For processing 1M tokens, you'll need an A100, which Colab offers as a paid tier.
* (Don't forget to run the entire notebook above too)
--
To pretrain or fine-tune the 1M long-sequence model (8 layers, d_model=256),
you'll need 8 A100s (80GB each), and all that code is in the main repo!
"""
#@title Single example
import json
import os
import subprocess
# import transformers
from transformers import PreTrainedModel
def inference_single():
'''
    this selects which backbone to use, and grabs the weights/config from HF
    5 options:
'hyenadna-tiny-1k-seqlen' # fine-tune on colab ok
'hyenadna-small-32k-seqlen'
'hyenadna-medium-160k-seqlen' # inference only on colab
'hyenadna-medium-450k-seqlen' # inference only on colab
'hyenadna-large-1m-seqlen' # inference only on colab
'''
# you only need to select which model to use here, we'll do the rest!
pretrained_model_name = 'hyenadna-small-32k-seqlen'
max_lengths = {
'hyenadna-tiny-1k-seqlen': 1024,
'hyenadna-small-32k-seqlen': 32768,
'hyenadna-medium-160k-seqlen': 160000,
'hyenadna-medium-450k-seqlen': 450000, # T4 up to here
'hyenadna-large-1m-seqlen': 1_000_000, # only A100 (paid tier)
}
max_length = max_lengths[pretrained_model_name] # auto selects
# data settings:
use_padding = True
rc_aug = False # reverse complement augmentation
add_eos = False # add end of sentence token
# we need these for the decoder head, if using
use_head = False
n_classes = 2 # not used for embeddings only
    # you can override with your own backbone config here if you want,
    # otherwise leave this as None and we'll load the config from HF
backbone_cfg = None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using device:", device)
# instantiate the model (pretrained here)
if pretrained_model_name in ['hyenadna-tiny-1k-seqlen',
'hyenadna-small-32k-seqlen',
'hyenadna-medium-160k-seqlen',
'hyenadna-medium-450k-seqlen',
'hyenadna-large-1m-seqlen']:
# use the pretrained Huggingface wrapper instead
model = HyenaDNAPreTrainedModel.from_pretrained(
'./checkpoints',
pretrained_model_name,
download=True,
config=backbone_cfg,
device=device,
use_head=use_head,
n_classes=n_classes,
)
# from scratch
elif pretrained_model_name is None:
model = HyenaDNAModel(**backbone_cfg, use_head=use_head, n_classes=n_classes)
# create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'], # add DNA characters, N is uncertain
model_max_length=max_length + 2, # to account for special tokens, like EOS
add_special_tokens=False, # we handle special tokens elsewhere
padding_side='left', # since HyenaDNA is causal, we pad on the left
)
#### Single embedding example ####
    # create a sample sequence of length max_length (32k for the model selected above) and prepare it
sequence = 'ACTG' * int(max_length/4)
tok_seq = tokenizer(sequence)
tok_seq = tok_seq["input_ids"] # grab ids
# place on device, convert to tensor
tok_seq = torch.LongTensor(tok_seq).unsqueeze(0) # unsqueeze for batch dim
tok_seq = tok_seq.to(device)
# prep model and forward
model.to(device)
model.eval()
with torch.inference_mode():
embeddings = model(tok_seq)
print(embeddings.shape) # embeddings here!
# run this to get embeddings on the sample sequence above
inference_single()
# to run this, just call:
# python huggingface.py
| hyena-dna-main | huggingface.py |
import copy
import os
import random
import time
from functools import partial, wraps
from typing import Callable, List, Sequence
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
from pytorch_lightning.strategies.ddp import DDPStrategy
from tqdm.auto import tqdm
import src.models.nn.utils as U
import src.utils as utils
import src.utils.train
from src.dataloaders import SequenceDataset # TODO make registry
from src.tasks import decoders, encoders, tasks
from src.utils import registry
from src.utils.optim_groups import add_optimizer_hooks
log = src.utils.train.get_logger(__name__)
# Turn on TensorFloat32 (speeds up large model training substantially)
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Lots of annoying hacks to get WandbLogger to continuously retry on failure
class DummyExperiment:
"""Dummy experiment."""
def nop(self, *args, **kw):
pass
def __getattr__(self, _):
return self.nop
def __getitem__(self, idx) -> "DummyExperiment":
# enables self.logger.experiment[0].add_image(...)
return self
def __setitem__(self, *args, **kwargs) -> None:
pass
def rank_zero_experiment(fn: Callable) -> Callable:
"""Returns the real experiment on rank 0 and otherwise the DummyExperiment."""
@wraps(fn)
def experiment(self):
@rank_zero_only
def get_experiment():
return fn(self)
return get_experiment() or DummyExperiment()
return experiment
class CustomWandbLogger(WandbLogger):
def __init__(self, *args, **kwargs):
"""Modified logger that insists on a wandb.init() call and catches wandb's error if thrown."""
super().__init__(*args, **kwargs)
@property
@rank_zero_experiment
def experiment(self):
r"""
Actual wandb object. To use wandb features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
attach_id = getattr(self, "_attach_id", None)
if wandb.run is not None:
# wandb process already created in this instance
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
elif attach_id is not None and hasattr(wandb, "_attach"):
# attach to wandb process referenced
self._experiment = wandb._attach(attach_id)
else:
# create new wandb process
while True:
try:
self._experiment = wandb.init(**self._wandb_init)
break
except Exception as e:
print("wandb Exception:\n", e)
t = random.randint(30, 60)
print(f"Sleeping for {t} seconds")
time.sleep(t)
# define default x-axis
if getattr(self._experiment, "define_metric", None):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True)
return self._experiment
class SequenceLightningModule(pl.LightningModule):
def __init__(self, config):
# Disable profiling executor. This reduces memory and increases speed.
try:
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
except AttributeError:
pass
super().__init__()
# Passing in config expands it one level, so can access by self.hparams.train instead of self.hparams.config.train
self.save_hyperparameters(config, logger=False)
# Dataset arguments
self.dataset = SequenceDataset.registry[self.hparams.dataset._name_](
**self.hparams.dataset
)
# Check hparams
self._check_config()
# PL has some bugs, so add hooks and make sure they're only called once
self._has_setup = False
self.setup() ## Added by KS
def setup(self, stage=None):
if not self.hparams.train.disable_dataset:
self.dataset.setup()
# We need to set up the model in setup() because for some reason when training with DDP, one GPU uses much more memory than the others
# In order to not overwrite the model multiple times during different stages, we need this hack
# TODO PL 1.5 seems to have an option to skip hooks to avoid this
# https://github.com/PyTorchLightning/pytorch-lightning/issues/5410#issuecomment-762257024
if self._has_setup:
return
else:
self._has_setup = True
# Convenience feature: if model specifies encoder, combine it with main encoder
encoder_cfg = utils.to_list(self.hparams.encoder) + utils.to_list(
self.hparams.model.pop("encoder", None)
)
decoder_cfg = utils.to_list(
self.hparams.model.pop("decoder", None)
) + utils.to_list(self.hparams.decoder)
# Instantiate model
self.model = utils.instantiate(registry.model, self.hparams.model)
if (name := self.hparams.train.post_init_hook['_name_']) is not None:
kwargs = self.hparams.train.post_init_hook.copy()
del kwargs['_name_']
for module in self.modules():
if hasattr(module, name):
getattr(module, name)(**kwargs)
# Instantiate the task
self.task = utils.instantiate(
tasks.registry, self.hparams.task, dataset=self.dataset, model=self.model
)
# Create encoders and decoders
encoder = encoders.instantiate(
encoder_cfg, dataset=self.dataset, model=self.model
)
decoder = decoders.instantiate(
decoder_cfg, model=self.model, dataset=self.dataset
)
# Extract the modules so they show up in the top level parameter count
self.encoder = U.PassthroughSequential(self.task.encoder, encoder)
self.decoder = U.PassthroughSequential(decoder, self.task.decoder)
self.loss = self.task.loss
self.loss_val = self.task.loss
if hasattr(self.task, 'loss_val'):
self.loss_val = self.task.loss_val
self.metrics = self.task.metrics
self.train_torchmetrics = self.task.train_torchmetrics
self.val_torchmetrics = self.task.val_torchmetrics
self.test_torchmetrics = self.task.test_torchmetrics
def load_state_dict(self, state_dict, strict=False):
if self.hparams.train.pretrained_model_state_hook['_name_'] is not None:
model_state_hook = utils.instantiate(
registry.model_state_hook,
self.hparams.train.pretrained_model_state_hook.copy(),
partial=True,
)
state_dict = model_state_hook(self.model, state_dict)
print("Custom load_state_dict function is running.")
# strict==True will require all modules to match
# strict==False can allow encoder/decoder to be loaded from scratch too
return super().load_state_dict(state_dict, strict=strict)
def _check_config(self):
assert self.hparams.train.state.mode in [None, "none", "null", "reset", "bptt", "tbptt"]
assert (
(n := self.hparams.train.state.n_context) is None
or isinstance(n, int)
and n >= 0
)
assert (
(n := self.hparams.train.state.n_context_eval) is None
or isinstance(n, int)
and n >= 0
)
def _initialize_state(self):
"""Called at model setup and start of epoch to completely reset state"""
self._state = None
self._memory_chunks = []
def _reset_state(self, batch, device=None):
"""Called to construct default_state when necessary, e.g. during BPTT"""
device = device or batch[0].device
self._state = self.model.default_state(*batch[0].shape[:1], device=device)
def _detach_state(self, state):
if isinstance(state, torch.Tensor):
return state.detach()
elif isinstance(state, tuple):
return tuple(self._detach_state(s) for s in state)
elif isinstance(state, list):
return [self._detach_state(s) for s in state]
elif isinstance(state, dict):
return {k: self._detach_state(v) for k, v in state.items()}
elif state is None:
return None
else:
raise NotImplementedError
def _process_state(self, batch, batch_idx, train=True):
"""Handle logic for state context."""
# Number of context steps
key = "n_context" if train else "n_context_eval"
n_context = self.hparams.train.state.get(key)
# Don't need to do anything if 0 context steps. Make sure there is no state
if n_context == 0 and self.hparams.train.state.mode not in ['tbptt']:
self._initialize_state()
return
# Reset state if needed
if self.hparams.train.state.mode == "reset":
if batch_idx % (n_context + 1) == 0:
self._reset_state(batch)
# Pass through memory chunks
elif self.hparams.train.state.mode == "bptt":
self._reset_state(batch)
with torch.no_grad(): # should be unnecessary because individual modules should handle this
for _batch in self._memory_chunks:
self.forward(_batch)
# Prepare for next step
self._memory_chunks.append(batch)
self._memory_chunks = self._memory_chunks[-n_context:]
elif self.hparams.train.state.mode == 'tbptt':
_, _, z = batch
reset = z["reset"]
if reset:
self._reset_state(batch)
else:
self._state = self._detach_state(self._state)
# def forward(self, batch):
# """Passes a batch through the encoder, backbone, and decoder"""
# # z holds arguments such as sequence length
# x, y, *z = batch # z holds extra dataloader info such as resolution
# if len(z) == 0:
# z = {}
# else:
# assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
# z = z[0]
# x, w = self.encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
# x, state = self.model(x, **w, state=self._state)
# self._state = state
# x, w = self.decoder(x, state=state, **z)
# return x, y, w
def forward(self, batch):
return self.task.forward(batch, self.encoder, self.model, self.decoder, self._state)
def step(self, x_t):
x_t, *_ = self.encoder(x_t) # Potential edge case for encoders that expect (B, L, H)?
x_t, state = self.model.step(x_t, state=self._state)
self._state = state
# x_t = x_t[:, None, ...] # Dummy length
# x_t, *_ = self.decoder(x_t, state=state)
# x_t = x_t[:, 0, ...]
x_t, *_ = self.decoder.step(x_t, state=state)
return x_t
def _shared_step(self, batch, batch_idx, prefix="train"):
self._process_state(batch, batch_idx, train=(prefix == "train"))
x, y, w = self.forward(batch)
# Loss
if prefix == 'train':
loss = self.loss(x, y, **w)
else:
loss = self.loss_val(x, y, **w)
# Metrics
metrics = self.metrics(x, y, **w)
metrics["loss"] = loss
metrics = {f"{prefix}/{k}": v for k, v in metrics.items()}
# Calculate torchmetrics
torchmetrics = getattr(self, f'{prefix}_torchmetrics')
torchmetrics(x, y, loss=loss)
log_on_step = 'eval' in self.hparams and self.hparams.eval.get('log_on_step', False) and prefix == 'train'
self.log_dict(
metrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
# log the whole dict, otherwise lightning takes the mean to reduce it
# https://pytorch-lightning.readthedocs.io/en/stable/visualize/logging_advanced.html#enable-metrics-for-distributed-training
self.log_dict(
torchmetrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def on_train_epoch_start(self):
# Reset training torchmetrics
self.task._reset_torchmetrics("train")
def training_epoch_end(self, outputs):
# Log training torchmetrics
super().training_epoch_end(outputs)
def on_validation_epoch_start(self):
# Reset all validation torchmetrics
for name in self.val_loader_names:
self.task._reset_torchmetrics(name)
def validation_epoch_end(self, outputs):
# Log all validation torchmetrics
super().validation_epoch_end(outputs)
def on_test_epoch_start(self):
# Reset all test torchmetrics
for name in self.test_loader_names:
self.task._reset_torchmetrics(name)
def test_epoch_end(self, outputs):
# Log all test torchmetrics
super().test_epoch_end(outputs)
def training_step(self, batch, batch_idx, dataloader_idx=0):
loss = self._shared_step(batch, batch_idx, prefix="train")
# Log the loss explicitly so it shows up in WandB
# Note that this currently runs into a bug in the progress bar with ddp (as of 1.4.6)
# https://github.com/PyTorchLightning/pytorch-lightning/pull/9142
# We additionally log the epochs under 'trainer' to get a consistent prefix with 'global_step'
loss_epoch = {"trainer/loss": loss, "trainer/epoch": self.current_epoch}
self.log_dict(
loss_epoch,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
# Log any extra info that the models want to expose (e.g. output norms)
metrics = {}
for module in list(self.modules())[1:]:
if hasattr(module, "metrics"):
metrics.update(module.metrics)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def validation_step(self, batch, batch_idx, dataloader_idx=0):
ema = (
self.val_loader_names[dataloader_idx].endswith("/ema")
and self.optimizers().optimizer.stepped
) # There's a bit of an annoying edge case with the first (0-th) epoch; it has to be excluded due to the initial sanity check
if ema:
self.optimizers().swap_ema()
loss = self._shared_step(
batch, batch_idx, prefix=self.val_loader_names[dataloader_idx]
)
if ema:
self.optimizers().swap_ema()
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, prefix=self.test_loader_names[dataloader_idx]
)
def configure_optimizers(self):
# Set zero weight decay for some params
if 'optimizer_param_grouping' in self.hparams.train:
add_optimizer_hooks(self.model, **self.hparams.train.optimizer_param_grouping)
# Normal parameters
all_params = list(self.parameters())
params = [p for p in all_params if not hasattr(p, "_optim")]
optimizer = utils.instantiate(registry.optimizer, self.hparams.optimizer, params)
del self.hparams.optimizer._name_
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_params if hasattr(p, "_optim")]
hps = [
# dict(s) for s in set(frozenset(hp.items()) for hp in hps)
dict(s) for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
# dict(s) for s in dict.fromkeys(frozenset(hp.items()) for hp in hps)
] # Unique dicts
print("Hyperparameter groups", hps)
for hp in hps:
params = [p for p in all_params if getattr(p, "_optim", None) == hp]
optimizer.add_param_group(
{"params": params, **self.hparams.optimizer, **hp}
)
### Layer Decay ###
if self.hparams.train.layer_decay['_name_'] is not None:
get_num_layer = utils.instantiate(
registry.layer_decay,
self.hparams.train.layer_decay['_name_'],
partial=True,
)
# Go through all parameters and get num layer
layer_wise_groups = {}
num_max_layers = 0
for name, p in self.named_parameters():
# Get layer id for each parameter in the model
layer_id = get_num_layer(name)
# Add to layer wise group
if layer_id not in layer_wise_groups:
layer_wise_groups[layer_id] = {
'params': [],
'lr': None,
'weight_decay': self.hparams.optimizer.weight_decay
}
layer_wise_groups[layer_id]['params'].append(p)
if layer_id > num_max_layers: num_max_layers = layer_id
# Update lr for each layer
for layer_id, group in layer_wise_groups.items():
group['lr'] = self.hparams.optimizer.lr * (self.hparams.train.layer_decay.decay ** (num_max_layers - layer_id))
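            # Worked example with made-up numbers: with base lr = 1e-3, decay = 0.9 and
            # num_max_layers = 4, layer 4 keeps lr = 1e-3, layer 3 gets 9e-4, and layer 0
            # gets 1e-3 * 0.9**4, about 6.6e-4, i.e. earlier layers receive smaller learning rates.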
# Reset the torch optimizer's param groups
optimizer.param_groups = []
for layer_id, group in layer_wise_groups.items():
optimizer.add_param_group(group)
# Print optimizer info for debugging
keys = set([k for hp in hps for k in hp.keys()]) # Special hparams
utils.train.log_optimizer(log, optimizer, keys)
# Configure scheduler
if "scheduler" not in self.hparams:
return optimizer
lr_scheduler = utils.instantiate(
registry.scheduler, self.hparams.scheduler, optimizer
)
scheduler = {
"scheduler": lr_scheduler,
"interval": self.hparams.train.interval, # 'epoch' or 'step'
"monitor": self.hparams.train.monitor,
"name": "trainer/lr", # default is e.g. 'lr-AdamW'
}
# See documentation for how to configure the return
# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.core.lightning.html#pytorch_lightning.core.lightning.LightningModule.configure_optimizers
return [optimizer], [scheduler]
def train_dataloader(self):
return self.dataset.train_dataloader(**self.hparams.loader)
def _eval_dataloaders_names(self, loaders, prefix):
"""Process loaders into a list of names and loaders"""
if utils.is_dict(loaders):
return [
f"{prefix}/{k}" if k is not None else prefix for k in loaders.keys()
], list(loaders.values())
elif utils.is_list(loaders):
return [f"{prefix}/{i}" for i in range(len(loaders))], loaders
else:
return [prefix], [loaders]
def _eval_dataloaders(self):
# Return all val + test loaders
val_loaders = self.dataset.val_dataloader(**self.hparams.loader)
test_loaders = self.dataset.test_dataloader(**self.hparams.loader)
val_loader_names, val_loaders = self._eval_dataloaders_names(val_loaders, "val")
test_loader_names, test_loaders = self._eval_dataloaders_names(
test_loaders, "test"
)
# Duplicate datasets for ema
if self.hparams.train.ema > 0.0:
val_loader_names += [name + "/ema" for name in val_loader_names]
val_loaders = val_loaders + val_loaders
test_loader_names += [name + "/ema" for name in test_loader_names]
test_loaders = test_loaders + test_loaders
# adding option to only have val loader at eval (eg if test is duplicate)
if self.hparams.train.get("remove_test_loader_in_eval", False):
return val_loader_names, val_loaders
# adding option to only have test loader at eval
elif self.hparams.train.get("remove_val_loader_in_eval", False):
return test_loader_names, test_loaders
# default behavior is to add test loaders in eval
else:
return val_loader_names + test_loader_names, val_loaders + test_loaders
def val_dataloader(self):
val_loader_names, val_loaders = self._eval_dataloaders()
self.val_loader_names = val_loader_names
return val_loaders
def test_dataloader(self):
test_loader_names, test_loaders = self._eval_dataloaders()
self.test_loader_names = ["final/" + name for name in test_loader_names]
return test_loaders
### pytorch-lightning utils and entrypoint ###
def create_trainer(config, **kwargs):
callbacks: List[pl.Callback] = []
logger = None
# WandB Logging
if config.get("wandb") is not None:
# Pass in wandb.init(config=) argument to get the nice 'x.y.0.z' hparams logged
# Can pass in config_exclude_keys='wandb' to remove certain groups
import wandb
logger = CustomWandbLogger(
config=utils.to_dict(config, recursive=True),
settings=wandb.Settings(start_method="fork"),
**config.wandb,
)
# Lightning callbacks
if "callbacks" in config:
for _name_, callback in config.callbacks.items():
if config.get("wandb") is None and _name_ in ["learning_rate_monitor"]:
continue
log.info(f"Instantiating callback <{registry.callbacks[_name_]}>")
callback._name_ = _name_
callbacks.append(utils.instantiate(registry.callbacks, callback))
# Add ProgressiveResizing callback
if config.callbacks.get("progressive_resizing", None) is not None:
num_stages = len(config.callbacks.progressive_resizing.stage_params)
print(f"Progressive Resizing: {num_stages} stages")
for i, e in enumerate(config.callbacks.progressive_resizing.stage_params):
# Stage params are resolution and epochs, pretty print
print(f"\tStage {i}: {e['resolution']} @ {e['epochs']} epochs")
# Configure ddp automatically
n_devices = config.trainer.get('devices', 1)
if isinstance(n_devices, Sequence): # trainer.devices could be [1, 3] for example
n_devices = len(n_devices)
if n_devices > 1 and config.trainer.get('strategy', None) is None:
config.trainer.strategy = dict(
_target_='pytorch_lightning.strategies.DDPStrategy',
find_unused_parameters=False,
gradient_as_bucket_view=True, # https://pytorch-lightning.readthedocs.io/en/stable/advanced/advanced_gpu.html#ddp-optimizations
)
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
# special processing for seqlen warmup scheduler (reload)
if config.callbacks.get("seqlen_warmup_reload", None) is not None:
# we need to instantiate manually instead of with hydra, since it expects a dict instead of a hydra config for the accumulate_grad_batches
# so we convert everything to dicts (from hydra configs)
trainer_config_dict = dict(config.trainer)
epochs_cume = 0 # track cumulative epochs
accumulate_grad_schedule = {} # contains the accumulate_grad_batches schedule to init the trainer
for stage in config.callbacks.seqlen_warmup_reload.stage_params:
batch_size = stage['batch_size'] # curr batch size at this stage
grad_accum_factor = config.train.global_batch_size // batch_size # grad accum factor for this stage
accumulate_grad_schedule[epochs_cume] = grad_accum_factor # set the grad accum factor for this stage
epochs_cume += stage['epochs'] # increment epochs_cume for next stage
trainer_config_dict['accumulate_grad_batches'] = accumulate_grad_schedule # set the accumulate_grad_batches schedule
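        # Example with made-up stage params: global_batch_size = 256 and stages
        # [{'batch_size': 256, 'epochs': 2}, {'batch_size': 128, 'epochs': 4}] yield the schedule
        # {0: 1, 2: 2}: no gradient accumulation for epochs 0-1, then 2x accumulation from epoch 2 on.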
trainer_config_dict.pop('_target_') # only hydra uses this to instantiate
# Set DDPStrategy to work with pl.Trainer
config.trainer.pop('strategy')
trainer_config_dict['strategy'] = DDPStrategy(find_unused_parameters=False, gradient_as_bucket_view=True)
trainer = pl.Trainer(**trainer_config_dict, callbacks=callbacks, logger=logger)
else:
trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger)
return trainer
def train(config):
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
trainer = create_trainer(config)
model = SequenceLightningModule(config)
# Load pretrained_model if specified
if config.train.get("pretrained_model_path", None) is not None:
# PTL style. Note, method returns a new model object, and need to pass config.
model = SequenceLightningModule.load_from_checkpoint(
config.train.pretrained_model_path,
config=config,
strict=config.train.pretrained_model_strict_load,
)
# Run initial validation epoch (useful for debugging, finetuning)
if config.train.validate_at_start:
print("Running validation before training")
trainer.validate(model)
if config.train.ckpt is not None:
trainer.fit(model, ckpt_path=config.train.ckpt)
else:
trainer.fit(model)
if config.train.test:
trainer.test(model)
@hydra.main(config_path="configs", config_name="config.yaml")
def main(config: OmegaConf):
# Process config:
# - register evaluation resolver
# - filter out keys used only for interpolation
# - optional hooks, including disabling python warnings or debug friendly configuration
config = utils.train.process_config(config)
# Pretty print config using Rich library
utils.train.print_config(config, resolve=True)
train(config)
if __name__ == "__main__":
main() | hyena-dna-main | train.py |
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
def fftconv_ref(u, k, D, dropout_mask):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
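    # zero-pad the FFT to 2x the sequence length so the circular convolution computed by
    # rfft/irfft matches an ordinary (causal, linear) convolution on the first seqlen outputs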
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return (F.gelu(out) * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
def fftconv_fast(u, k, D, dropout_mask):
"""Fuse padding + rfft + pointwise mult + ifft + multiply with D + gelu + dropout
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
out = fftconv_fwd(u, k_f, D, dropout_mask, fft_size)
return out
def fftconv_fast_bwd(dout, u, k, D, dropout_mask=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
dx, dk_f, dD = fftconv_bwd(dout, u, k_f, D, dropout_mask, fft_size)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
return dx, dk, dD
device = 'cuda'
dtype = torch.float32
# dtype = torch.float16
batch_size = 64
H = 256
fft_size = 2048
seqlen = 1024
dropout_prob = 0.37
torch.manual_seed(0)
u = torch.randn(batch_size, H, seqlen, device=device, dtype=dtype, requires_grad=True)
k = torch.randn(H, seqlen, device=device, requires_grad=True)
D = torch.randn(H, device=device, requires_grad=True)
dropout_mask = F.dropout(torch.ones(batch_size, H, device=device), dropout_prob)
out = fftconv_ref(u, k, D, dropout_mask)
out = fftconv_fast(u, k, D, dropout_mask)
g = torch.randn_like(out)
fftconv_fast_bwd(g, u, k, D, dropout_mask)
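# A minimal correctness-check sketch (uncomment to run), assuming the fftconv CUDA
# extension above is built; the tolerances are illustrative guesses, not the repo's own test values.
# out_ref = fftconv_ref(u, k, D, dropout_mask)
# out_fast = fftconv_fast(u, k, D, dropout_mask)
# torch.testing.assert_close(out_fast, out_ref, rtol=1e-3, atol=1e-3)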
| hyena-dna-main | csrc/fftconv/launch_fftconv.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
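# Illustrative example: for an `nvcc -V` output containing
# "Cuda compilation tools, release 11.3, V11.3.109", the function above returns
# bare_metal_major = '11' and bare_metal_minor = '3'.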
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("fftconv")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'fftconv', [
'fftconv.cpp',
'fftconv_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': ['-O3', '--threads', '4', '-lineinfo', '--use_fast_math', '-std=c++17', '-arch=compute_70']
# extra_compile_args={'cxx': ['-O3'],
# 'nvcc': append_nvcc_threads(['-O3', '-lineinfo', '--use_fast_math', '-std=c++17'] + cc_flag)
},
include_dirs=[os.path.join(this_dir, 'mathdx/22.02/include')]
)
)
torch.utils.cpp_extension.COMMON_NVCC_FLAGS.remove('-D__CUDA_NO_HALF2_OPERATORS__')
setup(
name="fftconv",
version="0.1",
description="FFTConv for state-space models",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| hyena-dna-main | csrc/fftconv/setup.py |
import math
import re
import numpy as np
# N = 8192
N = 16384
# The case of 0 / N is special, we want to simplify it to 0 / 2 instead of 0 / 1
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num = numerator // gcd
denom = N // gcd
lut_vals = ['T_2_0'] + [f'T_{d}_{n}' for n, d in zip(num, denom)]
lut_string = f"static const __device__ float2 lut_mine_sp_8_{N}[{N // 8 + 1}] = {{\n {','.join(lut_vals)}\n}};"
print(lut_string)
# Only define new values if it's not already in the cuFFTDx lookup table
cufftdx_lut_filename = 'mathdx/22.02/include/cufftdx/include/database/lut_defines_0.hpp.inc'
matches = set()
reg = re.compile(f'^#define T_{N}_([0-9]+) ')
with open(cufftdx_lut_filename, 'r') as f:
for line in f:
if (match := reg.match(line)) is not None:
matches.add(int(match[1]))
numerator = np.arange(1, N // 8 + 1, 2)
angle = -2 * math.pi * numerator.astype(np.float64) / N
cos, sin = np.cos(angle), np.sin(angle)
defs = [f'#define T_{N}_{n} {{{c:.40f},{s:.40f}}}' for n, c, s in zip(numerator, cos, sin) if n not in matches]
def_string = '\n'.join(defs)
print(def_string)
| hyena-dna-main | csrc/fftconv/lut_code_gen.py |
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange, repeat
import sys, os
FILEDIR = os.path.realpath(__file__)
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
def exists(x):
return x is not None
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def soft_prompting():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--dataset", default='none')
parser.add_argument("--config", default='./configs/evals/soft_prompting_genomics.yaml')
parser.add_argument("--results", default='./results/soft_prompting')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
# dataset_name num_seqs num_classes median_len std
# dummy_mouse_enhancers_ensembl 1210 2 2381 984.4
# demo_coding_vs_intergenomic_seqs 100_000 2 200 0
# demo_human_or_worm 100_000 2 200 0
# human_enhancers_cohn 27791 2 500 0
# human_enhancers_ensembl 154842 2 269 122.6
# human_ensembl_regulatory 289061 3 401 184.3
# human_nontata_promoters 36131 2 251 0
# human_ocr_ensembl 174756 2 315 108.1
# chrom_names = [
# 'chr11', 'chr13', 'chr15', 'chr17', 'chr19', 'chr21', 'chr2', 'chr4', 'chr6', 'chr8', 'chr10', 'chr12',
# 'chr14', 'chr16', 'chr18', 'chr20', 'chr22', 'chrX', 'chrY', 'chr1', 'chr3', 'chr5', 'chr7', 'chr9'
# ]
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'dummy_mouse_enhancers_ensembl': {
'max_length': 3200,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
# 'demo_coding_vs_intergenomic_seqs': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token
# },
# 'demo_human_or_worm': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token,
# },
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
df_results = []
df_i = 0
ds_iter = datasets.items() if args.dataset=='none' else zip([args.dataset], [datasets[args.dataset]])
for dataset, dataset_cfg in ds_iter:
print(f'\nDataset {dataset}...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
for soft_tokens in cfg_tuning['soft_tokens']:
print(f'...and {soft_tokens} soft tokens...')
# print('Pretrained model...')
pretrained_model = load_model(
cfg_model=cfg_model,
ckpt_path=args.ckpt_path,
n_soft_tokens=soft_tokens,
soft_token_pdrop=cfg_tuning['soft_token_pdrop'],
max_length=cfg['max_length'] if shots>0 else None
)
pretrained_model.to(DEVICE)
if soft_tokens>0: # we only tune when using soft tokens!
print('...tuning...')
pretrained_model = tune_model(
pretrained_model, #deepcopy(pretrained_model).to(DEVICE),
loader,
cfg_tuning,
rng=rng
)
print('...evaluating...')
acc = eval_on_loaders(pretrained_model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'model': 'pretrained',
'shots': shots,
'soft_tokens': soft_tokens,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(
args.results,
f'soft_prompting_performance_{dataset}.csv'
)
)
del pretrained_model
def load_model(
cfg_model: tp.Dict,
ckpt_path: str=None,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a DDP-trained checkpoint into a single-model state dict by removing the 'model.' prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
        # need to remove torchmetrics keys; to remove keys while iterating, convert to a list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model, n_soft_tokens=n_soft_tokens, soft_token_pdrop=soft_token_pdrop, max_length=max_length)
class LitModel(pl.LightningModule):
def __init__(self,
model,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
super().__init__()
self.model = model
requires_grad(self.model, False) # we only want to train soft tokens
self.max_length = max_length
d_model = self.model.lm_head.weight.shape[1]
self.n_soft_tokens = n_soft_tokens
soft_tokens = torch.nn.Parameter(torch.zeros(n_soft_tokens, d_model)) if n_soft_tokens>0 else None
if exists(soft_tokens):
torch.nn.init.normal_(soft_tokens, mean=0.0, std=0.02)
self.soft_tokens = soft_tokens
self.soft_tokens_drop = torch.nn.Dropout(soft_token_pdrop) if soft_token_pdrop>0 else torch.nn.Identity()
def forward(self, x: torch.Tensor):
# get embeddings
with torch.no_grad():
hidden_states = self.model.backbone.embeddings(x)
# attach soft tokens
if exists(self.soft_tokens):
hidden_states = torch.cat([
repeat(self.soft_tokens_drop(self.soft_tokens), 'n d -> b n d', b=hidden_states.shape[0]),
hidden_states
], dim=1)
# forward
residual = None
for layer in self.model.backbone.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.model.backbone.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.model.backbone.ln_f(residual.to(dtype=self.model.backbone.ln_f.weight.dtype))
return self.model.lm_head(hidden_states)
def step(self, batch: tp.Tuple[torch.Tensor], phase: str='train'):
# get ys
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
labels_idx = x.shape[1]-1
if exists(self.max_length):
x = torch.cat([x, y], dim=1)
labels_idx = self.get_labels_idx(x)
y = x[:,labels_idx]
# forward
logits = self(x)
logits = logits[:,self.n_soft_tokens:] # we exclude soft tokens
logits = logits[:,labels_idx-1] # previous token predicts target
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
# compute loss/acc
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def get_labels_idx(self, x):
return np.concatenate([
[self.max_length+1],
np.arange((2*self.max_length)+4, x.shape[1], self.max_length+3)
])
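# A small shape sketch (not executed) of the soft-token concatenation used in
# LitModel.forward above; 16 soft tokens, batch 2, length 100 and width 128 are
# illustrative assumptions only.
def _demo_soft_token_concat():
    soft = torch.zeros(16, 128)                        # learnable soft-prompt embeddings
    hidden = torch.randn(2, 100, 128)                  # embedded input tokens (B, L, d)
    out = torch.cat([repeat(soft, 'n d -> b n d', b=hidden.shape[0]), hidden], dim=1)
    assert out.shape == (2, 116, 128)                  # soft tokens are prepended along L
    return out.shape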
def tune_model(model, loader, cfg_tuning, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='min',
factor=0.1,
patience=0
)
best_model = deepcopy(model)
requires_grad(best_model, False)
step = 0
losses, accs, val_losses = [], [], []
for epoch in range(cfg_tuning['max_epochs']):
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
for i, (x,y) in enumerate(loader.train_dataloader()):
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
with torch.cuda.amp.autocast():
out = model.step(batch)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
# eval epoch:
model.eval()
val_loss = []
with torch.no_grad():
for x, y in loader.val_dataloader():
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch)
loss, acc = out['loss'], out['acc']
val_loss.append(loss.cpu().detach().numpy())
val_losses.append(np.mean(val_loss))
if val_losses[-1]==np.min(val_losses): # also covers first epoch
update_ema(best_model, model, decay=0)
scheduler.step(val_losses[-1])
if verbose:
print(f'\tstep {step}; avg. val loss: {val_losses[-1]:1.4f}')
if (epoch > 0 and sum(val_losses[-1] >= val_losses[:-1])>1) or (epoch+1)>=cfg_tuning['max_epochs']:
break
best_model = best_model.to(DEVICE)
    requires_grad(best_model, True) # we turn grads back on for completeness, even though the model will not be trained further...
return best_model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for x,y in tqdm(val_loader):
x = x.to(DEVICE)
with torch.no_grad():
logits = model(x)
logits = logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
soft_prompting() | hyena-dna-main | evals/soft_prompting_genomics.py |
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange
import sys, os
FILEDIR = os.path.realpath(__file__)
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
# TODO:
# Make use of maximum long context: either put entire downstream dataset in context
# or add many tunable soft tokens (soft prompting)!
# -> just fill the context up one way or another and show what's possible!
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def instruction_tuned_ICL():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--config", default='./configs/evals/instruction_tuned_genomics.yaml')
parser.add_argument("--results", default='./results/instruction_tuned_genomics')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
    # dataset_name                        num_seqs   num_classes   median_len   std
    # dummy_mouse_enhancers_ensembl       1210       2             2381         984.4
    # demo_coding_vs_intergenomic_seqs    100_000    2             200          0
    # demo_human_or_worm                  100_000    2             200          0
    # human_enhancers_cohn                27791      2             500          0
    # human_enhancers_ensembl             154842     2             269          122.6
    # human_ensembl_regulatory            289061     3             401          184.3
    # human_nontata_promoters             36131      2             251          0
    # human_ocr_ensembl                   174756     2             315          108.1
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
print('\n\nEvaluating instruction-tuned ICL performance... ')
df_results = []
df_i = 0
for tuning_samples in cfg_tuning['tuning_samples']:
print(f'...when tuning on {tuning_samples} samples...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
for dataset, dataset_cfg in datasets.items():
print(f'...from dataset {dataset}...')
print(f'Collecting tuning data...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
# collect tuning samples
tuning_X = []
train_loader = iter(loader.train_dataloader())
samples_collected = 0
for x, y in tqdm(train_loader):
n = min(tuning_samples, x.shape[0])
tuning_X.append(torch.cat([x[:n], y[:n]], dim=1))
samples_collected += n
if samples_collected >= tuning_samples:
                        print(f'...stop because {tuning_samples} samples collected.')
break
tuning_X = torch.cat(tuning_X, dim=0)
if shots>0:
tuning_y_idx = np.concatenate([
[cfg['max_length']+1],
np.arange((2*cfg['max_length'])+4, tuning_X.shape[1], cfg['max_length']+3)
])
else:
tuning_y_idx = cfg['max_length']+1
tuning_y = tuning_X[:,tuning_y_idx]
tuning_loss_mask = tuning_y_idx-1 # prediction is always from previous token
print('Tuning pretrained model...')
pretrained_model = load_model(cfg_model, args.ckpt_path)
pretrained_model.to(DEVICE)
tuned_pretrained_model = tune_model(
deepcopy(pretrained_model).to(DEVICE),
tuning_X,
tuning_y,
cfg_tuning,
loss_mask=tuning_loss_mask,
rng=rng
)
# print('Tuning untrained model...')
# scratch_model = load_model(cfg_model)
# scratch_model.to(DEVICE)
# tuned_scratch_model = tune_model(
# scratch_model,
# tuning_X,
# tuning_y,
# cfg_tuning,
# loss_mask=tuning_loss_mask,
# rng=rng
# )
print('Evaluating ICL performance...')
for label, model in zip(
['tuned_pretrained'], #, 'scratchtrained'
[tuned_pretrained_model] # tuned_scratch_model
):
print(f'{label}:')
acc = eval_on_loaders(model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'tuning_samples': tuning_samples,
'model': label,
'shots': shots,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(args.results, 'instruction_tuned_genomics.csv')
)
def load_model(cfg_model, ckpt_path: str=None):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a model trained with DDP by removing the 'model.' prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
        # remove torchmetrics keys; convert the keys to a list first so we can pop while iterating
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model)
class LitModel(pl.LightningModule):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x: torch.Tensor):
return self.model(x)[0]
def step(self, batch: tp.Tuple[torch.Tensor], loss_mask: tp.Union[int, np.ndarray]=-1, phase: str='train'):
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
loss_mask = -1 if loss_mask is None else loss_mask
out = self(x)
logits = out.logits[:,loss_mask]
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def tune_model(model, X, y, cfg_tuning, max_epochs: int=1, loss_mask=None, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
    # # we use an exponential moving average of the model for downstream ICL...
# ema = deepcopy(model).to(DEVICE)
# requires_grad(ema, False)
# update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
# ema.eval()
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
# split train/eval
n_samples = X.shape[0]
train_idx = np.arange(n_samples)
batch_size = min(len(train_idx), cfg_tuning['batch_size'])
epoch = 0
step = 0
losses, accs = [], []
stop_training = False
while not stop_training:
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
rng.shuffle(train_idx)
batch_i, batch_start = 0, 0
while batch_start+batch_size <= len(train_idx):
idx = train_idx[batch_start:batch_start+batch_size]
batch = {'x': X[idx], 'y': y[idx]}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch, loss_mask=loss_mask)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (batch_i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
print(f'step: {step}; train loss: {losses[-1]}, acc: {accs[-1]}')
batch_start += batch_size
batch_i += 1
epoch += 1
if epoch>=max_epochs:
stop_training = True
return model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for batch in tqdm(val_loader):
x, y = batch
x = x.to(DEVICE)
with torch.no_grad():
out = model(x)
if type(out) == tuple: out = out[0]
logits = out.logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
instruction_tuned_ICL() | hyena-dna-main | evals/instruction_tuned_genomics.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
from src.models.sequence.long_conv_lm import DNAEmbeddingModel
from src.tasks.decoders import SequenceDecoder
from src.dataloaders import SequenceDataset
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.genomic_bench_dataloader import GenomicBenchmark
from src.dataloaders.nucleotide_transformer_dataloader import NucleotideTransformer
try:
from tokenizers import Tokenizer
except:
pass
genomic_benchmark_datasets = ["dummy_mouse_enhancers_ensembl", "demo_coding_vs_intergenomic_seqs", "demo_human_or_worm", "human_enhancers_cohn", "human_enhancers_ensembl", "human_ensembl_regulatory", "human_nontata_promoters", "human_ocr_ensembl"]
nucleotide_datasets = [""]
class HG38Inference:
'''Model (backbone + decoder) inference, initially for enhancer model, but can be modified for other classification tasks as well.
model_cfg, dict: config for entire model, backbone and decoder head
    ckpt_path, str: path to the model checkpoint
max_seq_len, int: max seq len of model (technically in the model_cfg already, but more explicit)
'''
def __init__(self, cfg, ckpt_path, max_seq_len, use_dataloader=False):
self.max_seq_len = max_seq_len
self.backbone, self.decoder, self.tokenizer = self.load_model(cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.backbone = self.backbone.to(self.device)
self.decoder = self.decoder.to(self.device)
# load dataloader if given
if use_dataloader:
self.loader = self.get_dataloader(cfg)
def get_dataloader(self, config):
cfg = yaml.load(open(config, 'r'), Loader=yaml.FullLoader)
dataset_name = cfg['dataset']["dataset_name"]
if dataset_name in genomic_benchmark_datasets:
loader = GenomicBenchmark(**cfg['dataset'])
else:
# assume the rest are in the nucleotide trans datasets
loader = NucleotideTransformer(**cfg['dataset'])
loader.setup()
return loader
def predict_on_list(self, seqs):
"""
makes predictions just given a list of string sequences, handles all the tokenizers, and tensor conversion
"""
preds = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
seq = self.tokenizer.encode(seq).ids
else:
seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
embeddings, _ = self.backbone(torch.tensor([seq]).to(device=self.device))
pred = self.decoder(embeddings)
preds.append(pred)
# we provide the predictions (you can pass back embeddings if you wish)
return preds
def predict_from_loader(self):
"""
Don't forget this returns a list of the labels too with the predictions
"""
all_preds = []
all_labels = []
# by default we'll use the test dataloader, but you can grab val_dataloader or train_dataloader too
for i, batch in enumerate(self.loader.test_dataloader()):
print('batch {}'.format(i))
x, y = batch
x = x.to(self.device)
# y = y.to(self.device)
# save the labels y
all_labels.append(y.cpu().detach().numpy())
embeddings, _ = self.backbone(x)
pred_batch = self.decoder(embeddings)
# take argmax of the predictions
pred_batch = torch.argmax(pred_batch, dim=1)
all_preds.append(pred_batch.cpu().detach().numpy())
# convert list to tensor
all_preds = np.concatenate(all_preds, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_preds, all_labels
def load_model(self, cfg, ckpt_path):
# get the configs
cfg = yaml.load(open(cfg, 'r'), Loader=yaml.FullLoader)
train_cfg = cfg['train'] # grab section `train` section of config
model_cfg = cfg['model'] # grab the `model` section of config
self.d_output = train_cfg['d_output'] # number of classes the head was trained on
# the state dict has both the backbone model and the decoder (normally as a Lightning module), but we need to instantiate both separately
# when not using Lightning.
# instantiate the model
backbone = DNAEmbeddingModel(**model_cfg) # instantiate the backbone separately from the decoder
# instantiate the decoder
decoder = SequenceDecoder(model_cfg['d_model'], d_output=self.d_output, l_output=0, mode='pool') # needs to know the d_model
state_dict = torch.load(ckpt_path, map_location='cpu') # has both backbone and decoder
        # loads a model trained with DDP by removing the 'model.' prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
        # remove torchmetrics keys; convert the keys to a list first so we can pop while iterating
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
# the state_dict keys slightly mismatch from Lightning..., so we fix it here
decoder_state_dict = {}
decoder_state_dict['output_transform.weight'] = model_state_dict.pop('decoder.0.output_transform.weight')
decoder_state_dict['output_transform.bias'] = model_state_dict.pop('decoder.0.output_transform.bias')
# now actually load the state dict to the decoder and backbone separately
decoder.load_state_dict(decoder_state_dict, strict=True)
backbone.load_state_dict(model_state_dict, strict=True)
# setup tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
return backbone, decoder, tokenizer
if __name__ == "__main__":
"""
    Example cmd for loading a pretrained model (that was fine-tuned). This checkpoint was trained on the 'human_nontata_promoters' dataset.
# (from safari-internal-inf root, note the -m and no '.py')
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-14/04-32-17-578382/checkpoints/val/accuracy.ckpt
# enhancer (genomic benchmark)
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt --output_path /home/workspace/eric/safari-internal/outputs
# config is located here:
configs/evals/hg38_decoder.yaml
# download the checkpoints from google drive, and put it in the outputs/ dir
https://drive.google.com/drive/folders/11cDmLZgBHr3KkiCtS2V6sqI3Kf8lTW39?usp=share_link
# enhancer weights, from nucleotide transformer, binary classification
/home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt
https://drive.google.com/drive/folders/1wIijtwlqWwzNe_0d3meAXSk7oYJ2POMC?usp=share_link
# promoter tata weights
/home/workspace/eric/safari-internal/outputs/2023-05-01/04-13-05-495708/checkpoints/val/f1_macro.ckpt
note, this model is larger, 2 layers, d_model=256 (not 128!!), and d_inner=1024
https://drive.google.com/drive/folders/1tbIUYwScEox4SLFqeZIFp7Z4YvmIN0M3?usp=share_link
    # In general, you need to make sure the config has the same model settings the checkpoint was trained with.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default=f"",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
parser.add_argument(
"--output_path",
default=f"",
help="Path to where to save npy file"
)
args = parser.parse_args()
task = HG38Inference(args.config, args.ckpt_path, max_seq_len=1024, use_dataloader=True)
# sample sequence, can pass a list of seqs (themselves a list of chars)
# seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
# if you just have a list of sequences, as strings, you can use this function, returns list
# preds = task.predict_on_list(seqs) # return a list of predictions
# print(preds[0].shape) # shape is [batch, 2] for binary class prediction
# OR...
# or if you rather use the existing dataloader for the enhancer dataset, you can call this instead
# returns a np array
preds, labels = task.predict_from_loader()
# print(preds.shape) # shape is [batch, 2] for binary class prediction
# calculate accuracy of preds vs labels
acc = np.mean(preds.squeeze() == labels.squeeze())
print("Acc: ", acc)
breakpoint()
pred_path = os.path.join(args.output_path, "preds.npy")
label_path = os.path.join(args.output_path, "labels.npy")
# save as numpy arr
preds_np = np.array(preds)
labels_np = np.array(labels)
with open(pred_path, 'wb') as f:
np.save(f, preds_np)
with open(label_path, 'wb') as f:
np.save(f, labels_np)
| hyena-dna-main | evals/hg38_inference_decoder.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
sys.path.append(os.environ.get("SAFARI_PATH", "."))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from transformers import AutoTokenizer, GPT2LMHeadModel
# from spacy.lang.en.stop_words import STOP_WORDS
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
try:
from tokenizers import Tokenizer
except:
pass
# https://github.com/openai/gpt-2/issues/131#issuecomment-492786058
# def preprocess(text):
# text = text.replace("“", '"')
# text = text.replace("”", '"')
# return '\n'+text.strip()
class HG38Encoder:
"Encoder inference for HG38 sequences"
def __init__(self, model_cfg, ckpt_path, max_seq_len):
self.max_seq_len = max_seq_len
self.model, self.tokenizer = self.load_model(model_cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
def encode(self, seqs):
results = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
tokenized_seq = self.tokenizer.encode(seq).ids
else:
tokenized_seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
logits, __ = self.model(torch.tensor([tokenized_seq]).to(device=self.device))
# Using head, so just have logits
results.append(logits)
return results
def load_model(self, model_cfg, ckpt_path):
config = yaml.load(open(model_cfg, 'r'), Loader=yaml.FullLoader)
model = ConvLMHeadModel(**config['model_config'])
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a model trained with DDP by removing the 'model.' prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
        # remove torchmetrics keys; convert the keys to a list first so we can pop while iterating
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(state_dict["state_dict"])
# setup tokenizer
if config['tokenizer_name'] == 'char':
print("**Using Char-level tokenizer**")
# add to vocab
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
print(tokenizer._vocab_str_to_int)
else:
raise NotImplementedError("You need to provide a custom tokenizer!")
return model, tokenizer
if __name__ == "__main__":
SAFARI_PATH = os.getenv('SAFARI_PATH', '.')
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_cfg",
default=f"{SAFARI_PATH}/configs/evals/hyena_small_150b.yaml",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
args = parser.parse_args()
task = HG38Encoder(args.model_cfg, args.ckpt_path, max_seq_len=1024)
# sample sequence, can pass a list of seqs (themselves a list of chars)
seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
logits = task.encode(seqs)
print(logits)
print(logits[0].logits.shape)
breakpoint()
| hyena-dna-main | evals/hg38_inference.py |
import math
import torch
import torch.nn.functional as F
from sklearn.metrics import f1_score, roc_auc_score
from functools import partial
import torchmetrics.functional as tm_f
import torch.distributions as dist
from sklearn.metrics import f1_score, roc_auc_score, matthews_corrcoef
from torchmetrics import Metric
from torchmetrics.classification import MulticlassRecall, MulticlassPrecision
class CorrectAggregatedMetric(Metric):
"""This is needed to calculate some metrics b/c small batch sizes cause aggregation via a simple
average to be off, as some classes might not be present in batch but will get penalized with a 0."""
def __init__(self, class_idx: int, dist_sync_on_step=False):
# call `self.add_state`for every internal state that is needed for the metrics computations
# dist_reduce_fx indicates the function that should be used to reduce
# state from multiple processes
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.class_idx = torch.tensor(class_idx)
self.add_state("numerator", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("denominator", default=torch.tensor(0.0), dist_reduce_fx="sum")
def _update(self, numerator, denominator, preds, y) -> tuple:
        raise NotImplementedError
def update(self, logits: torch.Tensor, y: torch.Tensor):
# update metric states
preds = torch.argmax(logits, dim=-1)
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
assert preds.shape == y.shape, f"preds shape {preds.shape} != y shape {y.shape}"
self.numerator, self.denominator = self._update(self.numerator, self.denominator, preds, y)
def compute(self):
# compute final result
value = self.numerator.float() / self.denominator if self.denominator > 0 else torch.tensor(0.0)
return value
def reset(self):
self.numerator = torch.tensor(0.0)
self.denominator = torch.tensor(0.0)
class AccuracyPerClass(CorrectAggregatedMetric):
"""Calculate per class accuracy, i.e. P(y_hat = class_idx AND y = class_idx OR y_hat != class_idx AND y != class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == class_idx).sum()
denominator += relevant_idxs.sum()
relevant_idxs = (y != class_idx)
numerator += (preds[relevant_idxs] != class_idx).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class PrecisionPerClass(CorrectAggregatedMetric):
"""Calculate per class precision, i.e. P(y_hat = y | y_hat = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (preds == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class RecallPerClass(CorrectAggregatedMetric):
"""Calculate per class recall, i.e. P(y_hat = y | y = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
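# Minimal usage sketch for the per-class metrics above (hypothetical tensors):
# metric = AccuracyPerClass(class_idx=1)
# metric.update(torch.randn(8, 3), torch.randint(0, 3, (8,)))  # logits (B, C), targets (B,)
# value = metric.compute()  # running accuracy of the class-1-vs-rest decision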
def mcc(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return matthews_corrcoef(y.cpu().numpy(), y_hat.cpu().numpy())
def last_k_ppl(logits, y, seq_len=1024, k=None):
'''
Calculate perplexity for last k tokens in a sequence.
logits: (batch_size * seq_len, vocab_size), note, already flattened
y: (batch_size * seq_len), note, already flattened
seq_len: int, length of each sequence in the batch
k: if None, use all tokens in sequence
    returns: scalar perplexity over the last k tokens, averaged over the batch
'''
if k is None:
k = 0 # use the entire sequence
# need to reshape logits and y to be (batch_size, seq_len, vocab_size) and (batch_size, seq_len)
# respectively
# breakpoint()
logits = logits.view(-1, seq_len, logits.shape[-1])
y = y.view(-1, seq_len)
# only use the last k values of seq dim in logits and y
logits = logits[:, -k:, :]
y = y[:, -k:]
# reshape to flatten the batch and seq_len dimensions
logits = logits.reshape(-1, logits.shape[-1])
y = y.reshape(-1)
# get avg and put on cpu
return F.cross_entropy(logits, y, reduction='none').view(y.shape[0], -1).mean().exp().cpu()
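# Minimal usage sketch for last_k_ppl (hypothetical shapes/values):
# _logits = torch.randn(2 * 8, 4)      # (batch * seq_len, vocab_size), already flattened
# _y = torch.randint(0, 4, (2 * 8,))   # (batch * seq_len,)
# _ppl = last_k_ppl(_logits, _y, seq_len=8, k=2)  # scalar ppl over the last 2 tokens per sequence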
def _student_t_map(mu, sigma, nu):
sigma = F.softplus(sigma)
nu = 2.0 + F.softplus(nu)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1), nu.squeeze(axis=-1)
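# student_t_loss below is the negative log-likelihood of a Student-t distribution with
# location mu, scale sigma and nu degrees of freedom:
#   log p(y) = lgamma((nu+1)/2) - lgamma(nu/2) - 0.5*log(pi*nu) - log(sigma)
#              - ((nu+1)/2) * log(1 + ((y - mu)/sigma)**2 / nu)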
def student_t_loss(outs, y):
mu, sigma, nu = outs[..., 0], outs[..., 1], outs[..., 2]
mu, sigma, nu = _student_t_map(mu, sigma, nu)
y = y.squeeze(axis=-1)
nup1_half = (nu + 1.0) / 2.0
part1 = 1.0 / nu * torch.square((y - mu) / sigma)
Z = (
torch.lgamma(nup1_half)
- torch.lgamma(nu / 2.0)
- 0.5 * torch.log(math.pi * nu)
- torch.log(sigma)
)
ll = Z - nup1_half * torch.log1p(part1)
return -ll.mean()
def gaussian_ll_loss(outs, y):
mu, sigma = outs[..., 0], outs[..., 1]
y = y.squeeze(axis=-1)
sigma = F.softplus(sigma)
ll = -1.0 * (
torch.log(sigma)
+ 0.5 * math.log(2 * math.pi)
+ 0.5 * torch.square((y - mu) / sigma)
)
return -ll.mean()
def binary_cross_entropy(logits, y):
# BCE loss requires squeezing last dimension of logits so it has the same shape as y
# requires y to be float, since it's overloaded to represent a probability
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
def binary_accuracy(logits, y):
return torch.eq(logits.squeeze(-1) >= 0, y).float().mean()
def padded_cross_entropy(logits, y, pad_mask, pad_value=-1):
"""Will ignore the pad value in label (eg, -1)
logits: (batch_size, seq_len, vocab_size)
y: (batch_size, seq_len)
pad_mask: (batch_size, seq_len)
"""
# need to apply pad mask to y
y_pad = y + pad_mask * pad_value
logits = logits.view(-1, logits.shape[-1])
y_pad = y_pad.view(-1)
return F.cross_entropy(logits, y_pad, ignore_index=pad_value)
def cross_entropy(logits, y, ignore_index=-100):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return F.cross_entropy(logits, y, ignore_index=ignore_index)
def soft_cross_entropy(logits, y, label_smoothing=0.0):
logits = logits.view(-1, logits.shape[-1])
# target is now 2d (no target flattening)
return F.cross_entropy(logits, y, label_smoothing=label_smoothing)
def accuracy(logits, y):
logits = logits.view(-1, logits.shape[-1])
preds = torch.argmax(logits, dim=-1)
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.eq(preds, y).float().mean()
def accuracy_ignore_index(logits, y, ignore_index=-100):
num_classes = logits.shape[-1]
preds = torch.argmax(logits, dim=-1)
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
accuracy = tm_f.classification.accuracy(preds, y, 'multiclass', num_classes=num_classes, ignore_index=ignore_index, average='micro')
return accuracy
def accuracy_at_k(logits, y, k=1):
logits = logits.view(-1, logits.shape[-1])
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.topk(logits, k, dim=-1)[1].eq(y.unsqueeze(-1)).any(dim=-1).float().mean()
def f1_binary(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="binary")
def f1_macro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="macro")
def f1_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="micro")
def roc_auc_macro(logits, y):
logits = logits.view(
-1, logits.shape[-1]
).detach() # KS: had to add detach to eval while training
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="macro"
)
def roc_auc_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="micro"
)
def mse(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
# TODO document the use case of this
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
def forecast_rmse(outs, y, len_batch=None):
# TODO: generalize, currently for Monash dataset
return torch.sqrt(F.mse_loss(outs, y, reduction='none').mean(1)).mean()
def mae(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.l1_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.l1_loss(outs_masked, y_masked)
# Metrics that can depend on the loss
def loss(x, y, loss_fn):
""" This metric may be useful because the training loss may add extra regularization (e.g. weight decay implemented as L2 penalty), while adding this as a metric skips the additional losses """
return loss_fn(x, y)
def bpb(x, y, loss_fn):
""" bits per byte (image density estimation, speech generation, char LM) """
return loss_fn(x, y) / math.log(2)
def ppl(x, y, loss_fn):
return torch.exp(loss_fn(x, y))
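# Relationship between the loss-derived metrics above (loss_fn returns a mean NLL in nats):
#   bpb = loss / ln(2)   (nats -> bits)
#   ppl = exp(loss)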
# should have a better way to do this
output_metric_fns = {
"binary_cross_entropy": binary_cross_entropy,
"cross_entropy": cross_entropy,
"padded_cross_entropy": padded_cross_entropy,
"binary_accuracy": binary_accuracy,
"precision": MulticlassPrecision,
"precision_per_class": PrecisionPerClass,
"recall": MulticlassRecall,
"recall_per_class": RecallPerClass,
"accuracy": accuracy,
"accuracy_per_class": AccuracyPerClass,
"accuracy_ignore_index": accuracy_ignore_index,
'accuracy@3': partial(accuracy_at_k, k=3),
'accuracy@5': partial(accuracy_at_k, k=5),
'accuracy@10': partial(accuracy_at_k, k=10),
"eval_loss": loss,
"mcc": mcc,
"mse": mse,
"mae": mae,
"forecast_rmse": forecast_rmse,
"f1_binary": f1_binary,
"f1_macro": f1_macro,
"f1_micro": f1_micro,
"roc_auc_macro": roc_auc_macro,
"roc_auc_micro": roc_auc_micro,
"soft_cross_entropy": soft_cross_entropy, # only for pytorch 1.10+
"student_t": student_t_loss,
"gaussian_ll": gaussian_ll_loss,
}
loss_metric_fns = {
"loss": loss,
"bpb": bpb,
"ppl": ppl,
}
metric_fns = {**output_metric_fns, **loss_metric_fns} # TODO py3.9
| hyena-dna-main | src/tasks/metrics.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss to avoid recomputation
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torchmetrics import Metric
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss
except ImportError:
CrossEntropyLoss = torch.nn.CrossEntropyLoss
try:
from apex.transformer import parallel_state
except ImportError:
parallel_state = None
class Perplexity(Metric):
r"""
    Perplexity measures how well a language model predicts a text sample. It is calculated as the exponential
    of the average negative log-likelihood per token.
Args:
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> import torch
>>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))
>>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
>>> target[0, 6:] = -100
>>> metric = Perplexity(ignore_index=-100)
>>> metric(preds, target)
tensor(5.2545)
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
total_log_probs: Tensor
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("total_log_probs", default=torch.tensor(0.0, dtype=torch.float64),
dist_reduce_fx="sum")
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum")
self.loss_fn = CrossEntropyLoss()
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
"""Compute and store intermediate statistics for Perplexity.
Args:
preds:
Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
target:
Ground truth values with a shape [batch_size, seq_len].
"""
count = target.numel()
if loss is None:
loss = self.loss_fn(preds, target)
self.total_log_probs += loss.double() * count
self.count += count
def compute(self) -> Tensor:
"""Compute the Perplexity.
Returns:
Perplexity
"""
return torch.exp(self.total_log_probs / self.count)
class NumTokens(Metric):
"""Keep track of how many tokens we've seen.
"""
# TODO: how do we prevent the reset between the epochs? The reset happens on the 1st batch
# of the next epoch.
# Right now the hack is that we override reset(), which would mess up the forward method.
# We then override forward to do the right thing.
is_differentiable = False
higher_is_better = False
full_state_update = False
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum",
persistent=True) # We want the count to be saved to state-dict
if parallel_state is not None and not parallel_state.is_unitialized():
self.tensor_parallel_world_size = parallel_state.get_tensor_model_parallel_world_size()
else:
self.tensor_parallel_world_size = 1
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
self.count += target.numel() // self.tensor_parallel_world_size
def compute(self) -> Tensor:
return self.count
def reset(self):
count = self.count
super().reset()
self.count = count
# Adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/metric.py
def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""forward computation using single call to `update` to calculate the metric value on the current batch and
accumulate global state.
        This can be done when the global metric state is a simple reduction of batch states.
"""
self.update(*args, **kwargs)
return self.compute()
torchmetric_fns = {
"perplexity": Perplexity,
"num_tokens": NumTokens,
} | hyena-dna-main | src/tasks/torchmetrics.py |
from typing import Optional, List, Tuple
import math
import functools
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from omegaconf import ListConfig
from src.models.nn.components import ReversibleInstanceNorm1dInput, ReversibleInstanceNorm1dOutput, \
TSNormalization, TSInverseNormalization
from src.models.nn.adaptive_softmax import AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax
import src.tasks.metrics as M
from src.tasks.torchmetrics import torchmetric_fns as tm_mine
import src.models.nn.utils as U
import torchmetrics as tm
from src.utils.config import to_list, instantiate
from torchmetrics import MetricCollection
class BaseTask:
""" Abstract class that takes care of:
- loss function
- arbitrary metrics
- forward pass
- (optional) encoder module that interfaces with dataset (inputs) and model
- (optional) decoder module that interfaces with dataset (targets) and model
"""
encoder = None
decoder = None
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None):
""" This class is allowed to grab attributes directly off a constructed dataset and model object """
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
        # Wrap loss and metrics so that they accept kwargs and discard any they do not use
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
def _init_torchmetrics(self):
"""
Instantiate torchmetrics.
"""
tracked_torchmetrics = {}
for name in self.torchmetric_names:
if name in tm_mine:
tracked_torchmetrics[name] = tm_mine[name]().to('cuda')
elif name in ['AUROC', 'StatScores', 'Precision', 'Recall', 'F1', 'F1Score']:
tracked_torchmetrics[name] = getattr(tm, name)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False).to('cuda')
elif '@' in name:
k = int(name.split('@')[1])
mname = name.split('@')[0]
tracked_torchmetrics[name] = getattr(tm, mname)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False, top_k=k).to('cuda')
else:
tracked_torchmetrics[name] = getattr(tm, name)(compute_on_step=False).to('cuda')
return tracked_torchmetrics
def _reset_torchmetrics(self, prefix=None):
"""
Reset torchmetrics for a prefix
associated with a particular dataloader (e.g. train, val, test).
Generally do this at the start of an epoch.
"""
all_prefixes = [prefix] if prefix is not None else self._tracked_torchmetrics
for prefix in all_prefixes:
if prefix in self._tracked_torchmetrics:
self._tracked_torchmetrics[prefix].reset()
def get_torchmetrics(self, prefix):
"""
Compute torchmetrics for a prefix associated with
a particular dataloader (e.g. train, val, test).
Generally do this at the end of an epoch.
"""
return {name: self._tracked_torchmetrics[prefix][name].compute() for name in self.torchmetric_names}
def torchmetrics(self, x, y, prefix, loss=None):
"""
Update torchmetrics with new x, y .
Prefix corresponds to a particular dataloader (e.g. train, val, test).
Generally call this every batch.
"""
if prefix not in self._tracked_torchmetrics:
self._init_torchmetrics(prefix)
self._tracked_torchmetrics[prefix](x, y, loss=loss)
# for name in self.torchmetric_names:
# if name.startswith('Accuracy'):
# if len(x.shape) > 2:
# # Multi-dimensional, multi-class
# self._tracked_torchmetrics[prefix][name].update(x.transpose(1, 2), y.squeeze())
# continue
# self._tracked_torchmetrics[prefix][name].update(x, y)
def get_torchmetrics(self, prefix):
return self._tracked_torchmetrics[prefix]
def metrics(self, x, y, **kwargs):
"""
Metrics are just functions
output metrics are a function of output and target
loss metrics are a function of loss (e.g. perplexity)
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
        x, w = encoder(x, **z) # w can hold model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
return x, y, w
class Scalar(nn.Module):
def __init__(self, c=1):
super().__init__()
self.c = c
def forward(self, x):
return x * self.c
class LMTask(BaseTask):
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
        x, w = encoder(x, **z) # w can hold model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
x = x.logits
x = rearrange(x, '... C -> (...) C')
y = rearrange(y, '... -> (...)')
return x, y, w
class MultiClass(BaseTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.continual_metrics = {}
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = M.output_metric_fns[name](spec_idx)
def metrics(self, x, y, **kwargs):
output_metrics = {}
for name in self.metric_names:
if name in M.output_metric_fns:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].to(x.device)
self.continual_metrics[name + '_' + spec].update(x, y)
output_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].compute()
elif name in ['precision', 'recall']:
self.continual_metrics[name] = self.continual_metrics[name].to(x.device)
output_metrics[name] = self.continual_metrics[name](x, y)
else:
output_metrics[name] = U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def _reset_torchmetrics(self, prefix=None):
super()._reset_torchmetrics(prefix)
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec].reset()
class HG38Task(LMTask):
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None, last_k_ppl=None, per_token_ppl=None):
""" Extending LMTask to add custom metrics for HG38 task
last_k_ppl: config for custom ppl, with hparams to pass with it
per_token_ppl: config for per token ppl calc, with list of k (ppls) to track
"""
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
self.last_k_ppl = last_k_ppl
self.per_token_ppl = per_token_ppl
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
# Wrap loss and metrics so that they accept kwargs and
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
# Create custom metrics for last k ppl
# last_k_ppl is a list of dicts (configs), so loop thru them
if self.last_k_ppl is not None:
self.custom_ppl_dict = {}
for k in self.last_k_ppl:
key_name = "last_" + str(k) + "_ppl"
# create config
custom_ppl_config = {"_name_": "last_k_ppl", "k": k, "seq_len": self.dataset.max_length}
k_ppl_fn = instantiate(M.output_metric_fns, custom_ppl_config, partial=True)
k_ppl_fn = U.discard_kwargs(k_ppl_fn)
self.custom_ppl_dict[key_name] = k_ppl_fn
# Create custom metric for per token ppl
if self.per_token_ppl is not None:
per_token_ppl_config = {"_name_": "per_token_ppl", "ks": self.per_token_ppl["ks"], "seq_len": self.dataset.max_length}
per_token_fn = instantiate(M.output_metric_fns, per_token_ppl_config, partial=True)
per_token_fn = U.discard_kwargs(per_token_fn)
self.per_token_fn = per_token_fn
def metrics(self, x, y, **kwargs):
"""
Need to modify metrics to include custom metrics
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
# loop thru all custom ppls and add them to output_metrics
if self.last_k_ppl is not None:
for key_name, k_ppl_fn in self.custom_ppl_dict.items():
output_metrics[key_name] = k_ppl_fn(x, y, **kwargs)
# loop thru all custom ppls and add them to output_metrics
if self.per_token_ppl is not None:
# returns k ppl values, (averaged over batch)
per_k_ppl = self.per_token_fn(x, y, **kwargs)
# loop over ks to log metric
for ind, k in enumerate(self.per_token_ppl["ks"]):
key_name = "ppl_at_{}".format(k)
                k = k-1 # convert to 0-indexed position
output_metrics[key_name] = per_k_ppl[ind] # should be in order
return {**output_metrics, **loss_metrics}
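# Hypothetical example of the custom-ppl arguments accepted by HG38Task above (values illustrative):
#   last_k_ppl: [100, 500]                 -> logs metrics 'last_100_ppl', 'last_500_ppl'
#   per_token_ppl: {'ks': [1, 512, 1024]}  -> logs metrics 'ppl_at_1', 'ppl_at_512', 'ppl_at_1024'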
class AdaptiveLMTask(BaseTask):
def __init__(
self,
div_val,
cutoffs : List[int],
tie_weights : bool,
tie_projs : List[bool],
init_scale=1.0,
bias_scale=0.0,
dropemb=0.0,
dropsoft=0.0,
**kwargs,
):
super().__init__(**kwargs)
n_tokens = self.dataset.n_tokens
d_model = self.model.d_model
d_output = self.model.d_output
encoder = AdaptiveEmbedding(
n_tokens,
d_model,
d_model,
cutoffs=cutoffs,
div_val=div_val,
init_scale=init_scale,
dropout=dropemb,
)
if tie_weights:
assert d_model == d_output
emb_layers = [i.weight for i in encoder.emb_layers]
else:
emb_layers = None
# Construct decoder/loss
emb_projs = encoder.emb_projs
loss = ProjectedAdaptiveLogSoftmax(
n_tokens, d_output, d_output,
cutoffs, div_val=div_val,
tie_projs=tie_projs,
out_projs=emb_projs,
out_layers_weights=emb_layers,
bias_scale=bias_scale,
dropout=dropsoft,
)
self.encoder = encoder
self.loss = loss
registry = {
'base': BaseTask,
'multiclass': MultiClass,
'lm': LMTask,
'hg38': HG38Task,
}
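# Hypothetical task config resolved through this registry (field names follow the constructors
# above, the concrete values are only illustrative):
#   task:
#     _name_: hg38
#     loss: cross_entropy
#     metrics: [accuracy]
#     torchmetrics: [perplexity, num_tokens]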
| hyena-dna-main | src/tasks/tasks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Decoder(nn.Module):
"""This class doesn't do much but just signals the interface that Decoders are expected to adhere to
TODO: is there a way to enforce the signature of the forward method?
"""
def forward(self, x, **kwargs):
"""
x: (batch, length, dim) input tensor
state: additional state from the model backbone
*args, **kwargs: additional info from the dataset
Returns:
y: output tensor
*args: other arguments to pass into the loss function
"""
return x
def step(self, x):
"""
x: (batch, dim)
"""
return self.forward(x.unsqueeze(1)).squeeze(1)
class SequenceDecoder(Decoder):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
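# Minimal usage sketch for SequenceDecoder above (hypothetical shapes, not from the repo):
# decoder = SequenceDecoder(d_model=128, d_output=2, l_output=0, mode="pool")
# out = decoder(torch.randn(4, 1024, 128))  # mean-pools over length, then projects -> (4, 2)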
class TokenDecoder(Decoder):
"""Decoder for token level classification"""
def __init__(
self, d_model, d_output=3
):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
x = self.output_transform(x)
return x
class NDDecoder(Decoder):
"""Decoder for single target (e.g. classification or regression)"""
def __init__(
self, d_model, d_output=None, mode="pool"
):
super().__init__()
assert mode in ["pool", "full"]
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
self.mode = mode
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.mode == 'pool':
x = reduce(x, 'b ... h -> b h', 'mean')
x = self.output_transform(x)
return x
class StateDecoder(Decoder):
"""Use the output state to decode (useful for stateful models such as RNNs or perhaps Transformer-XL if it gets implemented"""
def __init__(self, d_model, state_to_tensor, d_output):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
self.state_transform = state_to_tensor
def forward(self, x, state=None):
return self.output_transform(self.state_transform(state))
class RetrievalHead(nn.Module):
def __init__(self, d_input, d_model, n_classes, nli=True, activation="relu"):
super().__init__()
self.nli = nli
if activation == "relu":
activation_fn = nn.ReLU()
elif activation == "gelu":
activation_fn = nn.GELU()
else:
raise NotImplementedError
if (
self.nli
): # Architecture from https://github.com/mlpen/Nystromformer/blob/6539b895fa5f798ea0509d19f336d4be787b5708/reorganized_code/LRA/model_wrapper.py#L74
self.classifier = nn.Sequential(
nn.Linear(4 * d_input, d_model),
activation_fn,
nn.Linear(d_model, n_classes),
)
else: # Head from https://github.com/google-research/long-range-arena/blob/ad0ff01a5b3492ade621553a1caae383b347e0c1/lra_benchmarks/models/layers/common_layers.py#L232
self.classifier = nn.Sequential(
nn.Linear(2 * d_input, d_model),
activation_fn,
nn.Linear(d_model, d_model // 2),
activation_fn,
nn.Linear(d_model // 2, n_classes),
)
def forward(self, x):
"""
x: (2*batch, dim)
"""
outs = rearrange(x, "(z b) d -> z b d", z=2)
outs0, outs1 = outs[0], outs[1] # (n_batch, d_input)
if self.nli:
features = torch.cat(
[outs0, outs1, outs0 - outs1, outs0 * outs1], dim=-1
) # (batch, dim)
else:
features = torch.cat([outs0, outs1], dim=-1) # (batch, dim)
logits = self.classifier(features)
return logits
class RetrievalDecoder(Decoder):
"""Combines the standard FeatureDecoder to extract a feature before passing through the RetrievalHead"""
def __init__(
self,
d_input,
n_classes,
d_model=None,
nli=True,
activation="relu",
*args,
**kwargs
):
super().__init__()
if d_model is None:
d_model = d_input
self.feature = SequenceDecoder(
d_input, d_output=None, l_output=0, *args, **kwargs
)
self.retrieval = RetrievalHead(
d_input, d_model, n_classes, nli=nli, activation=activation
)
def forward(self, x, state=None, **kwargs):
x = self.feature(x, state=state, **kwargs)
x = self.retrieval(x)
return x
class PackedDecoder(Decoder):
def forward(self, x, state=None):
x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
return x
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Decoder,
"id": nn.Identity,
"linear": nn.Linear,
"sequence": SequenceDecoder,
"nd": NDDecoder,
"retrieval": RetrievalDecoder,
"state": StateDecoder,
"pack": PackedDecoder,
"token": TokenDecoder,
}
model_attrs = {
"linear": ["d_output"],
"sequence": ["d_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_state", "state_to_tensor"],
"forecast": ["d_output"],
"token": ["d_output"],
}
dataset_attrs = {
"linear": ["d_output"],
"sequence": ["d_output", "l_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_output"],
"forecast": ["d_output", "l_output"],
"token": ["d_output"],
}
def _instantiate(decoder, model=None, dataset=None):
"""Instantiate a single decoder"""
if decoder is None:
return None
if isinstance(decoder, str):
name = decoder
else:
name = decoder["_name_"]
# Extract arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate decoder
obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)
return obj
def instantiate(decoder, model=None, dataset=None):
"""Instantiate a full decoder config, e.g. handle list of configs
Note that arguments are added in reverse order compared to encoder (model first, then dataset)
"""
decoder = utils.to_list(decoder)
return U.PassthroughSequential(
*[_instantiate(d, model=model, dataset=dataset) for d in decoder]
)
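# Usage sketch (hypothetical attribute holders): a "linear" decoder maps the backbone
# output dimension (grabbed from the model) to the dataset's target dimension, with
# model arguments passed before dataset arguments, as noted above.
def _example_instantiate_linear_decoder():
    class _Model: d_output = 128    # output dim exposed by the model backbone
    class _Dataset: d_output = 10   # number of targets exposed by the dataset
    # roughly equivalent to nn.Linear(128, 10) wrapped in a passthrough container
    return instantiate("linear", model=_Model(), dataset=_Dataset())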
| hyena-dna-main | src/tasks/decoders.py |
import datetime
import math
from typing import ForwardRef
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
from src.models.sequence.block import SequenceResidualBlock
from src.models.nn.components import Normalization
class Encoder(nn.Module):
"""Encoder abstraction
Accepts a tensor and optional kwargs. Outside of the main tensor, all other arguments should be kwargs.
Returns a tensor and optional kwargs.
Encoders are combined via U.PassthroughSequential which passes these kwargs through in a pipeline. The resulting kwargs are accumulated and passed into the model backbone.
"""
def forward(self, x, **kwargs):
"""
        x: input tensor
        **kwargs: additional info from the dataset (e.g. sequence lengths)
        Returns:
        y: output tensor
        **kwargs: other keyword arguments to pass into the model backbone
"""
return x, {}
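# Minimal sketch of the Encoder contract (hypothetical subclass, not in the original file):
# an encoder returns the transformed tensor plus a dict of extra kwargs, which
# U.PassthroughSequential accumulates and forwards to the model backbone.
class _ExampleLengthsEncoder(Encoder):
    def forward(self, x, lengths=None, **kwargs):
        # no transformation here; just pass the sequence lengths on to the backbone
        return x, {"lengths": lengths}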
class PositionalIDEncoder(Encoder):
def forward(self, x):
position_ids = torch.arange(x.shape[-1], dtype=torch.long, device=x.device)
position_ids = repeat(position_ids, 'l -> b l', b=x.shape[0])
return x, { 'position_ids': position_ids }
# Adapted from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
class PositionalEncoder(Encoder):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i/d_{model}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos / 10000^{2i/d_{model}})
        \text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoder(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=16384, pe_init=None):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
if pe_init is not None:
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.normal_(self.pe, 0, pe_init)
# self.pe = pe.unsqueeze(1)
else:
pe = torch.zeros(max_len, d_model)
position = torch.arange(0.0, max_len).unsqueeze(1)
div_term = torch.exp(
-math.log(10000.0) * torch.arange(0.0, d_model, 2.0) / d_model
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
self.attn_mask = None
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
lens: actual lengths of sequences
Shape:
x: [l_sequence, n_batch, d_model]
Returns: [l_sequence, n_batch, d_model]
attn_mask: [l_sequence, l_sequence]
padding_mask:
"""
x = x + self.pe[: x.size(-2)]
return self.dropout(x)
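# Usage sketch (assumes batch-first (batch, length, d_model) input, which is what the
# default buffer-based branch broadcasts against via self.pe[: x.size(-2)]):
def _example_positional_encoder():
    pos_encoder = PositionalEncoder(d_model=16, dropout=0.0, max_len=128)
    x = torch.zeros(2, 32, 16)
    y = pos_encoder(x)  # same shape, with the sinusoidal table added along the length dim
    return y.shape      # torch.Size([2, 32, 16])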
class ClassEmbedding(Encoder):
# Should also be able to define this by subclassing Embedding
def __init__(self, n_classes, d_model):
super().__init__()
self.embedding = nn.Embedding(n_classes, d_model)
def forward(self, x, y):
x = x + self.embedding(y).unsqueeze(-2) # (B, L, D)
return x
class Conv1DEncoder(Encoder):
def __init__(self, d_input, d_model, kernel_size=25, stride=1, padding='same'):
super().__init__()
self.conv = nn.Conv1d(
in_channels=d_input,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
def forward(self, x):
# BLD -> BLD
x = self.conv(x.transpose(1, 2)).transpose(1, 2)
return x
class LayerEncoder(Encoder):
"""Use an arbitrary SequenceModule layer"""
def __init__(self, d_model, prenorm=False, norm='layer', layer=None):
super().__init__()
# Simple stack of blocks
layer["transposed"] = False
self.layer = SequenceResidualBlock(
d_input=d_model,
prenorm=prenorm,
layer=layer,
residual='R',
norm=norm,
pool=None,
)
def forward(self, x):
x, _ = self.layer(x) # Discard state
return x
class TimestampEmbeddingEncoder(Encoder):
"""
General time encoder for Pandas Timestamp objects (encoded as torch tensors).
See MonashDataset for an example of how to return time features as 'z's.
"""
cardinalities = {
'day': (1, 31),
'hour': (0, 23),
'minute': (0, 59),
'second': (0, 59),
'month': (1, 12),
'year': (1950, 2010), # (1800, 3000) used to be (1970, datetime.datetime.now().year + 1) but was not enough for all datasets in monash
'dayofweek': (0, 6),
'dayofyear': (1, 366),
'quarter': (1, 4),
'week': (1, 53),
'is_month_start': (0, 1),
'is_month_end': (0, 1),
'is_quarter_start': (0, 1),
'is_quarter_end': (0, 1),
'is_year_start': (0, 1),
'is_year_end': (0, 1),
'is_leap_year': (0, 1),
}
def __init__(self, d_model, table=False, features=None):
super().__init__()
self.table = table
self.ranges = {k: max_val - min_val + 2 for k, (min_val, max_val) in self.cardinalities.items()} # padding for null included
        if features is not None:
            self.cardinalities = {k: v for k, v in self.cardinalities.items() if k in features}
if table:
self.embedding = nn.ModuleDict({
attr: nn.Embedding(maxval - minval + 2, d_model, padding_idx=0)
for attr, (minval, maxval) in self.cardinalities.items()
})
else:
self.embedding = nn.ModuleDict({
attr: nn.Linear(1, d_model)
for attr in self.cardinalities
})
def forward(self, x, timestamps=None):
for attr in timestamps:
mask = timestamps[attr] == -1
timestamps[attr] = timestamps[attr] - self.cardinalities[attr][0]
timestamps[attr][mask] = 0
if self.table:
x = x + self.embedding[attr](timestamps[attr].to(torch.long))
else:
x = x + self.embedding[attr]((2 * timestamps[attr] / self.ranges[attr] - 1).unsqueeze(-1))
#x = x + self.embedding(timestamps[attr].to(torch.float)).unsqueeze(1)
return x
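# Usage sketch (hypothetical feature subset and shapes): with table=True each timestamp
# attribute gets its own embedding table (with a padding slot for missing values encoded
# as -1), and the embeddings are added onto the input.
def _example_timestamp_embedding_encoder():
    enc = TimestampEmbeddingEncoder(d_model=32, table=True, features=['hour', 'dayofweek'])
    x = torch.zeros(2, 4, 32)  # (batch, length, d_model)
    timestamps = {
        'hour': torch.randint(0, 24, (2, 4)),
        'dayofweek': torch.randint(0, 7, (2, 4)),
    }
    return enc(x, timestamps=timestamps).shape  # torch.Size([2, 4, 32])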
class TimeEncoder(Encoder):
def __init__(self, n_tokens_time, d_model, timeenc=0):
super().__init__()
self.timeenc = timeenc
if self.timeenc == 0:
self.encoders = nn.ModuleList(
[nn.Embedding(v, d_model) for v in n_tokens_time]
)
else:
self.encoders = nn.Linear(len(n_tokens_time), d_model)
self.mask_embed = nn.Embedding(2, d_model)
def forward(self, x, mark=None, mask=None):
assert mark is not None and mask is not None, "Extra arguments should be returned by collate function"
if self.timeenc == 0:
assert mark.size(-1) == len(self.encoders)
embeddings = [
embed(z) for embed, z in zip(self.encoders, torch.unbind(mark, dim=-1))
]
time_encode = torch.sum(torch.stack(embeddings), dim=0)
else:
time_encode = self.encoders(mark)
mask_encode = self.mask_embed(mask.squeeze(-1))
return x + time_encode + mask_encode # (B, L, d_model)
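# Usage sketch (hypothetical sizes): with timeenc=0 each time field is embedded separately
# and summed; a 2-way mask embedding flags positions (e.g. observed vs. forecast region).
def _example_time_encoder():
    enc = TimeEncoder(n_tokens_time=[13, 32, 7, 24], d_model=16, timeenc=0)
    x = torch.zeros(2, 8, 16)
    mark = torch.stack([
        torch.randint(0, 13, (2, 8)),   # month
        torch.randint(0, 32, (2, 8)),   # day
        torch.randint(0, 7, (2, 8)),    # weekday
        torch.randint(0, 24, (2, 8)),   # hour
    ], dim=-1)
    mask = torch.zeros(2, 8, 1, dtype=torch.long)
    return enc(x, mark=mark, mask=mask).shape  # torch.Size([2, 8, 16])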
class PackedEncoder(Encoder):
def forward(self, x, len_batch=None):
assert len_batch is not None
x = nn.utils.rnn.pack_padded_sequence(
x, len_batch.cpu(), enforce_sorted=False, batch_first=True,
)
return x
class OneHotEncoder(Encoder):
def __init__(self, n_tokens, d_model):
super().__init__()
assert n_tokens <= d_model
self.d_model = d_model
def forward(self, x):
return F.one_hot(x.squeeze(-1), self.d_model).float()
class Conv2DPatchEncoder(Encoder):
"""
For encoding images into a sequence of patches.
"""
def __init__(self, d_input, d_model, filter_sizes, flat=False):
"""
d_input: dim of encoder input (data dimension)
d_model: dim of encoder output (model dimension)
filter_sizes: tuple with fh, fw
flat: if image is flattened from dataloader (like in cifar),
then we need to reshape back to 2D before conv
"""
fh, fw = filter_sizes
self.flat = flat
super().__init__()
assert len(filter_sizes) == 2
self.encoder = nn.Conv2d(d_input, d_model, kernel_size=(fh, fw), stride=(fh, fw))
def forward(self, x):
"""
x shape expected = [b, h, w, c]
returns tuple with x, with new shape = [b, seq_len, c_out]
"""
x = rearrange(x, 'b h w c -> b c h w')
x = self.encoder(x)
x = rearrange(x, 'b c h w -> b (h w) c')
return x
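# Usage sketch (hypothetical sizes): a 32x32 RGB image is cut into non-overlapping 4x4
# patches, giving a sequence of 8*8 = 64 patch embeddings.
def _example_conv2d_patch_encoder():
    enc = Conv2DPatchEncoder(d_input=3, d_model=48, filter_sizes=(4, 4))
    x = torch.randn(2, 32, 32, 3)  # (b, h, w, c), matching the forward() docstring
    return enc(x).shape            # torch.Size([2, 64, 48])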
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Encoder,
"id": nn.Identity,
"embedding": nn.Embedding,
"linear": nn.Linear,
"position": PositionalEncoder,
"position_id": PositionalIDEncoder,
"class": ClassEmbedding,
"pack": PackedEncoder,
"time": TimeEncoder,
"onehot": OneHotEncoder,
"conv1d": Conv1DEncoder,
"patch2d": Conv2DPatchEncoder,
"timestamp_embedding": TimestampEmbeddingEncoder,
"layer": LayerEncoder,
}
dataset_attrs = {
"embedding": ["n_tokens"],
"linear": ["d_input"], # TODO make this d_data?
"class": ["n_classes"],
"time": ["n_tokens_time"],
"onehot": ["n_tokens"],
"conv1d": ["d_input"],
"patch2d": ["d_input"],
}
model_attrs = {
"embedding": ["d_model"],
"linear": ["d_model"],
"position": ["d_model"],
"class": ["d_model"],
"time": ["d_model"],
"onehot": ["d_model"],
"conv1d": ["d_model"],
"patch2d": ["d_model"],
"timestamp_embedding": ["d_model"],
"layer": ["d_model"],
}
def _instantiate(encoder, dataset=None, model=None):
"""Instantiate a single encoder"""
if encoder is None:
return None
if isinstance(encoder, str):
name = encoder
else:
name = encoder["_name_"]
# Extract dataset/model arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate encoder
obj = utils.instantiate(registry, encoder, *dataset_args, *model_args)
return obj
def instantiate(encoder, dataset=None, model=None):
encoder = utils.to_list(encoder)
return U.PassthroughSequential(
*[_instantiate(e, dataset=dataset, model=model) for e in encoder]
)
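# Usage sketch (hypothetical attribute holders): configs are looked up in `registry` by
# name, and the per-name attribute lists above are pulled off the dataset/model objects
# and passed as positional arguments (dataset first, then model).
def _example_instantiate_encoders():
    class _Dataset: n_tokens = 16   # vocabulary size exposed by a dataset
    class _Model: d_model = 32      # model dimension exposed by the backbone config
    # "embedding" -> nn.Embedding(n_tokens, d_model); "position" -> PositionalEncoder(d_model)
    return instantiate(["embedding", "position"], dataset=_Dataset(), model=_Model())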
| hyena-dna-main | src/tasks/encoders.py |
from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(pl.Callback):
""" Log the number of parameters of the model """
def __init__(
self,
total: bool = True,
trainable: bool = True,
fixed: bool = True,
):
super().__init__()
self._log_stats = AttributeDict(
{
'total_params_log': total,
'trainable_params_log': trainable,
'non_trainable_params_log': fixed,
}
)
@rank_zero_only
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
logs = {}
if self._log_stats.total_params_log:
logs["params/total"] = sum(p.numel() for p in pl_module.parameters())
if self._log_stats.trainable_params_log:
logs["params/trainable"] = sum(p.numel() for p in pl_module.parameters()
if p.requires_grad)
if self._log_stats.non_trainable_params_log:
logs["params/fixed"] = sum(p.numel() for p in pl_module.parameters()
if not p.requires_grad)
if trainer.logger:
trainer.logger.log_hyperparams(logs)
| hyena-dna-main | src/callbacks/params.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def set_affinity(trainer):
try:
from src.utils.gpu_affinity import set_affinity
nproc_per_node = torch.cuda.device_count()
affinity = set_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
# TD [2022-05-07] Somehow calling this causes GPU 0 to allocate extra ~800MB of memory per
# number of GPUs (e.g., 6.4GB of extra memory in a 8-GPU setup). H/t Dan.
# l2_promote()
except:
pass
class GpuAffinity(Callback):
"""Set GPU affinity and increase the L2 fetch granularity.
Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
"""
def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
set_affinity(trainer)
| hyena-dna-main | src/callbacks/gpu_affinity.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/wandb_callbacks.py
import glob
import os
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
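# Sketch of the validation_step contract that LogConfusionMatrix (and the heatmap callback
# below) relies on: the LightningModule's validation_step should return a dict containing
# "preds" and "targets". The module and batch below are hypothetical.
def _example_validation_step(pl_module, batch, batch_idx):
    x, y = batch
    logits = pl_module(x)
    return {"preds": torch.argmax(logits, dim=-1), "targets": y}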
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(preds, targets, average=None)
r = recall_score(preds, targets, average=None)
p = precision_score(preds, targets, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, axis=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
class LogDT(Callback):
""" Log the dt values (from NeurIPS 2021 LSSL submission) """
def on_train_epoch_end(self, trainer, pl_module):
log_dict = {}
for name, m in pl_module.model.named_modules():
if pl_module.hparams.train.get('log_dt', False) \
and hasattr(m, "log_dt"):
log_dict[f"{name}.log_dt"] = (
m.log_dt.detach().cpu().numpy().flatten()
)
log_dict[f"{name}.log_dt.image"] = wandb.Image(
m.log_dt.detach().cpu().numpy().flatten().reshape(1, -1)
)
log_dict[f"{name}.log_dt"] = wandb.Table(
dataframe=pd.DataFrame(
{"log_dt": m.log_dt.detach().cpu().numpy().flatten()}
)
)
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
if trainer.logger is not None:
trainer.logger.experiment.log(log_dict)
| hyena-dna-main | src/callbacks/wandb.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/speed_monitor.py
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class Timer(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(
self,
step: bool = True,
inter_step: bool = True,
epoch: bool = True,
val: bool = True,
):
super().__init__()
self._log_stats = AttributeDict( {
'step_time': step,
'inter_step_time': inter_step,
'epoch_time': epoch,
'val_time': val,
})
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.step_time:
self._snap_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["timer/inter_step"] = (time.time() - self._snap_inter_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.step_time and self._snap_step_time:
logs["timer/step"] = (time.time() - self._snap_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["timer/epoch"] = time.time() - self._snap_epoch_time
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_val_time = time.time()
@rank_zero_only
def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.val_time and self._snap_val_time:
logs["timer/validation"] = time.time() - self._snap_val_time
if trainer.logger: trainer.logger.log_metrics(logs) # , step=trainer.global_step)
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
| hyena-dna-main | src/callbacks/timer.py |
r"""
Sequence Length Warmup by Reloading
====================
Change sequence lengths according to a stage schedule. The stage parameters sets the sequence length
and batch size.
TODO (not yet supported):
If batch size is not provided for that stage, calculate the batch size based on the
sequence length reshaping into the batch size.
"""
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class SeqlenWarmupReload(Callback):
def __init__(self, stage_params: list):
"""
        stage_params is a list of dicts, with sequence length increasing across stages,
        e.g. stage_params = [
            {'seq_len': 128, 'epochs': 20},
            {'seq_len': 256, 'epochs': 30},
            {'seq_len': 512, 'epochs': 50},
        ]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'seq_len', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: seq_len and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
            if 'scheduler' in stage:  # stages are plain dicts, so check for the key (hasattr never fires on a dict)
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Seq Len Warmup: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model) # we don't need to update the model, yet
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
        if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
# set new seq len and reset the dataloader
# max_length should be set in the config of the dataloader
seq_len = self.stage_params[self._current_stage]['seq_len']
model.hparams.loader.max_length = seq_len
# we need to resize the batch size too
batch_size = self.stage_params[self._current_stage].get('batch_size', None)
# need to change the dataset params, and the set the phase, which reinits the dataset
model.dataset.max_length = seq_len # progressively update the seq len
# model.dataset.max_length_val = seq_len # we update the val len to be same as train
# model.dataset.max_length_test = seq_len # we don't change the test set, always the longest
model.dataset.batch_size = batch_size # need to adjust the batch size
# model.dataset.batch_size_eval = batch_size * 2 #
# model.dataset.dataset_train.max_length = seq_len
model.dataset.init_datasets() # reinit the datasets with new batch size and seq len
trainer.reset_train_dataloader(model) # tells PTL to use the new dataloaders/datasets
trainer.reset_val_dataloader(model)
print('\tAt epoch {}, changed Seq Len to {}, and batch size to {}'.format(trainer.current_epoch, seq_len, batch_size))
# def _update_model(self, trainer, model):
# if not hasattr(self.stage_params[self._current_stage], 'bandlimit'):
# return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
# for module in model.modules():
# if hasattr(module, 'bandlimit'):
# module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
# print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Seq Len Warmup: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
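# Usage sketch (hypothetical schedule): sequence length grows across stages while the
# batch size shrinks, keeping tokens per step roughly constant.
def _example_seqlen_warmup_schedule():
    stages = [
        {'seq_len': 1024, 'epochs': 2, 'batch_size': 256},
        {'seq_len': 2048, 'epochs': 2, 'batch_size': 128},
        {'seq_len': 4096, 'epochs': 2, 'batch_size': 64},
    ]
    return SeqlenWarmupReload(stage_params=stages)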
| hyena-dna-main | src/callbacks/seqlen_warmup_reload.py |
import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from omegaconf import OmegaConf
class TrackNorms(pl.Callback):
# TODO do callbacks happen before or after the method in the main LightningModule?
# @rank_zero_only # needed?
def on_after_training_step(self, batch, batch_idx, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Log extra metrics
metrics = {}
if hasattr(pl_module, "_grad_norms"):
metrics.update(pl_module._grad_norms)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# example to inspect gradient information in tensorboard
if OmegaConf.select(trainer.hparams, 'trainer.track_grad_norms'): # TODO dot notation should work with omegaconf?
norms = {}
for name, p in pl_module.named_parameters():
if p.grad is None:
continue
# param_norm = float(p.grad.data.norm(norm_type))
param_norm = torch.mean(p.grad.data ** 2)
norms[f"grad_norm.{name}"] = param_norm
pl_module._grad_norms = norms
| hyena-dna-main | src/callbacks/norms.py |
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class ProgressiveResizing(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'resolution': 4, 'epochs': 50}, # 32 x 32
{'resolution': 2, 'epochs': 30}, # 64 x 64
{'resolution': 1, 'epochs': 20}, # 128 x 128
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'resolution', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: resolution and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
            if 'scheduler' in stage:  # stages are plain dicts, so check for the key (hasattr never fires on a dict)
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Progressive Resizing: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
        if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
model.hparams.loader.train_resolution = self.stage_params[self._current_stage]['resolution']
trainer.reset_train_dataloader(model)
print('\tChanged resolution to {}'.format(self.stage_params[self._current_stage]['resolution']))
def _update_model(self, trainer, model):
        if 'bandlimit' not in self.stage_params[self._current_stage]:
return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
for module in model.modules():
if hasattr(module, 'bandlimit'):
module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Progressive Resizing: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
| hyena-dna-main | src/callbacks/progressive_resizing.py |
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.base import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)
for offset_type, feature_classes in features_by_offsets.items():
if isinstance(offset, offset_type):
return [cls() for cls in feature_classes]
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
def time_features(dates, timeenc=1, freq="h"):
"""
> `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0:
> * m - [month]
> * w - [month]
> * d - [month, day, weekday]
> * b - [month, day, weekday]
> * h - [month, day, weekday, hour]
> * t - [month, day, weekday, hour, *minute]
>
> If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
> * Q - [month]
> * M - [month]
> * W - [Day of month, week of year]
> * D - [Day of week, day of month, day of year]
> * B - [Day of week, day of month, day of year]
> * H - [Hour of day, day of week, day of month, day of year]
> * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
> * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
*minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
"""
if timeenc == 0:
dates["month"] = dates.date.apply(lambda row: row.month, 1)
dates["day"] = dates.date.apply(lambda row: row.day, 1)
dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1)
dates["hour"] = dates.date.apply(lambda row: row.hour, 1)
dates["minute"] = dates.date.apply(lambda row: row.minute, 1)
dates["minute"] = dates.minute.map(lambda x: x // 15)
freq_map = {
"y": [],
"m": ["month"],
"w": ["month"],
"d": ["month", "day", "weekday"],
"b": ["month", "day", "weekday"],
"h": ["month", "day", "weekday", "hour"],
"t": ["month", "day", "weekday", "hour", "minute"],
}
return dates[freq_map[freq.lower()]].values
if timeenc == 1:
dates = pd.to_datetime(dates.date.values)
return np.vstack(
[feat(dates) for feat in time_features_from_frequency_str(freq)]
).transpose(1, 0)
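# Usage sketch: hourly timestamps with timeenc=1 produce four normalized features per step
# (hour of day, day of week, day of month, day of year), each scaled to [-0.5, 0.5].
def _example_time_features():
    dates = pd.DataFrame({"date": pd.date_range("2021-01-01", periods=8, freq="h")})
    feats = time_features(dates, timeenc=1, freq="h")
    return feats.shape  # (8, 4)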
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data - mean) / std
def inverse_transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data * std) + mean
class InformerDataset(Dataset):
def __init__(
self,
root_path,
flag="train",
size=None,
features="S",
data_path="ETTh1.csv",
target="OT",
scale=True,
inverse=False,
timeenc=0,
freq="h",
cols=None,
eval_stamp=False,
eval_mask=False,
):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ["train", "test", "val"]
type_map = {"train": 0, "val": 1, "test": 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.eval_stamp = eval_stamp
self.eval_mask = eval_mask
self.forecast_horizon = self.pred_len
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def _borders(self, df_raw):
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
return border1s, border2s
def _process_columns(self, df_raw):
if self.cols:
cols = self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove("date")
return df_raw[["date"] + cols + [self.target]]
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
df_raw = self._process_columns(df_raw)
border1s, border2s = self._borders(df_raw)
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features == "M" or self.features == "MS":
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == "S":
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0] : border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[["date"]][border1:border2]
df_stamp["date"] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_x = np.concatenate(
[seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
)
if self.inverse:
seq_y = np.concatenate(
[
self.data_x[r_begin : r_begin + self.label_len],
self.data_y[r_begin + self.label_len : r_end],
],
0,
)
raise NotImplementedError
else:
# seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
seq_y = self.data_y[s_end:r_end]
# OLD in Informer codebase
# seq_x_mark = self.data_stamp[s_begin:s_end]
# seq_y_mark = self.data_stamp[r_begin:r_end]
if self.eval_stamp:
mark = self.data_stamp[s_begin:r_end]
else:
mark = self.data_stamp[s_begin:s_end]
mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
if self.eval_mask:
mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
else:
mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
mask = mask[:, None]
# Add the mask to the timestamps: # 480, 5
# mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
seq_x = seq_x.astype(np.float32)
seq_y = seq_y.astype(np.float32)
if self.timeenc == 0:
mark = mark.astype(np.int64)
else:
mark = mark.astype(np.float32)
mask = mask.astype(np.int64)
return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
@property
def d_input(self):
return self.data_x.shape[-1]
@property
def d_output(self):
if self.features in ["M", "S"]:
return self.data_x.shape[-1]
elif self.features == "MS":
return 1
else:
raise NotImplementedError
@property
def n_tokens_time(self):
if self.freq == 'h':
return [13, 32, 7, 24]
elif self.freq == 't':
return [13, 32, 7, 24, 4]
else:
raise NotImplementedError
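# Usage sketch (assumes the ETT csv has already been downloaded to `root_path`; the path
# and sizes below are hypothetical): each item is (seq_x, seq_y, mark, mask), where the
# last pred_len rows of seq_x are zeroed out and seq_y holds only the forecast window.
def _example_informer_dataset(root_path="./data/informer/etth"):
    ds = InformerDataset(root_path=root_path, flag="train", size=[336, 168, 24],
                         features="S", data_path="ETTh1.csv")
    seq_x, seq_y, mark, mask = ds[0]
    # seq_x: (336 + 24, 1), seq_y: (24, 1), mark: (336 + 24, 4), mask: (336 + 24, 1)
    return seq_x.shape, seq_y.shape, mark.shape, mask.shape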
class _Dataset_ETT_hour(InformerDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 - self.seq_len,
12 * 30 * 24 + 4 * 30 * 24 - self.seq_len,
]
border2s = [
12 * 30 * 24,
12 * 30 * 24 + 4 * 30 * 24,
12 * 30 * 24 + 8 * 30 * 24,
]
return border1s, border2s
def _process_columns(self, df_raw):
return df_raw
@property
def n_tokens_time(self):
assert self.freq == "h"
return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
super().__init__(data_path=data_path, freq=freq, **kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 * 4 - self.seq_len,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
]
border2s = [
12 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
]
return border1s, border2s
@property
def n_tokens_time(self):
assert self.freq == "t"
return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ECL(InformerDataset):
def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class InformerSequenceDataset(SequenceDataset):
@property
def n_tokens_time(self):
# Shape of the dates: depends on `timeenc` and `freq`
return self.dataset_train.n_tokens_time # data_stamp.shape[-1]
@property
def d_input(self):
return self.dataset_train.d_input
@property
def d_output(self):
return self.dataset_train.d_output
@property
def l_output(self):
return self.dataset_train.pred_len
def _get_data_filename(self, variant):
return self.variants[variant]
_collate_arg_names = ["mark", "mask"] # Names of the two extra tensors that the InformerDataset returns
def setup(self):
self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
self.dataset_train = self._dataset_cls(
root_path=self.data_dir,
flag="train",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_val = self._dataset_cls(
root_path=self.data_dir,
flag="val",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_test = self._dataset_cls(
root_path=self.data_dir,
flag="test",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
class ETTHour(InformerSequenceDataset):
_name_ = "etth"
_dataset_cls = _Dataset_ETT_hour
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ETTh1.csv",
1: "ETTh2.csv",
}
class ETTMinute(InformerSequenceDataset):
_name_ = "ettm"
_dataset_cls = _Dataset_ETT_minute
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "t",
"cols": None,
}
variants = {
0: "ETTm1.csv",
1: "ETTm2.csv",
}
class Weather(InformerSequenceDataset):
_name_ = "weather"
_dataset_cls = _Dataset_Weather
init_defaults = {
"size": None,
"features": "S",
"target": "WetBulbCelsius",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "WTH.csv",
}
class ECL(InformerSequenceDataset):
_name_ = "ecl"
_dataset_cls = _Dataset_ECL
init_defaults = {
"size": None,
"features": "S",
"target": "MT_320",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ECL.csv",
}
| hyena-dna-main | src/dataloaders/et.py |
from . import et, genomics
from .base import SequenceDataset
| hyena-dna-main | src/dataloaders/__init__.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py
from pathlib import Path
from typing import Any, List, Union
from torch.utils.data.dataloader import DataLoader, Dataset
from transformers import AutoTokenizer
from datasets import Dataset
from src.dataloaders.base import SequenceDataset, default_data_path
from src.dataloaders.fault_tolerant_sampler import RandomFaultTolerantSampler
from src.dataloaders.fault_tolerant_sampler import FaultTolerantDistributedSampler
# genomics datasets
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.datasets.hg38_dataset import HG38Dataset
from src.dataloaders.datasets.genomic_bench_dataset import GenomicBenchmarkDataset
from src.dataloaders.datasets.nucleotide_transformer_dataset import NucleotideTransformerDataset
from src.dataloaders.datasets.chromatin_profile_dataset import ChromatinProfileDataset
from src.dataloaders.datasets.species_dataset import SpeciesDataset
from src.dataloaders.datasets.icl_genomics_dataset import ICLGenomicsDataset
from src.dataloaders.datasets.hg38_fixed_dataset import HG38FixedDataset
"""
Dataloaders for genomics datasets, including pretraining and downstream tasks. First developed for the HyenaDNA project, May 2023.
"""
class HG38(SequenceDataset):
"""
Base class, other dataloaders can inherit from this class.
You must implement the following functions:
- __init__
- setup
You can then use (already have access to) the following functions:
- train_dataloader
- val_dataloader
- test_dataloader
"""
###### very important to set this! ######
_name_ = "hg38" # this name is how the dataset config finds the right dataloader
#########################################
def __init__(self, bed_file, fasta_file, tokenizer_name=None, dataset_config_name=None, max_length=1024, d_output=2, rc_aug=False,
max_length_val=None, max_length_test=None, val_ratio=0.0005, val_split_seed=2357, use_fixed_len_val=False,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, replace_N_token=False, pad_interval=False,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
        self.rc_aug = rc_aug  # reverse complement augmentation
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.bed_file = bed_file
self.fasta_file = fasta_file
self.use_fixed_len_val = use_fixed_len_val
self.replace_N_token = replace_N_token
self.pad_interval = pad_interval
# handle if file paths are None (default paths)
if self.bed_file is None:
self.bed_file = default_data_path / self._name_ / 'human-sequences.bed'
if self.fasta_file is None:
self.fasta_file = default_data_path / self._name_ / 'hg38.ml.fa'
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
"""Set up the tokenizer and init the datasets."""
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
self.init_datasets() # creates the datasets. You can also just create this inside the setup() here.
def init_datasets(self):
"""Init the datasets (separate from the tokenizer)"""
# delete old datasets to free memory
if hasattr(self, 'dataset_train'):
self.dataset_train.fasta.seqs.close()
del self.dataset_train.fasta.seqs
# delete old datasets to free memory
if hasattr(self, 'dataset_test'):
self.dataset_test.fasta.seqs.close()
del self.dataset_test.fasta.seqs
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
HG38Dataset(split=split,
bed_file=self.bed_file,
fasta_file=self.fasta_file,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
return_seq_indices=False,
shift_augs=None,
rc_aug=self.rc_aug,
return_augs=False,
replace_N_token=self.replace_N_token,
pad_interval=self.pad_interval)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
if self.use_fixed_len_val:
# we're placing the fixed test set in the val dataloader, for visualization!!!
# that means we should track mode with test loss, not val loss
# new option to use fixed val set
print("Using fixed length val set!")
# start end of chr14 and chrX grabbed from Enformer
chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
self.dataset_val = HG38FixedDataset(
chr_ranges=chr_ranges,
fasta_file=self.fasta_file,
max_length=self.max_length,
pad_max_length=self.max_length,
tokenizer=self.tokenizer,
add_eos=True,
)
return
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
if self.shuffle and self.fault_tolerant:
shuffle = False
# TD [2022-12-26]: We need the distributed_sampler_kwargs in case of model parallel:
# In that case the number of replicas and the data parallel rank are more complicated.
distributed_sampler_kwargs = self.trainer.distributed_sampler_kwargs
sampler = (FaultTolerantDistributedSampler(self.dataset_train,
**self.trainer.distributed_sampler_kwargs)
if self.ddp else RandomFaultTolerantSampler(self.dataset_train))
# TD [2022-08-06]: Only the DDP sampler supports fast-forwarding for now
# We assume that it's being resumed with the same number of GPUs
if self.ddp and self.fast_forward_epochs is not None and self.fast_forward_batches is not None:
sampler.load_state_dict({
'epoch': self.fast_forward_epochs,
'counter': self.fast_forward_batches * self.batch_size
})
else:
shuffle = self.shuffle
sampler = None
return self._data_loader(self.dataset_train, batch_size=self.batch_size,
shuffle=shuffle, sampler=sampler)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test, batch_size=self.batch_size_eval)
def _data_loader(self, dataset: Dataset, batch_size: int, shuffle: bool = False,
sampler=None) -> DataLoader:
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=1, # Data is already in memory, we don't need many workers
shuffle=shuffle,
sampler=sampler,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def load_state_dict(self, checkpoint):
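        """Restore fast-forward info (epoch / batch counters) from a Lightning checkpoint so the train sampler can resume mid-epoch."""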
if self.fault_tolerant:
self.fast_forward_epochs = checkpoint['loops']['fit_loop']['epoch_progress']['current']['completed']
# TD [2022-08-07] ['epoch_loop.batch_progress']['total']['completed'] is 1 iteration
# behind, so we're using the optimizer's progress. This is set correctly in seq.py.
self.fast_forward_batches = checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['current']['completed']
# At this point the train loader hasn't been constructed yet
class GenomicBenchmark(HG38):
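    """Data module for the GenomicBenchmark classification datasets; reuses the HG38 dataloader machinery and builds train/val splits of GenomicBenchmarkDataset with a character-level tokenizer."""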
_name_ = "genomic_benchmark"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
                model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
GenomicBenchmarkDataset(split=split,
max_length=max_len,
dataset_name=self.dataset_name,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class NucleotideTransformer(HG38):
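    """Data module for the NucleotideTransformer benchmark datasets; like GenomicBenchmark, but the eval dataloaders can be shuffled independently via shuffle_eval."""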
_name_ = "nucleotide_transformer"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, shuffle_eval=None, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.shuffle_eval = shuffle_eval if shuffle_eval is not None else shuffle # default is to use the same as train shuffle arg
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
                model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
NucleotideTransformerDataset(split=split,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
dataset_name = self.dataset_name,
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, shuffle=self.shuffle_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
# note: we're combining val/test into one
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, shuffle=self.shuffle_eval)
class ChromatinProfile(HG38):
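    """Data module for chromatin-profile prediction; sequences come from a reference genome at coordinates listed in per-split `{split}_{ref_genome_version}_coords_targets.csv` files."""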
    _name_ = 'chromatin_profile'
l_output = 0 # need to set this for decoder to work correctly for seq level
def __init__(self, data_path, ref_genome_path, ref_genome_version=None,
tokenizer_name=None, dataset_config_name=None,
max_length=1000, d_output=2, rc_aug=False, add_eos=True, val_only=False,
batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
*args, **kwargs):
self.data_path = data_path
self.ref_genome_path = ref_genome_path
self.ref_genome_version = ref_genome_version
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
        self.rc_aug = rc_aug  # reverse complement augmentation
self.max_length = max_length
self.add_eos = add_eos
        self.val_only = val_only
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
                model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
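        # when val_only is set, the validation split is reused for train/val/test alike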
if self.val_only:
            splits = ['val'] * 3
        else:
            splits = ['train', 'val', 'test']
self.dataset_train, self.dataset_val, self.dataset_test = [
ChromatinProfileDataset(
max_length=self.max_length,
                ref_genome_path=self.ref_genome_path,
                ref_genome_version=self.ref_genome_version,
                coords_target_path=f'{self.data_path}/{split}_{self.ref_genome_version}_coords_targets.csv',
tokenizer=self.tokenizer,
tokenizer_name=self.tokenizer_name,
use_padding=True,
)
for split in splits
]
class Species(HG38):
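    """Data module for species classification; wraps SpeciesDataset over per-species FASTA files, with d_output set to the number of species."""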
_name_ = "species"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, species: list, species_dir: str, tokenizer_name=None, dataset_config_name=None, d_output=None, max_length=1024, rc_aug=False,
max_length_val=None, max_length_test=None, cache_dir=None, val_ratio=0.0005, val_split_seed=2357,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, chromosome_weights='uniform', species_weights='uniform',
total_size=None, task='species_classification', remove_tail_ends=False, cutoff_train=0.1, cutoff_test=0.2,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
        self.rc_aug = rc_aug  # reverse complement augmentation
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.species = species # list of species to load
self.species_dir = species_dir
self.chromosome_weights = chromosome_weights
self.species_weights = species_weights
self.total_size = total_size
self.task = task
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
self.d_output = len(self.species)
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
                model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
self.vocab_size = len(self.tokenizer)
# Create datasets
self.init_datasets()
def init_datasets(self):
# delete old datasets
        # NOTE: for some reason, closing the underlying FASTA file handles only works for the train split
if hasattr(self, 'dataset_train'):
for spec in list(self.dataset_train.fastas.keys()):
for chromosome in list(self.dataset_train.fastas[spec].keys()):
self.dataset_train.fastas[spec][chromosome].close()
del self.dataset_train.fastas[spec][chromosome]
if hasattr(self, 'dataset_val'):
pass
if hasattr(self, 'dataset_test'):
pass
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
SpeciesDataset(species=self.species,
species_dir=self.species_dir,
split=split,
max_length=max_len,
total_size=self.total_size * (1 if split == 'test' else (self.max_length_test + 2) // max_len), # See the same # of tokens every epoch across train/val/test
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
rc_aug=self.rc_aug,
chromosome_weights=self.chromosome_weights,
species_weights=self.species_weights,
task=self.task,
remove_tail_ends=self.remove_tail_ends,
cutoff_train=self.cutoff_train,
cutoff_test=self.cutoff_test,
)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
return
class ICLGenomics(HG38):
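    """Data module for in-context-learning genomics; each ICLGenomicsDataset sample packs `shots` labelled demonstrations, with labels mapped to existing vocab tokens via label_to_token."""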
_name_ = "icl_genomics"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None, shots=1, label_to_token=None,
add_eos=True, characters=None, padding_side='left', val_ratio=0.0005, val_split_seed=2357,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=0,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
use_shmem=True, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.shots = shots # num shots in ICL sample
        self.label_to_token = label_to_token  # maps each label to an (arbitrary) token that already exists in the vocab
self.add_eos = add_eos
self.characters = list('ACTGN') if characters is None else characters
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
self.use_shmem = use_shmem
# if self.use_shmem:
# assert cache_dir is not None
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=self.characters,
                model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
)
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
self.dataset_train, self.dataset_val = [
ICLGenomicsDataset(
dataset_name=self.dataset_name,
split=split,
shots=self.shots,
use_padding=self.use_padding,
d_output=self.d_output,
max_length=max_len,
dest_path=self.dest_path,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
label_to_token=self.label_to_token,
rc_aug=self.rc_aug,
add_eos=self.add_eos,
)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class HG38Fixed(HG38):
_name_ = "hg38_fixed"
"""Just used for testing a fixed length, *non-overlapping* dataset for HG38."""
def __init__(self, fasta_file=None, chr_ranges=None, pad_max_length=None, batch_size=32,
max_length=None, num_workers=1, add_eos=True,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.fasta_file = fasta_file
self.chr_ranges = chr_ranges
self.max_length = max_length
self.pad_max_length = pad_max_length
self.add_eos = add_eos
self.batch_size = batch_size
self.batch_size_eval = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
if self.fasta_file is None:
self.fasta_file = default_data_path / "hg38" / 'hg38.ml.fa'
if self.chr_ranges is None:
            # start/end coordinates of chr14 and chrX, taken from Enformer
self.chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
def setup(self, stage=None):
# Create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
            model_max_length=self.max_length + 2,  # add 2 to account for the special tokens added by default; cropped later
add_special_tokens=False,
)
        # we only need one dataset; val and test below just reuse it
self.dataset_train = HG38FixedDataset(
fasta_file=self.fasta_file,
chr_ranges=self.chr_ranges, # a dict of chr: (start, end) to use for test set
max_length=self.max_length,
pad_max_length=self.pad_max_length,
tokenizer=tokenizer,
add_eos=self.add_eos,
)
self.dataset_val = self.dataset_train
self.dataset_test = self.dataset_train
# if __name__ == '__main__':
# """Quick test using dataloader. Can't call from here though."""
# loader = HG38(
# bed_file='/home/exnx/enformer-pytorch/data/basenji/human-sequences.bed',
# fasta_file='/home/exnx/enformer-pytorch/data/basenji/hg38.ml.fa',
# tokenizer_name='char_level', max_length=2000
# )
# breakpoint()
# it = iter(ds)
# elem = next(it)
# print(len(elem))
# breakpoint()
| hyena-dna-main | src/dataloaders/genomics.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSampler):
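    """RandomSampler that can be checkpointed and resumed mid-epoch: state_dict() saves the generator state and a counter of indices already served, and __iter__ skips that prefix after load_state_dict()."""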
def __init__(self, *args, generator=None, **kwargs):
# generator = torch.Generator().manual_seed(seed)
# super().__init__(*args, generator=generator, **kwargs)
# TD [2022-07-17]: We don't force the seed to be zero. We generate random seed,
# which should be reproducible if pl.seed_everything was called before hand.
# This means that changing the seed of the experiment will also change the
# sampling order.
if generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator().manual_seed(seed)
super().__init__(*args, generator=generator, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"random_state": self.state, "counter": self.counter}
def load_state_dict(self, state_dict):
self.generator.set_state(state_dict.get("random_state"))
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self):
# # We need a separate self.start_counter because PL seems to call len repeatedly.
# # If we use len(self.data_source) - self.counter then PL will think the epoch ends
# # when we're only half way through.
# return len(self.data_source) - self.start_counter
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
self.state = self.generator.get_state()
indices = torch.randperm(n, generator=self.generator).tolist()
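        # on restart, skip the indices that were already served before the checkpoint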
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter
class FaultTolerantDistributedSampler(DistributedSampler):
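    """DistributedSampler with the same mid-epoch resume mechanism: the epoch and a counter of served indices are checkpointed, and __iter__ skips the already-consumed prefix after a restart."""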
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"epoch": self.epoch, "counter": self.counter}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self) -> int:
# return self.num_samples - self.start_counter
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
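        # on restart, skip the indices that were already served before the checkpoint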
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter | hyena-dna-main | src/dataloaders/fault_tolerant_sampler.py |