"""
.. Densely Connected Convolutional Networks:
https://arxiv.org/abs/1608.06993
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4 * growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out, x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3] * growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DenseNet121():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)
def DenseNet169():
return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)
def DenseNet201():
return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)
def DenseNet161():
return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
def densenet_cifar():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
def test():
net = densenet_cifar()
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y)
# test()
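# A small usage sketch (not part of the original file): it instantiates the
# CIFAR-sized variant, counts parameters, and checks that 32x32 inputs map to
# num_classes logits; the batch size of 2 is an arbitrary choice.
def usage_sketch():
    net = densenet_cifar()
    num_params = sum(p.numel() for p in net.parameters())
    logits = net(torch.randn(2, 3, 32, 32))
    print(num_params, logits.shape)  # logits.shape == torch.Size([2, 100])
# usage_sketch()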
|
from .resnet import *
from .densenet import *
|
"""
.. Deep Residual Learning for Image Recognition:
https://arxiv.org/abs/1512.03385
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=100):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
net = ResNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
# test()
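# A small usage sketch (not part of the original file): BasicBlock keeps the
# final stage at 512 channels while Bottleneck expands it 4x, so the classifier
# sees 512 * block.expansion features; both variants emit num_classes logits.
def usage_sketch():
    for net in (ResNet18(), ResNet50()):
        y = net(torch.randn(1, 3, 32, 32))
        print(net.linear.in_features, y.size())  # 512 then 2048; both [1, 100]
# usage_sketch()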
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_lr_scheduler import FairseqLRScheduler
LR_SCHEDULER_REGISTRY = {}
def build_lr_scheduler(args, optimizer):
return LR_SCHEDULER_REGISTRY[args.lr_scheduler](args, optimizer)
def register_lr_scheduler(name):
"""Decorator to register a new LR scheduler."""
def register_lr_scheduler_cls(cls):
if name in LR_SCHEDULER_REGISTRY:
raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name))
if not issubclass(cls, FairseqLRScheduler):
raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__))
LR_SCHEDULER_REGISTRY[name] = cls
return cls
return register_lr_scheduler_cls
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.optim.lr_scheduler.' + module)
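# A hypothetical registration sketch (not part of fairseq): it shows how the
# decorator above adds a class to LR_SCHEDULER_REGISTRY so that
# build_lr_scheduler(args, optimizer) can construct it when
# --lr-scheduler=constant_sketch is requested. The name, class, and behaviour
# below are assumptions made purely for illustration.
@register_lr_scheduler('constant_sketch')
class ConstantSketchSchedule(FairseqLRScheduler):
    """Hold the learning rate fixed at args.lr[0] for the whole run."""
    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        self.lr = args.lr[0]
        self.optimizer.set_lr(self.lr)
    def step_update(self, num_updates):
        return self.optimizer.get_lr()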
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('cold_start')
class ColdStartSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = args.lr * sqrt(args.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
                'Cannot use a fixed learning rate schedule with cold_start.'
' Consider --lr-scheduler=fixed instead.'
)
warmup_end_lr = args.lr[0]
if args.warmup_init_lr < 0:
args.warmup_init_lr = warmup_end_lr
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * args.warmup_updates**0.5
# initial learning rate
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
# fmt: off
parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
# fmt: on
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.args.warmup_updates:
self.lr = self.args.lr[0]
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
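# A standalone sketch of the post-warmup decay rule implemented in step_update
# above (illustrative only; base_lr and warmup_updates mirror --lr and
# --warmup-updates but are plain arguments here, not fairseq args). The decay
# factor base_lr * sqrt(warmup_updates) makes the schedule equal base_lr at
# num_updates == warmup_updates and shrink as 1/sqrt(num_updates) afterwards.
def _decay_sketch(num_updates, base_lr=5e-4, warmup_updates=4000):
    decay_factor = base_lr * warmup_updates ** 0.5
    return decay_factor * num_updates ** -0.5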
|
import csv
from clap.datasets import tokenize
import torch
import torchaudio
# constants
MAX_TOKEN_LENGTH = 256
DATA_DIR = './data'
NUM_MEL = 80
TSV_FILE_NAME = 'subset.tsv'
# helpers
def tsv_to_dict(path):
with open(path) as fd:
rd = csv.DictReader(fd, delimiter = "\t", quotechar = '"')
return [row for row in rd]
# script
voice_clips = tsv_to_dict(f'{DATA_DIR}/{TSV_FILE_NAME}')
for clip in voice_clips:
filename = clip['path']
text = clip['sentence']
waveform, sample_rate = torchaudio.load(f"{DATA_DIR}/clips/{filename}", normalization = True)
output = torchaudio.transforms.MelSpectrogram(sample_rate, n_mels = NUM_MEL)(waveform)[0]
    tokenized = torch.tensor([int(byte) for byte in text.encode('utf-8')], dtype = torch.uint8)
save_path = f"{DATA_DIR}/{filename}.pt"
torch.save({
'audio': output.t(),
'text': tokenized
}, save_path)
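# A quick verification sketch (illustrative; assumes at least one clip was
# written above): reload a saved file and check the stored layout. 'audio' is
# (frames, NUM_MEL) because the spectrogram was transposed before saving, and
# 'text' holds raw UTF-8 byte values.
def _check_saved(path):
    saved = torch.load(path)
    assert saved['audio'].shape[-1] == NUM_MEL
    assert saved['text'].dtype == torch.uint8
# _check_saved(save_path)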
|
from setuptools import setup, find_packages
setup(
name="clap-jax",
packages=find_packages(),
version="0.0.1",
license="MIT",
description="CLAP - Contrastive Language-Audio Pretraining",
author="Charles Foster",
author_email="",
url="https://github.com/cfoster0/CLAP",
keywords=[
"artificial intelligence",
"deep learning",
"contrastive learning",
"audio",
],
install_requires=[
"click",
"click-option-group",
"einops>=0.3",
"flax",
"jax",
"jaxlib",
"lm_dataformat",
"optax",
"torch",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
import click
from click_option_group import optgroup
import jax
from jax import random, numpy as np, value_and_grad, jit, tree_util
from optax import chain, clip_by_global_norm, scale_by_adam, scale, apply_updates, add_decayed_weights, masked
from clap.models import CLAP
# data
from torch.utils.data import DataLoader
from clap.datasets import pair_text_spectrogram_dataset_collate_fn, PairTextSpectrogramDataset
@click.command()
@optgroup.group('Model settings')
@optgroup.option('--text_vocab', default = 256, type = int)
@optgroup.option('--text_dim', default = 512, type = int)
@optgroup.option('--text_depth', default = 1, type = int)
@optgroup.option('--text_heads', default = 8, type = int)
@optgroup.option('--audio_dim', default = 512, type = int)
@optgroup.option('--audio_depth', default = 1, type = int)
@optgroup.option('--audio_heads', default = 8, type = int)
@optgroup.group('Training settings')
@optgroup.option('--data_folder', default = './data', type = str)
@optgroup.option('--batch_size', default = 16, type = int)
@optgroup.option('--epochs', default = 100, type = int)
@optgroup.option('--learning_rate', default = 3e-4, type = float)
@optgroup.option('--weight_decay', default = 1e-1, type = float)
@optgroup.option('--seed', default = 0, type = int)
@optgroup.option('--max_norm', default = 0.5, type = float)
def train(
*,
data_folder,
batch_size,
epochs,
learning_rate,
weight_decay,
seed,
max_norm,
text_vocab,
text_dim,
text_depth,
text_heads,
audio_dim,
audio_depth,
audio_heads
):
# rng
rng_key = random.PRNGKey(seed)
# data
dataset = PairTextSpectrogramDataset(data_folder)
dl = DataLoader(dataset, batch_size = batch_size, collate_fn = pair_text_spectrogram_dataset_collate_fn, drop_last = True, shuffle = True)
# model
model = CLAP(
text_vocab = text_vocab,
text_dim = text_dim,
text_depth = text_depth,
text_heads = text_heads,
audio_dim = audio_dim,
audio_depth = audio_depth,
audio_heads = audio_heads
)
# optimizer
exclude_bias = lambda params: tree_util.tree_map(lambda x: x.ndim != 1, params)
optim = chain(
clip_by_global_norm(max_norm),
scale_by_adam(eps=1e-4),
add_decayed_weights(weight_decay, exclude_bias),
scale(-learning_rate)
)
# init
audio, audio_mask, text, text_mask = next(iter(dl))
params = model.init(rng_key, text, audio, text_mask, audio_mask)
optim_state = optim.init(params)
# loss function, for use with value_and_grad
@jit
@value_and_grad
def loss_fn(params, text, audio, text_mask, audio_mask):
return model.apply(params, text, audio, text_mask, audio_mask)
# train loop
for _ in range(epochs):
for audio, audio_mask, text, text_mask in dl:
loss, grads = loss_fn(params, text, audio, text_mask, audio_mask)
updates, optim_state = optim.update(grads, optim_state, params)
params = apply_updates(params, updates)
print(f'loss: {loss}')
# finished
if __name__ == "__main__":
train()
|
import jax
from typing import Any, Callable, Sequence, Optional
from jax import lax, random, numpy as np, vmap, jit
from jax.ops import index, index_update
# einsum and einops
from jax.numpy import einsum
from einops import rearrange, repeat
# flax
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
# constants
LARGE_NEG_VALUE = -1e10
# config
from jax.config import config
config.enable_omnistaging() # Linen requires enabling omnistaging
# helpers
def cross_entropy(logits, targets, axis=-1):
logprobs = nn.log_softmax(logits, axis=axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis=axis), axis=axis)
ce = -np.mean(nll)
return ce
def fixed_pos_embedding(seq, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(seq), inv_freq)
return np.sin(sinusoid_inp), np.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = np.stack((-x2, x1), axis=-1)
return rearrange(x, "... d j -> ... (d j)")
def apply_rotary_pos_emb(x, sincos):
sin, cos = map(lambda t: repeat(t, "b n -> b (n j)", j=2)[:, None, :], sincos)
return (x * cos) + (rotate_every_two(x) * sin)
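# A toy shape sketch for the rotary embedding helpers above (illustrative only;
# the sizes are arbitrary): the sin/cos tables are (seq, dim_head // 2) and are
# expanded to (seq, 1, dim_head) so they broadcast over the heads axis.
def _rotary_shape_sketch(seq=8, heads=2, dim_head=4):
    sincos = fixed_pos_embedding(seq, dim_head)
    q = np.ones((seq, heads, dim_head))
    return apply_rotary_pos_emb(q, sincos).shape  # (seq, heads, dim_head)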
# main class
class Attention(nn.Module):
dim: int
heads: int
dim_head: int = 64
causal: bool = False
@nn.compact
def __call__(self, x, pos_emb, mask):
dim_in, h = x.shape[-1], self.heads
scale = dim_in ** -0.5
norm = nn.LayerNorm()
to_qkv = nn.Dense(features=self.dim_head * h * 3, use_bias=False)
to_out = nn.Dense(features=dim_in)
x = norm(x)
qkv = np.split(to_qkv(x), 3, axis=-1)
q, k, v = map(lambda t: rearrange(t, "i (h d) -> i h d", h=h), qkv)
q = index_update(q, index[1:], apply_rotary_pos_emb(q[1:], pos_emb))
k = index_update(k, index[1:], apply_rotary_pos_emb(k[1:], pos_emb))
sim = einsum("i h d, j h d -> i j h", q, k) * scale
mask = np.pad(mask, (1, 0), constant_values=True)
mask = rearrange(mask, "j -> () j ()")
if self.causal:
i, j = sim.shape[:2]
tri_mask = np.ones((i - 1, j - 1), dtype=bool)
tri_mask = np.pad(tri_mask, ((1, 0), (1, 0)), constant_values=False)
causal_mask = np.triu(tri_mask, j - i + 1)
causal_mask = rearrange(causal_mask, "i j -> i j ()")
mask = ~causal_mask * mask
sim = np.where(mask, sim, LARGE_NEG_VALUE)
attn = nn.softmax(sim, axis=-2)
out = einsum("i j h, j h d -> i h d", attn, v)
out = rearrange(out, "i h d -> i (h d)")
return to_out(out)
class FeedForward(nn.Module):
mult: int = 4
@nn.compact
def __call__(self, x):
dim_in, mult = x.shape[-1], self.mult
norm = nn.LayerNorm()
to_intermediate = nn.Dense(features=dim_in * mult)
to_out = nn.Dense(features=dim_in)
x = norm(x)
x = to_intermediate(x)
x = nn.gelu(x)
x = to_out(x)
return x
class Transformer(nn.Module):
dim: int
depth: int
heads: int
dim_head: int = 64
causal: bool = False
cls_init: Callable = nn.initializers.lecun_normal()
def setup(self):
self.layers = [
(
Attention(
dim=self.dim,
heads=self.heads,
dim_head=self.dim_head,
causal=self.causal,
),
FeedForward(),
)
for _ in range(self.depth)
]
@nn.compact
def __call__(self, x, mask):
n, d, h, dh, dim = *x.shape, self.heads, self.dim_head, self.dim
if d != dim:
x = nn.Dense(features=dim)(x)
cls_token = self.param("cls", self.cls_init, (1, x.shape[-1]))
to_norm_out = nn.LayerNorm()
sincos = fixed_pos_embedding(n, self.dim_head)
x = np.concatenate((cls_token, x), axis=0)
for attn, ff in self.layers:
x = attn(x, pos_emb=sincos, mask=mask) + x
x = ff(x) + x
x = to_norm_out(x)
return x
class CLAP(nn.Module):
text_vocab: int
text_dim: int
text_depth: int
text_heads: int
audio_dim: int
audio_depth: int
audio_heads: int
temp_init: Callable = nn.initializers.zeros
def setup(self):
self.audio_encoder = Transformer(
dim=self.audio_dim, depth=self.audio_depth, heads=self.audio_heads
)
self.text_encoder = Transformer(
dim=self.text_dim, depth=self.text_depth, heads=self.text_heads, causal=True
)
@nn.compact
def __call__(self, text, audio, text_mask, audio_mask, return_loss=True):
b, text_vocab, text_dim = text.shape[0], self.text_vocab, self.text_dim
to_text_tokens = nn.Embed(num_embeddings=text_vocab, features=text_dim)
temp = self.param("temperature", self.temp_init, tuple())
text = to_text_tokens(text)
enc_text = vmap(self.text_encoder)(text, mask=text_mask)
enc_audio = vmap(self.audio_encoder)(audio, mask=audio_mask)
enc_text = enc_text[:, 0]
enc_audio = enc_audio[:, 0]
enc_text = enc_text / np.linalg.norm(enc_text, axis=-1, keepdims=True)
enc_audio = enc_audio / np.linalg.norm(enc_audio, axis=-1, keepdims=True)
sim = einsum("i d, j d -> i j", enc_text, enc_audio) * np.exp(temp)
if not return_loss:
return sim
labels = np.arange(b)
loss = (
cross_entropy(sim, labels, axis=0) + cross_entropy(sim, labels, axis=1)
) / 2
return loss
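# A toy sketch of the symmetric contrastive objective computed at the end of
# CLAP.__call__ above (illustrative only; the random matrix stands in for the
# text/audio similarities): matching pairs sit on the diagonal, and the loss
# averages the text->audio and audio->text cross entropies.
def _contrastive_loss_sketch(batch=4):
    sim = random.normal(random.PRNGKey(0), (batch, batch))
    labels = np.arange(batch)
    return (cross_entropy(sim, labels, axis=0) + cross_entropy(sim, labels, axis=1)) / 2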
|
import glob
import torch
from pathlib import Path
import lm_dataformat as lmd
from itertools import cycle, islice, chain
import torch.nn.functional as F
from torch.utils.data import Dataset, TensorDataset, ConcatDataset, IterableDataset
class CaptionedAudioMetadataset(IterableDataset):
def __init__(self, path_pairs, lazy=False):
self.datasets = [
CaptionedAudioDataset(captions_path, spectrograms_path, lazy=lazy)
for (captions_path, spectrograms_path) in path_pairs
]
def __iter__(self):
def roundrobin(datasets):
num_active = len(datasets)
nexts = cycle(iter(it).__next__ for it in datasets)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
iterator = roundrobin(self.datasets)
return iterator
class CaptionedAudioDataset(IterableDataset):
def __init__(self, captions_path, spectrograms_path, lazy=False):
self.lazy = lazy
if self.lazy:
            # Warning: The lazy path does not check whether the caption metadata
            # links it to the spectrogram. It assumes that the spectrogram data,
            # read from the files in the path in sorted order and loaded in as
            # tensors, follows the exact same ordering as the LMD-encoded captions.
self.captions = lmd.Reader(captions_path).stream_data(get_meta=False)
self.spectrograms = SpectrogramLazyDataset(spectrograms_path)
else:
self.captions = lmd.Reader(captions_path).stream_data(get_meta=True)
self.spectrograms = SpectrogramDataset(spectrograms_path)
def __iter__(self):
if self.lazy:
iterator = (
(tokenize(text), spectrogram)
for ((text, _), spectrogram) in zip(self.captions, self.spectrograms)
)
else:
iterator = (
(tokenize(text), self.spectrograms[meta["index"]])
for (text, meta) in self.captions
)
return iterator
class SpectrogramDataset(Dataset):
def __init__(self, path):
self.shard_paths = sorted(glob.glob(f"{path}/*.pt"))
self.data = ConcatDataset(
[SpectrogramDatasetShard(shard_path) for shard_path in self.shard_paths]
)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class SpectrogramLazyDataset(IterableDataset):
def __init__(self, path):
self.shard_paths = sorted(glob.glob(f"{path}/*.pt"))
def __iter__(self):
def lazy_shard_loader():
for shard_path in self.shard_paths:
self.shard_data = SpectrogramDatasetShard(shard_path)
for example in self.shard_data:
yield example
return lazy_shard_loader()
class SpectrogramDatasetShard(Dataset):
def __init__(self, path):
self.dataset_shard = TensorDataset(torch.load(path))
def __len__(self):
# Layout is [examples, frames, channels]
return len(self.dataset_shard)
def __getitem__(self, idx):
return self.dataset_shard[idx]
class PairTextSpectrogramDataset(Dataset):
def __init__(self, folder, max_audio_len = 2048, max_text_len = 256):
self.paths = [path for path in Path(folder).glob('*.pt')]
self.max_audio_len = max_audio_len
self.max_text_len = max_text_len
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
max_audio_len, max_text_len = self.max_audio_len, self.max_text_len
path = self.paths[idx]
data = torch.load(path)
audio, text = data['audio'], data['text']
audio = audio[:max_audio_len]
text = text[:max_text_len]
audio_mask = torch.ones(audio.shape[:-1]).bool()
text_mask = torch.ones_like(text).bool()
return audio, audio_mask, text, text_mask
def pair_text_spectrogram_dataset_collate_fn(batch):
audios = [el[0] for el in batch]
texts = [el[2] for el in batch]
max_audio_len = max([audio.shape[0] for audio in audios])
max_text_len = max([text.shape[0] for text in texts])
padded_batch = []
for audio, audio_mask, text, text_mask in batch:
audio_len = audio.shape[0]
text_len = text.shape[0]
audio_pad_len = max_audio_len - audio_len
text_pad_len = max_text_len - text_len
if audio_pad_len > 0:
audio = F.pad(audio, (0, 0, audio_pad_len, 0), value = 0.)
audio_mask = F.pad(audio_mask, (audio_pad_len, 0), value = False)
if text_pad_len > 0:
text = F.pad(text, (text_pad_len, 0), value = 0.)
text_mask = F.pad(text_mask, (text_pad_len, 0), value = False)
padded_batch.append((audio, audio_mask, text, text_mask))
output = tuple(map(lambda t: torch.stack(t).numpy(), zip(*padded_batch)))
return output
def tokenize(text, pad_to=256):
# Padding token is 0, the null byte
tokens = torch.zeros(pad_to, dtype=torch.uint8)
# Truncate to context window size on the right if need be
for i, byte in enumerate(text.encode("utf-8")):
if i < pad_to:
tokens[i] = int(byte)
else:
break
return torch.tensor(tokens)
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
num_active = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
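# A small sketch (not part of the dataset API) of how the collate function
# above left-pads variable-length pairs into fixed-size numpy batches; the toy
# tensors are assumptions chosen only to make the resulting shapes concrete.
def _collate_sketch():
    def make_pair(frames, n_bytes):
        return (
            torch.randn(frames, 80),                               # mel spectrogram
            torch.ones(frames).bool(),                             # audio mask
            torch.randint(0, 255, (n_bytes,), dtype=torch.uint8),  # byte-level text
            torch.ones(n_bytes).bool(),                            # text mask
        )
    audio, audio_mask, text, text_mask = pair_text_spectrogram_dataset_collate_fn(
        [make_pair(5, 2), make_pair(3, 4)]
    )
    # audio: (2, 5, 80), audio_mask: (2, 5), text: (2, 4), text_mask: (2, 4)
    return audio.shape, text.shape
# _collate_sketch()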
|
from clap.models import CLAP
from clap.datasets import CaptionedAudioDataset, CaptionedAudioMetadataset, tokenize
|
# Modified from Google's Vision Transformer repo, whose notice is reproduced below.
#
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import einops
import flax.linen as nn
import jax.numpy as jnp
class MlpBlock(nn.Module):
mlp_dim: int
@nn.compact
def __call__(self, x):
y = nn.Dense(self.mlp_dim)(x)
y = nn.gelu(y)
return nn.Dense(x.shape[-1])(y)
class MixerBlock(nn.Module):
"""Mixer block layer."""
tokens_mlp_dim: int
channels_mlp_dim: int
@nn.compact
def __call__(self, x):
y = nn.LayerNorm()(x)
y = jnp.swapaxes(y, 0, 1)
y = MlpBlock(self.tokens_mlp_dim, name="token_mixing")(y)
y = jnp.swapaxes(y, 0, 1)
x = x + y
y = nn.LayerNorm()(x)
return x + MlpBlock(self.channels_mlp_dim, name="channel_mixing")(y)
class MlpMixer(nn.Module):
"""Mixer architecture."""
patches: Any
strides: Any
num_classes: int
num_blocks: int
hidden_dim: int
tokens_mlp_dim: int
channels_mlp_dim: int
@nn.compact
def __call__(self, inputs):
x = nn.Conv(
self.hidden_dim, self.patches.size, strides=self.strides.size, name="stem"
)(inputs)
x = einops.rearrange(x, "h w c -> (h w) c")
for _ in range(self.num_blocks):
x = MixerBlock(self.tokens_mlp_dim, self.channels_mlp_dim)(x)
x = nn.LayerNorm(name="pre_head_layer_norm")(x)
x = jnp.mean(x, axis=0)
return nn.Dense(
self.num_classes, kernel_init=nn.initializers.zeros, name="head"
)(x)
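# A minimal shape sketch for MlpBlock above (illustrative only; the toy token
# and channel counts are arbitrary): the block expands to mlp_dim and projects
# back down, so the output width always matches the input width.
def _mlp_block_sketch():
    import jax
    block = MlpBlock(mlp_dim=64)
    x = jnp.ones((16, 32))  # (tokens, channels)
    params = block.init(jax.random.PRNGKey(0), x)
    return block.apply(params, x).shape  # (16, 32)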
|
import bitsandbytes as bnb
import torch
p = torch.nn.Parameter(torch.rand(10,10).cuda())
a = torch.rand(10,10).cuda()
p1 = p.data.sum().item()
adam = bnb.optim.Adam([p])
out = a*p
loss = out.sum()
loss.backward()
adam.step()
p2 = p.data.sum().item()
assert p1 != p2
print('SUCCESS!')
print('Installation was successful!')
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import find_packages, setup
libs = list(glob.glob("./bitsandbytes/libbitsandbytes*.so"))
libs = [os.path.basename(p) for p in libs]
print("libs:", libs)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name=f"bitsandbytes",
version=f"0.37.0",
author="Tim Dettmers",
author_email="[email protected]",
description="8-bit optimizers and matrix multiplication routines.",
license="MIT",
keywords="gpu optimizers optimization 8-bit quantization compression",
url="https://github.com/TimDettmers/bitsandbytes",
packages=find_packages(),
package_data={"": libs},
long_description=read("README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
import math
import random
import time
from itertools import product
import einops
import pytest
import torch
import numpy as np
import bitsandbytes as bnb
from bitsandbytes import functional as F
from scipy.stats import norm
torch.set_printoptions(
precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000
)
k = 20
def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
torch.testing.assert_allclose(a, b, rtol, atol)
class FFN(torch.nn.Module):
def __init__(self, input_features, hidden_size, bias=True):
super().__init__()
self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)
self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)
with torch.no_grad():
torch.nn.init.xavier_uniform_(self.fc1.weight)
torch.nn.init.xavier_uniform_(self.fc2.weight)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = self.fc2(x)
return x
class Timer:
def __init__(self):
self.starts = {}
self.ends = {}
self.agg = {}
def tick(self, name="default"):
if name not in self.starts:
self.starts[name] = torch.cuda.Event(enable_timing=True)
self.ends[name] = torch.cuda.Event(enable_timing=True)
self.starts[name].record()
else:
ms = self.tock(name, evict=True, print_ms=False)
def tock(self, name="default", evict=True, print_ms=True):
if name in self.ends:
self.ends[name].record()
torch.cuda.synchronize()
ms = self.starts[name].elapsed_time(self.ends[name])
if name not in self.agg:
self.agg[name] = 0.0
self.agg[name] += ms
if evict:
self.starts.pop(name)
self.ends.pop(name)
if print_ms and name in self.agg:
print(f"{name} took: {self.agg[name] / 1000.0:.5f}s")
return self.agg[name]
def reset(self):
self.starts = {}
self.ends = {}
self.agg = {}
print("Resetting benchmark data")
def setup():
pass
def teardown():
pass
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
def test_dynamic_quantization():
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diff.mean().item() < 0.0135
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2).mean().item()
torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
assert diff < 0.004
def test_dynamic_blockwise_quantization():
#print('')
for blocksize in [4096, 2048, 1024, 512]:
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.011
assert relerr < 0.018
#print('randn', blocksize, sum(diffs)/len(diffs))
#print('randn', blocksize, sum(reldiffs)/len(reldiffs))
diffs = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
#torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.0035
assert relerr < 0.015
#print('rand', blocksize, sum(diffs)/len(diffs))
#print('rand', blocksize, sum(reldiffs)/len(reldiffs))
def test_dynamic_blockwise_stochastic_quantization():
diffs = []
reldiffs = []
rand = torch.rand(1024).cuda()
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C1, S1 = F.quantize_blockwise(A1, rand=rand)
C2, S2 = F.quantize_blockwise(A1)
        # a maximum distance of 1 between quantized values
torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
fraction_smaller = (C1 < C2).float().sum() / C1.numel()
fraction_larger = (C1 > C2).float().sum() / C1.numel()
torch.testing.assert_allclose(
fraction_larger, fraction_smaller, atol=0.01, rtol=0
)
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
torch.testing.assert_allclose(clip1, clip2)
torch.testing.assert_allclose(gnorm1, gnorm2)
def quant(x):
max1 = torch.abs(x).max()
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
def dequant(c, maxC):
return c.float() * (maxC / 127)
def mm_dequant(maxA, maxB, C):
return C.float() * (maxA / 127) * (maxB / 127)
def quant_multi(x, dim):
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
max1[max1 == 0] = 1.0
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
def quant_multi_chunk(x, dim, chunk_size=32):
if dim == 1:
x_chunked = einops.rearrange(x, "(c a) b -> c a b", c=chunk_size)
max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)
max1 = torch.tile(max1, (1, 1, x.shape[1]))
max1 = max1.view(x.shape)
elif dim == 0:
x_chunked = einops.rearrange(x, "a (b c) -> a b c", c=chunk_size)
max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)
max1 = torch.tile(max1, (x.shape[0], 1, 1))
max1 = max1.view(x.shape)
max1[max1 == 0] = 1.0
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
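# A small CPU sketch (not one of the parametrized tests) of the absmax int8
# round trip implemented by quant/dequant above: values are scaled into
# [-127, 127], rounded, and rescaled, so the per-element error is at most half
# a quantization step, i.e. max1 / 254.
def _absmax_roundtrip_sketch():
    x = torch.randn(64, 64)
    max1, x_int8 = quant(x)
    assert torch.abs(x - dequant(x_int8, max1)).max() <= max1 / 254 + 1e-6
# _absmax_roundtrip_sketch()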
def quant_minmax(A):
minA = A.min()
maxA = A.max()
def mean(xx):
return sum(xx) / float(len(xx))
# dim1 = torch.randint(1,1024*4, size=(4,)).tolist()
# dim2 = torch.randint(1,1024*4, size=(4,)).tolist()
dim1 = [1024 * 2]
dim2 = [1024 * 16]
methods = [
(
lambda x, dim: quant(x),
lambda x, dim: quant(x),
dequant,
dequant,
mm_dequant,
)
]
methods.append((quant_multi, quant_multi, dequant, dequant, mm_dequant))
# methods.append((lambda x: quant_multi_chunk(x, dim=-1), lambda x: quant_multi_chunk(x, dim=0), dequant, dequant, mm_dequant))
method_names = ["linear", "vectorwise"]
batched = [False, True]
values = list(product(dim1, dim2, methods, batched))
values_names = list(product(dim1, dim2, method_names, batched))
names = [
"dim1_{}_dim2_{}_quant_{}_batched_{}".format(*vals)
for vals in values_names
]
@pytest.mark.parametrize(
"dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
dim1 = dim1 - (dim1 % 32)
dim2 = dim2 - (dim2 % 32)
errors = []
relerrors = []
print("")
for i in range(5):
if batched:
A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device="cuda")
B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 2)
maxB, Bc = quant_methods[1](B, 1)
else:
A = torch.normal(0, 0.5, size=(dim1, dim2), device="cuda")
B = torch.normal(0, 0.5, size=(dim2, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 1)
maxB, Bc = quant_methods[1](B, 0)
torch.testing.assert_allclose(
quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05
)
if batched:
out2 = torch.bmm(A, B)
C = torch.bmm(Ac.float(), Bc.float())
else:
out2 = torch.mm(A, B)
C = F.igemm(Ac, Bc)
out = quant_methods[4](maxA, maxB, C)
std = out2.std()
out /= std
out2 /= std
err = torch.abs(out - out2)
relerr = err / torch.abs(out2)
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
print(mean(errors))
print(mean(relerrors))
def test_stable_embedding():
layer = bnb.nn.StableEmbedding(1024, 1024)
layer.reset_parameters()
n = 2
hidden_dim = torch.randint(32, 256, size=(n,)).tolist()
batch_dim = torch.randint(16, 256, size=(n,)).tolist()
seq_dim = torch.randint(16, 256, size=(n,)).tolist()
transpose = [(False, False), (False, True), (True, False), (True, True)]
values = list(product(hidden_dim, batch_dim, transpose, seq_dim))
names = [
"hidden_dim_{}_batch_dim_{},transpose_{}_seq_dim_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize(
"hidden_dim, batch_dim, transpose, seq_dim", values, ids=names
)
def test_igemm(hidden_dim, batch_dim, transpose, seq_dim):
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 16)
seq_dim = seq_dim - (seq_dim % 16)
for i in range(k):
shapeA = (
(batch_dim, hidden_dim)
if not transpose[0]
else (hidden_dim, batch_dim)
)
shapeB = (
(32 * random.randint(1, 4), hidden_dim)
if transpose[1]
else (hidden_dim, 32 * random.randint(1, 4))
)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.matmul(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.matmul(A.float(), B.t().float())
out = F.igemm(A, B.t())
elif transpose[0] and not transpose[1]:
out2 = torch.matmul(A.t().float(), B.float())
out = F.igemm(A.t(), B)
elif transpose[0] and transpose[1]:
out2 = torch.matmul(A.t().float(), B.t().float())
out = F.igemm(A.t(), B.t())
torch.testing.assert_allclose(out.float(), out2)
for i in range(k):
shapeA = (batch_dim, seq_dim, hidden_dim)
shapeB = (
(32 * random.randint(1, 4), hidden_dim)
if transpose[1]
else (hidden_dim, 32 * random.randint(1, 4))
)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.matmul(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.matmul(A.float(), B.t().float())
out = F.igemm(A, B.t())
torch.testing.assert_allclose(out.float(), out2)
n = 3
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
values = list(product(seq_dim, hidden_dim, batch_dim))
names = [
"seq_dim{}_hidden_dim{}_batch_dim{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
torch.testing.assert_allclose(out.float(), out2)
n = 2
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
transpose = [False, True]
values = list(product(seq_dim, hidden_dim, batch_dim, transpose))
names = [
"seq_dim={}_hidden_dim={}_batch_dim={}_transpose{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize(
"seq_dim, hidden_dim, batch_dim, transpose", values, ids=names
)
def test_minmax_igemm(seq_dim, hidden_dim, batch_dim, transpose):
def min_max(x):
maxA = torch.amax(x, dim=2, keepdim=True)
minA = torch.amin(x, dim=2, keepdim=True)
scale = (maxA - minA) / 2.0
return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale
seq_dim = seq_dim - (seq_dim % 16)
hidden_dim = hidden_dim - (hidden_dim % 16)
batch_dim = batch_dim - (batch_dim % 2)
errs = []
relerrs = []
errs2 = []
relerrs2 = []
for i in range(k):
A = torch.normal(
0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
)
if transpose:
B = torch.normal(0, 0.5, size=(256, hidden_dim), device="cuda")
else:
B = torch.normal(0, 0.5, size=(hidden_dim, 256), device="cuda")
Ac, minA, scale = min_max(A)
if transpose:
maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))
out = F.igemm(Ac, Bc.t())
out2 = torch.matmul(A, B.t())
offset = B.t().sum(0) * (minA + scale)
out = out.float()
out = (out * maxB.t() * scale / (127 * 127)) + offset
maxA, Ac = quant_multi(A, dim=2)
out3 = F.igemm(Ac, Bc.t())
out3 = mm_dequant(maxA, maxB.t(), out3)
else:
maxB, Bc = quant_multi(B, dim=0)
offset = B.sum(0) * (minA + scale)
out = F.igemm(Ac, Bc)
out2 = torch.matmul(A, B)
out = out.float()
out = (out * maxB * scale / (127 * 127)) + offset
maxA, Ac = quant_multi(A, dim=2)
out3 = F.igemm(Ac, Bc)
out3 = mm_dequant(maxA, maxB, out3)
std = out2.std()
out2 /= std
out /= std
out3 /= std
err = torch.abs(out - out2)
relerr = err / (torch.abs(out2) + 1e-7)
err2 = torch.abs(out3 - out2)
relerr2 = err2 / (torch.abs(out2) + 1e-7)
errs.append(err.mean().item())
relerrs.append(relerr.mean().item())
errs2.append(err2.mean().item())
relerrs2.append(relerr2.mean().item())
# print(mean(errs))
# print(mean(relerrs))
# print(mean(errs2))
# print(mean(relerrs2))
assert mean(errs) < 0.015
assert mean(relerrs) < 0.3
n = 2
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
dim4 = torch.randint(32, 256, size=(n,)).tolist()
transpose = [(False, False), (True, False), (False, True), (True, True)]
values = list(product(dim1, dim2, dim3, dim4, transpose))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_transpose_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.bmm(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
out = F.igemm(A, B.permute([0, 2, 1]))
elif transpose[0] and not transpose[1]:
out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
out = F.igemm(A.permute([0, 2, 1]), B)
elif transpose[0] and transpose[1]:
out2 = torch.bmm(
A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
)
out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))
torch.testing.assert_allclose(out.float(), out2.float())
n = 1
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
values = list(product(dim1, dim2, dim3))
names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dim3", values, ids=names)
def test_vector_quant(dim1, dim2, dim3):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
for i in range(k):
A = torch.randn(size=(dim2, dim3), device="cuda")
qA, SA = F.vectorwise_quant(A, dim=0)
A1 = F.vectorwise_dequant(qA, SA)
n = A1.numel()
assert_all_approx_close(A1, A, atol=0.01, rtol=0.1, count=int(n*0.002))
n = 2
dim1 = torch.randint(2, 256, size=(n,)).tolist()
dim2 = torch.randint(2, 256, size=(n,)).tolist()
dim3 = torch.randint(2, 256, size=(n,)).tolist()
# dim1, dim2 = (256,), (256,)
dtype = [torch.int8, torch.int32]
a_order = ["row"]
out_order = ["col", "row", "col32"]
transpose = [False]
dims = [2, 3]
values = list(product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose))
names = ["dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_transpose_{}".format(*vals)for vals in values]
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
    if dims == 3 and orderOut != "col32":
        return
    if dtype == torch.int32 and orderOut != "col32":
        return
func = F.get_transform_func(dtype, orderA, orderOut, transpose)
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
elif dims == 3:
A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
dtype
)
out, S = F.nvidia_transform(A, to_order=orderOut)
if orderOut == "row":
torch.testing.assert_allclose(A.flatten(), out.flatten())
elif orderOut == "col":
torch.testing.assert_allclose(A.t().flatten(), out.flatten())
elif orderOut == "col32":
if dims == 2:
n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32)))
elif dims == 3:
n = (
A.shape[0]
* A.shape[1]
* (A.shape[2] + (32 - (A.shape[2] % 32)))
)
assert out.numel() == n
elif orderOut == "col_turing":
# 32 col 8 row tiles
n = (A.shape[0] + (8 - A.shape[0] % 8)) * (
A.shape[1] + (32 - (A.shape[1] % 32))
)
assert out.numel() == n
total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
for row in range(A.shape[0]):
for col in range(A.shape[1]):
i = row * A.shape[1]
j = col
coltile = (col // 32) + (1 if col % 32 != 0 else 0)
rowtile = (
(row // 8) + (1 if row % 8 != 0 else 0)
) * total_coltile
offset = 32 * 8 * (rowtile + coltile)
col2 = col % 32
row2 = (row % 8) * 32
assert A.flatten()[i + j] == A[row, col]
# assert A.flatten()[i+j] == out.flatten()[row2+col2]
# torch.testing.assert_allclose(A.flatten()[i+j], A[row, col])
# torch.testing.assert_allclose(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
out2, S = F.nvidia_transform(
out, from_order=orderOut, to_order="row", state=S
)
torch.testing.assert_allclose(A, out2)
n = 1
dim1 = torch.randint(1, 256, size=(n,)).tolist()
dim2 = torch.randint(32, 512, size=(n,)).tolist()
dim3 = torch.randint(32, 1024, size=(n,)).tolist()
dim4 = torch.randint(32, 1024, size=(n,)).tolist()
# dim1 = [2]
# dim2 = [2]
# dim3 = [2]
# dim4 = [2]
dims = (2, 3)
ldb = [0]
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims, ldb))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}_ldb_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
for i in range(k):
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
torch.int8
)
elif dims == 3:
A = torch.randint(
-128, 127, size=(dim1, dim2, dim3), device="cuda"
).to(torch.int8)
B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.t().float())
A2, SA = F.transform(A, "col32")
B2, SB = F.transform(B, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
torch.testing.assert_allclose(C1, C3.float())
# transpose
B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.float())
B2t, SBt = F.transform(B, "col_turing", transpose=True)
C2, SC = F.igemmlt(A2, B2t, SA, SBt)
C3, S = F.nvidia_transform(C2, "row", state=SC)
torch.testing.assert_allclose(C1, C3.float())
dim1 = [32]
dim2 = [32]
dim3 = [32]
dim4 = [32]
dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims", values, ids=names)
def test_igemmlt_half(dim1, dim2, dim3, dim4, dims):
formatB = F.get_special_format_str()
for i in range(k):
if dims == 2:
A = torch.normal(0, 0.5, size=(dim1, dim3), device="cuda").half()
elif dims == 3:
A = torch.normal(
0, 0.5, size=(dim1, dim2, dim3), device="cuda"
).half()
B = torch.randn((dim4, dim3), device="cuda").half()
torch.nn.init.xavier_uniform_(B)
C1 = torch.matmul(A, B.t())
C2 = bnb.matmul(A, B.t())
A = A.view(-1, A.shape[-1])
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
C32A, SA = F.transform(CA, "col32")
CxB, SB = F.transform(CB, to_order=formatB)
out1_32, Sout1_32 = F.igemmlt(C32A, CxB, SA, SB)
output = F.mm_dequant(out1_32, Sout1_32, statsAt, statsBt)
# print('')
# print(output.flatten()[:10])
# print(C1.flatten()[:10])
# print(C2.flatten()[:10])
# torch.testing.assert_allclose(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
# transpose
# B = torch.randint(-128, 127, size=(dim3, dim4), device='cuda').to(torch.int8)
# C1 = torch.matmul(A.float(), B.float())
# B2t, SBt = F.transform2(B, 'col_turing', transpose=True)
# C2, SC = F.igemmlt(A2, B2t, SA, SBt)
# C3, S = F.transform(C2, 'row', state=SC)
# torch.testing.assert_allclose(C1, C3.float())
batch_size = 2
seqdim = 512
# values = [(batch_size, seqdim, 4*1024, 16*1024),(batch_size, seqdim, 5120, 4*5120),(batch_size, seqdim, 12*1024, 4*12*1024)]
values = [
(batch_size, seqdim, 4 * 1024, 3 * 4 * 1024),
(batch_size, seqdim, 5120, 3 * 5120),
(batch_size, seqdim, 12 * 1024, 4 * 12 * 1024),
]
# values = list(product(batch, seq, model, hidden))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_8bit_training(batch, seq, model, hidden):
formatB = F.get_special_format_str()
A = torch.randn(batch, seq, model, device="cuda").half()
grad = torch.randn(batch, seq, model, device="cuda").half()
w1 = torch.randint(-128, 127, size=(hidden, model), device="cuda").half()
w2 = torch.randint(-128, 127, size=(model, hidden), device="cuda").half()
print("")
# torch.cuda.synchronize()
## warmup
# for i in range(100):
# torch.matmul(A, w1.t())
# torch.cuda.synchronize()
dtype = torch.int8
A = A.view(-1, A.shape[-1]).contiguous()
grad = grad.view(-1, grad.shape[-1]).contiguous()
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
out1 = torch.matmul(A, w1.t()) # fc1
# out2 = torch.matmul(out1, w2.t())# fc2
# d1 = torch.matmul(grad, w2) # delta1
# d2 = torch.matmul(d1, w1) # delta2
# grad1 = torch.einsum('bo,bh->oh', out1, grad) # grad w2
# grad2 = torch.einsum('bh,bo->ho', A, d2) # grad w1
torch.cuda.synchronize()
t16 = time.time() - t0
print(t16)
# torch.cuda.empty_cache()
# Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# CTw1, Sw1 = F.transform2(Cw1, formatB)
# CTw2, Sw2 = F.transform2(Cw2, formatB)
# CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# C32A, SA = F.transform2(CA, 'col32')
## fc1
# out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
##out1 = F.mm_dequant(out1_32, Sout1_32, statsAt, statsw1t)
## fc2
# Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
# C32out1, Sout1 = F.transform2(Cout1, 'col32')
# out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
##out2 = F.mm_dequant(out2_32, Sout2_32, statsout1t, statsw2t)
## delta1
# Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
# C32grad, Sgrad = F.transform2(Cgrad, 'col32')
##d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
##d1 = F.mm_dequant(d1_32, Sd1_32, statsgradt, statsw2)
## delta2
# Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
# C32d1, Sd1 = F.transform2(Cd1, 'col32')
##d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
##d2 = F.mm_dequant(d2_32, Sd2_32, statsd1t, statsw1)
## grad1
# C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
# CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
##grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
##grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1, statsgrad)
## grad2
# C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
# CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
##grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
##grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsA, statsd1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# CTw1, Sw1 = F.transform2(Cw1, formatB)
# CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# CTw2, Sw2 = F.transform2(Cw2, formatB)
# CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# torch.cuda.synchronize()
# t0 = time.time()
# for i in range(k):
# #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# #CTw1, Sw1 = F.transform2(Cw1, formatB)
# #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# #CTw1, Sw1 = F.transform2(Cw1, formatB)
# #CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A, threshold=3.5)
# CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# #CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# #CTw2, Sw2 = F.transform2(Cw2, formatB)
# #CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# C32A, SA = F.transform2(CA, 'col32')
# # fc1
# out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
# #out1dn = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
# #print(coo_tensor.nnz)
# #out1sp = F.spmm_coo(coo_tensor, w1.t())
# #print(w1.t().shape)
# #out1 = out1dn + out1sp
# # fc2
# Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
# C32out1, Sout1 = F.transform2(Cout1, 'col32')
# out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
# #out2 = F.mm_dequant(out2_32, Sout2_32, statsout1, statsw2)
# # delta1
# Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
# C32grad, Sgrad = F.transform2(Cgrad, 'col32')
# d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
# #d1 = F.mm_dequant(d1_32, Sd1_32, statsgrad, statsw2t)
# # delta2
# Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
# C32d1, Sd1 = F.transform2(Cd1, 'col32')
# d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
# #d2 = F.mm_dequant(d2_32, Sd2_32, statsd1, statsw1t)
# # grad1
# #C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
# #CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
# #grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
# #grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1t, statsgradt)
# ## grad2
# #C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
# #CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
# #grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
# #grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsAt, statsd1t)
# torch.cuda.synchronize()
# t8 = time.time() - t0
# print(t8)
n = 2
dim1 = torch.randint(64, 256, size=(n,)).tolist()
dim4 = torch.randint(64, 1024, size=(n,)).tolist()
#dim1 = [2*1024]
#dim4 = [2*1024]
#dim1 = [4]
#dim4 = [4]
dims = (2,)
formatB = ["col_turing", "col_ampere"]
has_bias = [True, False]
values = list(product(dim1, dim4, dims, formatB, has_bias))
names = ["dim1_{}_dim4_{}_dims_{}_formatB_{}_has_bias_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, dims, formatB, has_bias", values, ids=names)
def test_dequant_mm(dim1, dim4, dims, formatB, has_bias):
inner = torch.randint(1, 128, size=(1,)).item()
bias = None
if has_bias: bias = torch.randn(dim4, device='cuda', dtype=torch.float16)
formatB = F.get_special_format_str()
for i in range(1):
A = torch.randn(dim1, inner, device="cuda")
B = torch.randn(dim4, inner, device="cuda")
C1 = torch.matmul(A.half(), B.t().half())
if has_bias: C1 += bias
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, formatB)
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
C4 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
if has_bias: C4 += bias
# TODO: is something wrong here? If so, the problem goes deeper
#n = C1.numel()
#p = 0.06
std = C1.std(0).view(1, -1)
C1 /= std
C4 /= std
#assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))
#assert (count / n < p), f"error in more than {p} of elements: {count}/{n}={count/n}"
C5 = F.mm_dequant(C2, SC, maxA.flatten(), maxB.flatten(), bias=bias)
#torch.testing.assert_allclose(C5, C4, atol=0.015, rtol=0.1)
n = C5.numel()
assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01*n))
n = 2
dim1 = [1 * 1024]
dim2 = [1 * 1024]
# dim1 = torch.randint(1,4*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dims))
names = ["dim1_{}_dim2_{}_dims_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
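# compare reference row/column absmax statistics (with and without an outlier threshold) against F.get_colrow_absmax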
for i in range(k):
threshold = 3.0
A = torch.randn(dim1, dim2, device="cuda").half()
A_truncated = A.clone()
A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
if dims == 2:
row_stats1, _ = torch.abs(A.float()).max(1)
col_stats1, _ = torch.abs(A.float()).max(0)
row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
else:
assert False
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=threshold
)
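# reconstruct the per-block counts of |A| >= threshold to validate nnz_block_ptr2 returned above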
A_blocked = einops.rearrange(
torch.abs(A),
"(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
row_tiles=16,
block_size=64 * 4,
)
nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
nnz_block_ptr1 = torch.zeros(
nnz_rows1_counts.shape[0] + 1,
dtype=nnz_rows1_counts.dtype,
device=nnz_rows1_counts.device,
)
nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)
torch.testing.assert_allclose(col_stats1_trunc, col_stats2)
torch.testing.assert_allclose(row_stats1_trunc, row_stats2)
torch.testing.assert_allclose(nnz_block_ptr1, nnz_block_ptr2)
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=0.0
)
torch.testing.assert_allclose(col_stats1, col_stats2)
torch.testing.assert_allclose(row_stats1, row_stats2)
assert nnz_block_ptr2 is None
n = 2
# dim1 = [8*1024]
# dim2 = [4*1024]
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
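# double_quant should reproduce separate row-wise and column-wise vectorwise quantization up to rounding differences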
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
out_col1, Scol = F.vectorwise_quant(A, dim=0)
out_row1, Srow = F.vectorwise_quant(A, dim=1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# max difference is 1 due to rounding differences
torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
n = CAt.numel()
num_not_close_rows = (
(torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
)
num_not_close_cols = (
(torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
)
# allow for 1:500 error due to rounding differences
min_error = 1 / 500
if num_not_close_cols > (min_error * n):
print(
f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
)
assert False
if num_not_close_rows > (min_error * n):
print(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
)
assert False
torch.testing.assert_allclose(Srow.flatten(), statsA)
torch.testing.assert_allclose(Scol.flatten(), statsAt)
n = 4
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
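# the integrated int8 pipeline (double_quant -> igemmlt -> mm_dequant) is compared against the vectorwise reference path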
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
torch.testing.assert_allclose(maxA.flatten(), stats1a)
torch.testing.assert_allclose(maxB.flatten(), stats2a)
torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(C2a, "col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out3).mean().item()
assert err2 <= err1 * 1.025
n = 6
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_igemmlt_row_scale(dim1, dim4, inner):
formatB = F.get_special_format_str()
err1, err2, err3 = [], [], []
relerr1, relerr2 = [], []
scale = 1
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
C1 = torch.matmul(A, B.t())
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(CB, formatB)
A1, maxA = F.vectorwise_quant(A, dim=1)
c = 10.0 * inner * scale
row_scale = torch.ones_like(maxA) / c
outC32, SC = F.igemmlt(
A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
)
C3, S = F.nvidia_transform(outC32, "row", state=SC)
maxval = torch.abs(C3).max()
if maxval == 127:
scale = 1.5
else:
scale = maxval / 120
out3 = C3 * maxA * absmaxB * c / (127 * 127)
C4 = torch.matmul(C1a.float(), CB.float().t())
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
B2, SB = F.nvidia_transform(C2a, formatB)
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
CA, SA = F.vectorwise_quant(A, dim=1, quant_type="vector")
CB, SB = F.vectorwise_quant(B, dim=1, quant_type="linear")
C = torch.matmul(CA.float(), CB.t().float())
out4 = C * SA * SB / (127 * 127)
# out4 = torch.clip(torch.round(C*SA/c), -127, 127)*c*SB/(127*127)
# print('='*80)
# print(out1)
# print(out2)
# print(out3)
# print(out1)
# print(out2)
# print(out3)
err1.append(torch.abs(out1 - out2).mean().item())
err2.append(torch.abs(out1 - out3).mean().item())
err3.append(torch.abs(out1 - out4).mean().item())
# assert_all_approx_close(C3.float(), torch.round(C4*row_scale), rtol=0, atol=0, count=10)
print("")
print(sum(err1) / len(err1))
print(sum(err2) / len(err2))
print(sum(err3) / len(err3))
dim1 = [1024, 2048]
inner = [12288 * 4, 4096 * 4]
dim4 = [12288, 4096]
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_row_scale_bench(dim1, dim4, inner):
formatB = F.get_special_format_str()
err1, err2, err3 = [], [], []
relerr1, relerr2 = [], []
scale = 1
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
# warmup
for i in range(k):
C1 = torch.matmul(A, B.t())
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
C1 = torch.matmul(A, B.t())
torch.cuda.synchronize()
print("16", time.time() - t0)
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(CB, formatB)
A1, maxA = F.vectorwise_quant(A, dim=1)
c = 10.0 * inner * scale
row_scale = maxA / c
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
outC32, SC = F.igemmlt(
A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
)
torch.cuda.synchronize()
print("row-wise", time.time() - t0)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
B2, SB = F.nvidia_transform(C2a, formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
outC32, SC = F.igemmlt(A2, B2, SA, SB)
torch.cuda.synchronize()
print("vector-wise", time.time() - t0)
n = 2
dim1 = torch.randint(2, 1024, size=(n,)).tolist()
dim2 = torch.randint(2, 1024, size=(n,)).tolist()
# dim1 = [8*1024]
# dim2 = [4*1024]
dim3 = [0]
dtype = [torch.int8]
a_order = ["row"]
out_order = ["col32", "col_turing", "col_ampere"]
transpose = [False, True]
dims = [2]
values = list(
product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose)
)
names = [
"dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_{}".format(
*vals
)
for vals in values
]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
for i in range(k):
if dims == 2:
A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
dtype
)
elif dims == 3:
A = torch.randint(
10, 99, size=(dim1, dim2, dim3), device="cuda"
).to(dtype)
A.view(-1)[-1] = -1
if transpose:
At = A.t().contiguous()
out1, S1 = F.nvidia_transform(At, to_order=orderOut)
else:
out1, S1 = F.nvidia_transform(A, to_order=orderOut)
out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
assert S1[0][0] == S2[0][0]
assert S1[0][1] == S2[0][1]
# print(out1)
# print(out2)
torch.testing.assert_allclose(out1, out2)
n = 2
# dim1 = torch.randint(2,1024, size=(n,)).tolist()
# dim2 = torch.randint(2,1024, size=(n,)).tolist()
dim1 = [1]
dim2 = [33]
dtype = [torch.int8]
# a_order = ['col_turing', 'col_ampere']
a_order = ["col_turing"]
out_order = ["row"]
values = list(product(dim1, dim2, dtype, a_order, out_order))
names = [
"dim1_{}_dim2_{}_dtype_{}_orderA_{}_orderOut_{}".format(*vals)
for vals in values
]
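# note: the values/names above are not used by test_overflow, which runs with fixed shapes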
def test_overflow():
formatB = F.get_special_format_str()
print(formatB)
for i in range(2):
a = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)
b = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)
Ca, Sa = F.nvidia_transform(a, "col32")
Cb, Sb = F.nvidia_transform(b, formatB)
c = F.igemmlt(Ca, Cb, Sa, Sb, dtype=torch.int8)
c2 = torch.matmul(a.float(), b.float().t())
n = 2
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
# dim1 = [4]
# dim2 = [5]
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
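# with a threshold, double_quant splits outliers into a COO sparse tensor and int8-quantizes the remainder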
threshold = 3.00
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
idx = torch.abs(A) >= threshold
CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
if coo_tensor is not None:
A1 = A * idx
A2 = torch.zeros_like(A)
A2[
coo_tensor.rowidx.long(), coo_tensor.colidx.long()
] = coo_tensor.values
torch.testing.assert_allclose(A1, A2)
A1 = A * (idx == 0)
A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
torch.testing.assert_allclose(
A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
)
n = 2
dim1 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
# dim1 = [7]
# dim2 = [11]
transposed_B = [False, True]
values = list(product(dim1, dim2, transposed_B))
names = ["dim1_{}_dim2_{}_transposed_B_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, transposed_B", values, ids=names)
def test_spmm_coo(dim1, dim2, transposed_B):
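# spmm_coo on the thresholded outliers should match a dense matmul of the masked matrix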
threshold = 1.5
dim3 = torch.randint(32, 128, size=(1,)).item()
# dim3 = 17
for i in range(k):
A = torch.randn(dim1, dim2).cuda().half()
if transposed_B:
B = torch.randn(dim3, dim2).cuda().half()
else:
B = torch.randn(dim2, dim3).cuda().half()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
if transposed_B:
out2 = F.spmm_coo(cooA, B.t())
out1 = torch.matmul(A2, B.t())
else:
out2 = F.spmm_coo(cooA, B)
out1 = torch.matmul(A2, B)
assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)
def test_spmm_bench():
batch = 2
model = 1024 * 1
hidden = model * 4
seq = 1024
dim1 = batch * seq
dim2 = model
dim3 = hidden
threshold = 4
A = torch.randn(dim1, dim2, device="cuda").half()
B = torch.randn(dim2, dim3, device="cuda").half()
for i in range(10):
C1 = bnb.matmul(A, B.t())
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
C1 = bnb.matmul(A, B.t())
torch.cuda.synchronize()
t8 = time.time() - t0
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
print(nnz / idx.numel())
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
for i in range(10):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
tsp = time.time() - t0
print(tsp, t8)
print(tsp / t8)
n = 2
dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_integrated_sparse_decomp(dim1, dim2):
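# int8 matmul plus sparse outlier decomposition (spmm_coo) should have lower error than plain int8 matmul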
threshold = 3.0
formatB = "col_turing"
for i in range(k):
A = torch.randn(dim1, dim2).cuda().half()
w1 = torch.randn(dim1, dim2).cuda().half()
out1 = torch.matmul(A, w1.t())
Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
CTw1, Sw1 = F.transform(Cw1, formatB)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
C32A, SA = F.transform(CA, "col32")
out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
out2 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
C32A, SA = F.transform(CA, "col32")
out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
out3 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
assert coo_tensor is not None
out4 = F.spmm_coo(coo_tensor, w1.t())
out5 = out3 + out4
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out5).mean().item()
assert err2 < err1
def test_matmuls():
a = torch.randn(256, 512).half().cuda()
b = torch.randn(256, 512).half().cuda()
c1 = torch.matmul(a, b.t())
c2 = bnb.matmul(a, b)
c3 = bnb.matmul_cublas(a, b.t())
err1 = torch.abs(c1 - c2).mean().item()
err2 = torch.abs(c1 - c3).mean().item()
assert err1 < 0.2
assert err2 < 0.2
print(err1, err2)
n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
dim2 = [12288]
# dim1 = [32]
# dim2 = [32]
# dtype = [torch.float16, torch.int8]
dtype = [torch.float16]
out_function = ["zeros", "ones"]
values = list(product(dim1, dim2, dtype, out_function))
names = [
"dim1_{}_dim2_{}_dtype_{}_out_func_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dtype, out_func", values, ids=names)
def test_spmm_coo_very_sparse(dim1, dim2, dtype, out_func):
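# the very-sparse COO kernel accumulates into a preallocated output (zeros or ones) and is compared against a dense matmul of the masked matrix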
out_func = getattr(torch, out_func)
threshold = 3.3
# threshold = 2.8
# threshold = 0.0
A = torch.randn(dim1, dim2, device="cuda").half()
if dtype == torch.float16:
B = torch.randn(dim2, dim2 * 4, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
else:
B = torch.randn(dim2, dim2 * 4, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
B, SB = F.vectorwise_quant(B, quant_type="linear")
# B = torch.randint(-127, 127, size=(dim2, dim2*4), device='cuda').to(torch.int8)
print("")
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
out1 = torch.matmul(A2.half(), B.half())
out = out_func(out1.shape, dtype=torch.float16, device=out1.device)
out1 += out.clone()
out2 = F.spmm_coo_very_sparse(cooA, B, out=out)
# print(B)
# print(out1)
# print(out2)
p = 200 / (2048 * 12288 * 4)
n = out1.numel()
count = math.ceil(p * n)
std = out1.std()
out1 /= std
out2 /= std
assert_all_approx_close(
out1, out2.half(), rtol=0.01, atol=3.0e-2, count=count
)
# assert_all_approx_close(out1, out2.half(), rtol=0.05, atol=0.01, count=count)
idx_col = torch.randint(0, A2.shape[-1], size=(15,))
# torch.testing.assert_allclose(out1, out2.half(), rtol=0.05, atol=0.001)
# Bt = torch.randn(dim2*4, dim2, device='cuda').half()
# torch.cuda.synchronize()
# t0 = time.time()
# print(A2.shape, B.shape)
# for i in range(100):
# #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
torch.testing.assert_allclose(A2[idx], csrA.values)
def test_coo2csc():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
cscA = F.coo2csc(cooA)
counts = cscA.colptr[1:] - cscA.colptr[:-1]
assert counts.numel() == A.shape[1]
torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
# torch uses row-major layout -> transpose to compare against the col-major CSC values
idx = A2.t() != 0
torch.testing.assert_allclose(A2.t()[idx], cscA.values)
n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
# dim2 = [12288]
dim2 = [2048]
# dim1 = [2]
# dim2 = [2]
dtype = [torch.int8]
values = list(product(dim1, dim2, dtype))
names = ["dim1_{}_dim2_{}_dtype_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
threshold = 6.0
# threshold = 2.8
# threshold = 0.0
A = torch.randn(dim1, dim2, device="cuda").half()
B = torch.empty(dim2, dim2 * 4, device="cuda", dtype=torch.float16)
torch.nn.init.xavier_uniform_(B)
Bt = B.t().contiguous()
CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
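# note: rowidx holds column indices; whole columns of A are set to 8.0 to create outliers above the threshold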
rowidx = torch.randint(0, A.shape[-1], size=(15,))
A[:, rowidx] = 8.0
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
out1 = torch.matmul(A2, B.half())
out3 = F.spmm_coo_very_sparse(cooA, CBt.half())
out3 = out3 * statsBt.half() / 127
values, counts = torch.unique(cooA.rowidx, return_counts=True)
offset = counts.cumsum(0).int()
max_count, max_idx = torch.sort(counts, descending=True)
print(torch.median(max_count.float()))
torch.testing.assert_allclose(out2, out3, rtol=0.05, atol=0.001)
p = 200 / (2048 * 12288 * 4)
n = out1.numel()
count = math.ceil(p * n)
assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)
# torch.cuda.synchronize()
# t0 = time.time()
# for i in range(100):
# out2 = F.spmm_coo_very_sparse(cooA, B)
# torch.cuda.synchronize()
# print('fp16', time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
print("cusparse fp16", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt)
torch.cuda.synchronize()
print("int8", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
torch.cuda.synchronize()
print("int8+dequant", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = torch.matmul(A, B)
torch.cuda.synchronize()
print("matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
out = out1 + out2
torch.cuda.synchronize()
print("sparse+ matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
batch_size = 1
seqdim = 1
values = []
values.append((batch_size, seqdim, 768, 4 * 768))
# values.append((batch_size, seqdim, 1024, 4*1024))
# values.append((batch_size, seqdim, 1536, 4*1536))
# values.append((batch_size, seqdim, 2048, 4*2048))
# values.append((batch_size, seqdim, 2560, 4*2560))
# values.append((batch_size, seqdim, 4096, 4*4096))
# values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
iters = 128
formatB = F.get_special_format_str()
A = torch.randn(batch, seq, model, device="cuda").half()
B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
torch.nn.init.xavier_uniform_(B)
linear8bit = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
linear8bit.eval()
outliers = torch.randint(0, model, size=(5,)).cuda()
A[:, :, outliers] = 8.0
linearMixedBit = (
bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
)
linearMixedBit.eval()
# warmup
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print("")
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print(
f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul(A, B)
torch.cuda.synchronize()
print(f"CB -> CxB conversion (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul(A, B, threshold=6.0)
torch.cuda.synchronize()
print(f"CB -> CxB conversion + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)
C32A, SA = F.transform(CA, "col32")
CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)
CxB, SB = F.transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
torch.cuda.synchronize()
print(f"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1)
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1)
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
F.vectorwise_mm_dequant(Cout, statsA, statsB.t())
torch.cuda.synchronize()
#print(f"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1, quant_type="linear")
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type="linear")
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
out = Cout * statsB * statsA * (1.0 / (127 * 127))
torch.cuda.synchronize()
#print(f"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
linear8bit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linear8bit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
linearMixedBit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linearMixedBit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt with threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
def test_zeropoint():
def quant_zp(x):
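# zero-point quantization: y = qx*x + zpx with scale qx = 254/(max - min); the shift is undone later via row/column sums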
dtype = x.dtype
x = x.float()
dyna = x.max() - x.min()
if dyna == 0:
dyna = 1
qx = 254.0 / dyna
minx = x.min()
# zpx = torch.round(minx* qx)
# zpx = 127 - torch.round(x.max()* qx)
zpx = torch.round(x.min() * qx) - 127
x = (qx * x) + zpx
return x, qx, zpx
batch = 2
seq = 512
model = 1024
hidden = 4 * model
A = torch.randn(batch * seq, model, device="cuda").half() * 0.1
B = torch.randn(model, hidden, device="cuda").half() * 0.1
C0 = torch.matmul(A, B)
# A, SA = F.vectorwise_quant(A, quant_type='linear')
# B, SB = F.vectorwise_quant(B, quant_type='linear')
A = A.float()
B = B.float()
C1 = torch.matmul(A, B)
C3 = bnb.matmul(A.half(), B.t().contiguous().half())
zp = 1
# C2 = torch.matmul(A-zp, B)
# C2 += B.sum(0).view(1, -1)*zp
C2 = torch.matmul(A, B - zp)
C2 -= A.sum(1).view(-1, 1) * zp
ca, cqa, cza = quant_zp(A)
print(ca.min(), ca.max())
print((ca - cza).min(), (ca - cza).max())
zp = 1
scale = 2.0
C5 = torch.matmul((A * scale) - zp, B)
C5 += B.sum(0) * zp
C5 /= scale
CA, qa, zpa = quant_zp(A)
C4 = torch.matmul(CA, B)
C4 -= B.sum(0) * zpa
C4 /= qa
zpb = 1
zpa = 1
qa = 2
qb = 2
C6 = torch.matmul((A * qa) + zpa, (B * qb) + zpb)
C6 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C6 -= zpa * zpb * A.shape[1]
C6 /= qa * qb
CA, qa, zpa = quant_zp(A)
CB, qb, zpb = quant_zp(B)
C7 = torch.matmul(CA, CB)
C7 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C7 -= zpa * zpb * A.shape[1]
C7 /= qa * qb
print("")
# print(C0.flatten()[:10])
print(C1.flatten()[:10])
print(C2.flatten()[:10])
print(C3.flatten()[:10])
print(C5.flatten()[:10])
print(C6.flatten()[:10])
print(C7.flatten()[:10])
err1 = torch.abs(C1 - C2).mean().item()
err2 = torch.abs(C1 - C3).mean().item()
err3 = torch.abs(C1 - C4).mean().item()
err4 = torch.abs(C1 - C5).mean().item()
err5 = torch.abs(C1 - C6).mean().item()
err6 = torch.abs(C1 - C7).mean().item()
print(err1, err2, err3, err4, err5, err6)
def test_extract_outliers():
for i in range(k):
shapeA = (4096, 4096 * 4)
idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
# idx = torch.Tensor([0]).int().cuda()
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
outliers1 = A[:, idx.long()]
CA, SA = F.transform(A, "col_turing")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
torch.testing.assert_allclose(outliers1, outliers2)
CA, SA = F.transform(A, "col_ampere")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
torch.testing.assert_allclose(outliers1, outliers2)
def test_blockwise_cpu_large():
diffs = []
reldiffs = []
batch = 128
seq = 128
for hidden in [128]:#, 14336]:
for blocksize in [4096, 16384]:
for i in range(2):
A1 = torch.randn(batch, seq, hidden, device='cpu')
t0 = time.time()
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
print(time.time() - t0)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diffs[-1] < 0.011
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
for e_bits in range(1, 7):
p_bits = 7-e_bits
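# 8-bit float format: 1 sign bit + e_bits exponent bits + p_bits mantissa bits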
code = F.create_fp8_map(True, e_bits, p_bits).cuda()
print(e_bits, p_bits)
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
#print('')
for bits in range(2, 9):
#print('='*30, bits, '='*30)
for method in ['linear', 'fp8', 'dynamic', 'quantile']:
abserrs = []
relerrs = []
code = None
if method == 'linear':
code = F.create_linear_map(True, total_bits=bits).cuda()
elif method == 'fp8':
ebits = math.ceil(bits/2)
pbits = bits-ebits-1
code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
elif method == 'dynamic':
code = F.create_dynamic_map(True, bits-0, bits).cuda()
elif method == 'quantile':
values = torch.randn(2048, 2048, device='cuda')
code = F.create_quantile_map(values, bits).cuda()
# for some data types we have no zero
# for some data types we have one zero
# for some data types we have two zeros
assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
#print(method, (code==0).sum())
assert code.numel() == 256
for i in range(10):
values = torch.randn(1, 32, device='cuda')
values /= values.abs().max()
#values[values.abs() < 1e-6] += 1e-5
q1 = []
v1 = []
for v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
torch.testing.assert_allclose(q1, q2)
#print(method, 'abserr:', sum(abserrs)/len(abserrs), 'relerr:', sum(relerrs)/len(relerrs))
#assert False
def test_kbit_quantile_estimation():
for i in range(100):
data = torch.randn(1024, 1024, device='cuda')
for bits in range(2, 9):
p = np.linspace(1.3e-4, 1-1.3e-4, 2**bits)
val1 = torch.Tensor(norm.ppf(p)).cuda()
val2 = F.estimate_quantiles(data, offset=0, num_quantiles=2**bits)
err = torch.abs(val1-val2).mean()
assert err < 0.038
for i in range(100):
data = torch.randn(1024, 1024, device='cuda')
for bits in range(2, 4):
total_values = 2**bits-1
p = np.linspace(0, 1, 2*total_values+1)
idx = np.arange(1, 2*total_values+1, 2)
p = p[idx]
offset = 1/(2*total_values)
p = np.linspace(offset, 1-offset, total_values)
val1 = torch.Tensor(norm.ppf(p)).cuda()
val2 = F.estimate_quantiles(data, num_quantiles=2**bits-1)
err = torch.abs(val1-val2).mean()
assert err < 0.035
def test_bench_dequantization():
a = torch.rand(1024, 1024, device='cuda').half()
qa, SA = F.quantize_blockwise(a)
max_theoretical_mu = 1024*1024*2/1024**3/672*1000*1000
#print(max_theoretical_mu)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
F.dequantize_blockwise(qa, SA, blocksize=2048)
torch.cuda.synchronize()
#print((time.time()-t0)/1e6)
|
import ctypes
import os
import shutil
import time
import uuid
from itertools import product
from os.path import join
import pytest
from lion_pytorch import Lion
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F
# import apex
k = 20
def get_temp_dir():
path = f"/tmp/autoswap/{str(uuid.uuid4())}"
os.makedirs(path, exist_ok=True)
return path
def rm_path(path):
shutil.rmtree(path)
str2optimizers = {}
str2optimizers["adam_pytorch"] = (None, torch.optim.Adam, bnb.optim.Adam)
# str2optimizers['adam_apex'] = (None, apex.optimizers.FusedAdam, bnb.optim.Adam)
# str2optimizers['momentum_apex'] = (None, lambda pxx: apex.optimizers.FusedSGD(pxx, 0.01, 0.9), bnb.optim.Adam)
str2optimizers["lion_pytorch"] = (None, Lion, bnb.optim.Lion)
str2optimizers["momentum_pytorch"] = (
None,
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
bnb.optim.Adam,
)
str2optimizers["adam"] = (torch.optim.Adam, bnb.optim.Adam)
# str2optimizers['fused_adam'] = (apex.optimizers.FusedAdam, bnb.optim.Adam)
str2optimizers["lion"] = (Lion, bnb.optim.Lion)
str2optimizers["momentum"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["lars"] = (
lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.LARS(pxx, 0.01, 0.9),
)
str2optimizers["rmsprop"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["adam8bit"] = (
torch.optim.Adam,
lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=False),
)
str2optimizers["lion8bit"] = (
Lion,
lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=False),
)
str2optimizers["momentum8bit"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["rmsprop8bit"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["lars8bit"] = (
lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.LARS8bit(pxx, 0.01, 0.9),
)
str2optimizers["adam8bit_blockwise"] = (
torch.optim.Adam,
lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True),
)
str2optimizers["lion8bit_blockwise"] = (
Lion,
lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=True),
)
str2optimizers["momentum8bit_blockwise"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=True),
)
str2optimizers["rmsprop8bit_blockwise"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=True),
)
str2statenames = {}
str2statenames["adam"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["lion"] = [("exp_avg", "state1")]
str2statenames["momentum"] = [("momentum_buffer", "state1")]
str2statenames["lars"] = [("momentum_buffer", "state1")]
str2statenames["lamb"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["rmsprop"] = [("square_avg", "state1")]
str2statenames["adam8bit"] = [
("exp_avg", "state1", "qmap1", "max1"),
("exp_avg_sq", "state2", "qmap2", "max2"),
]
str2statenames["lion8bit"] = [
("exp_avg", "state1", "qmap1", "max1")
]
str2statenames["lamb8bit"] = [
("exp_avg", "state1", "qmap1", "max1"),
("exp_avg_sq", "state2", "qmap2", "max2"),
]
str2statenames["adam8bit_blockwise"] = [
("exp_avg", "state1", "qmap1", "absmax1"),
("exp_avg_sq", "state2", "qmap2", "absmax2"),
]
str2statenames["lion8bit_blockwise"] = [
("exp_avg", "state1", "qmap1", "absmax1")
]
str2statenames["momentum8bit"] = [
("momentum_buffer", "state1", "qmap1", "max1")
]
str2statenames["momentum8bit_blockwise"] = [
("momentum_buffer", "state1", "qmap1", "absmax1")
]
str2statenames["lars8bit"] = [("momentum_buffer", "state1", "qmap1", "max1")]
str2statenames["rmsprop8bit"] = [("square_avg", "state1", "qmap1", "max1")]
str2statenames["rmsprop8bit_blockwise"] = [
("square_avg", "state1", "qmap1", "absmax1")
]
dim1 = [1024]
dim2 = [32, 1024, 4097, 1]
gtype = [torch.float32, torch.float16]
optimizer_names = ["adam", "momentum", "rmsprop", "lars", "lion"]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
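# bnb 32-bit optimizers should match their PyTorch references on parameters and optimizer state, including after a state_dict save/load round trip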
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
p2 = p1.clone()
p1 = p1.float()
torch_optimizer = str2optimizers[optim_name][0]([p1])
bnb_optimizer = str2optimizers[optim_name][1]([p2])
if gtype == torch.float32:
atol, rtol = 1e-6, 1e-5
else:
atol, rtol = 1e-4, 1e-3
for i in range(k):
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g.clone().float()
p2.grad = g.clone()
bnb_optimizer.step()
torch_optimizer.step()
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(
torch_optimizer.state[p1][name1],
bnb_optimizer.state[p2][name2],
atol=atol,
rtol=rtol,
)
torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
if i % (k // 5) == 0 and i > 0:
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(
torch_optimizer.state[p1][name1],
bnb_optimizer.state[p2][name2],
atol=atol,
rtol=rtol,
)
if gtype == torch.float16:
# the adam buffers should also be close because they are 32-bit
# but the parameters can diverge because they are 16-bit
# the difference grows larger and larger with each update
# --> copy the state to keep weights close
p1.data = p1.data.half().float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.half(), p2)
if optim_name in ["lars", "lamb"]:
assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
values = list(product(dim1, dim2, gtype))
names = ["dim1_{}_dim2_{}_gtype_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype", values, ids=names)
def test_global_config(dim1, dim2, gtype):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
p2 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
p3 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
mask = torch.rand_like(p2) < 0.1
beta1 = 0.9
beta2 = 0.999
lr = 0.001
eps = 1e-8
bnb.optim.GlobalOptimManager.get_instance().initialize()
bnb.optim.GlobalOptimManager.get_instance().override_config(
p3, "optim_bits", 8
)
bnb.optim.GlobalOptimManager.get_instance().register_parameters(
[p1, p2, p3]
)
p1 = p1.cuda()
p2 = p2.cuda()
p3 = p3.cuda()
adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)
if gtype == torch.float32:
atol, rtol = 1e-6, 1e-5
else:
atol, rtol = 1e-4, 1e-3
for i in range(50):
g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
g2 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
g3 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
p1.grad = g1
p2.grad = g2
p3.grad = g3
adam2.step()
assert adam2.state[p3]["state1"].dtype == torch.uint8
assert adam2.state[p3]["state2"].dtype == torch.uint8
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
optimizer_names = [
"adam8bit",
"lion8bit",
"momentum8bit",
"rmsprop8bit",
"adam8bit_blockwise",
"lion8bit_blockwise",
"lars8bit",
"momentum8bit_blockwise",
"rmsprop8bit_blockwise",
]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
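# 8-bit optimizer states are dequantized and compared against the 32-bit PyTorch reference; only a few elements may deviate beyond the tolerances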
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
p2 = p1.clone()
p1 = p1.float()
blocksize = 2048
torch_optimizer = str2optimizers[optim_name][0]([p1])
bnb_optimizer = str2optimizers[optim_name][1]([p2])
if gtype == torch.float32:
atol, rtol = 3e-3, 1e-3
patol, prtol = 1e-5, 1e-3
else:
atol, rtol = 3e-3, 1e-3
patol, prtol = 1e-5, 1e-3
errors = []
relerrors = []
for i in range(50):
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g.clone().float()
p2.grad = g.clone()
bnb_optimizer.step()
torch_optimizer.step()
torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol)
dequant_states = []
for name1, name2, qmap, max_val in str2statenames[optim_name]:
# print(bnb_optimizer.state[p2][max_val], name1)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol
)
== 0
)
assert num_not_close.sum().item() < 20
dequant_states.append(s1.clone())
err = torch.abs(p1 - p2)
relerr = err / torch.abs(p1)
assert err.mean() < 0.0001
assert relerr.mean() < 0.001
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
s1cpy = s.clone()
raws1cpy = bnb_optimizer.state[p2][name2].clone()
qmap1 = bnb_optimizer.state[p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(
raws1cpy, bnb_optimizer.state[p2][name2]
)
torch.testing.assert_allclose(
qmap1, bnb_optimizer.state[p2][qmap]
)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
torch.testing.assert_allclose(s1cpy, s1)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1],
s1,
atol=atol,
rtol=rtol,
)
== 0
)
assert num_not_close.sum().item() < 20
torch.testing.assert_allclose(
p1, p2.float(), atol=patol, rtol=prtol
)
# the parameters diverge quickly. Here we keep them close
# together so we can test against the Adam error
p1.data = p1.data.to(gtype).float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.to(gtype), p2)
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
torch_optimizer.state[p1][name1].copy_(s.data)
# print(sum(errors)/len(errors))
# print(sum(relerrors)/len(relerrors))
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32]
optim_bits = [32, 8]
values = list(product(dim1, dim2, gtype, optim_bits))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_bits_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
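# Adam with percentile_clipping=5 should track a reference Adam whose gradients are scaled manually with F.percentile_clipping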
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
beta1 = 0.9
beta2 = 0.999
lr = 0.001
eps = 1e-8
p1 = p1.cuda()
p2 = p1.clone()
adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)
adam2 = bnb.optim.Adam(
[p2],
lr,
(beta1, beta2),
eps,
optim_bits=optim_bits,
percentile_clipping=5,
)
gnorm_vec = torch.zeros(100).cuda()
step = 0
for i in range(50):
step += 1
g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + (
0.01 * i
)
g2 = g1.clone()
p2.grad = g2
current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(
g1, gnorm_vec, step, 5
)
g1 = (g1.float() * gnorm_scale).to(gtype)
p1.grad = g1
adam1.step()
adam2.step()
# gnorm_scale is not deterministic (warp reductions), so there can be slight differences in state
if optim_bits == 32:
torch.testing.assert_allclose(p1, p2)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=5e-5,
rtol=1e-4,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=5e-5,
rtol=1e-4,
)
elif optim_bits == 8:
torch.testing.assert_allclose(p1, p2, atol=1e-4, rtol=1e-3)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=2,
rtol=1e-3,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=2,
rtol=1e-3,
)
adam1.state[p1]["state1"].copy_(adam2.state[p2]["state1"])
adam1.state[p1]["state2"].copy_(adam2.state[p2]["state2"])
if i % 10 == 0 and i > 0:
path = get_temp_dir()
torch.save(adam2.state_dict(), join(path, "opt.pt"))
del adam2
adam2 = None
adam2 = bnb.optim.Adam(
[p2],
lr,
(beta1, beta2),
eps,
optim_bits=optim_bits,
percentile_clipping=5,
)
adam2.load_state_dict(torch.load(join(path, "opt.pt")))
dim1 = [4096]
dim2 = [4096]
gtype = [torch.float32, torch.float16]
# optimizer_names = ['adam8bit_blockwise', 'adam8bit', 'lamb8bit']
# optimizer_names = ['adam8bit_blockwise', 'adam_apex', 'adam8bit', 'adam', 'adam_pytorch']
# optimizer_names = ['momentum_apex', 'momentum8bit', 'momentum_pytorch']
# optimizer_names = ['lamb_apex', 'lamb8bit']
# optimizer_names = ['lars_apex', 'lars8bit']
optimizer_names = ["adam8bit_blockwise"]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_benchmark_blockwise(dim1, dim2, gtype, optim_name):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
bnb_optimizer = str2optimizers[optim_name][1]([p1])
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g
for i in range(k):
if i == k // 5:
# the first k // 5 iterations are burn-in before timing starts
torch.cuda.synchronize()
t0 = time.time()
bnb_optimizer.step()
torch.cuda.synchronize()
s = time.time() - t0
print("")
params = (k - k // 5) * dim1 * dim2
print(optim_name, gtype, s / params)
# assert s < 3.9
|
import os
from typing import List, NamedTuple
import pytest
import bitsandbytes as bnb
from bitsandbytes.cuda_setup.main import (
CUDA_RUNTIME_LIB,
determine_cuda_runtime_lib_path,
evaluate_cuda_setup,
extract_candidate_paths,
)
"""
'LD_LIBRARY_PATH': ':/mnt/D/titus/local/cuda-11.1/lib64/'
'CONDA_EXE': '/mnt/D/titus/miniconda/bin/conda'
'LESSCLOSE': '/usr/bin/lesspipe %s %s'
'OLDPWD': '/mnt/D/titus/src'
'CONDA_PREFIX': '/mnt/D/titus/miniconda/envs/8-bit'
'SSH_AUTH_SOCK': '/mnt/D/titus/.ssh/ssh-agent.tim-uw.sock'
'CONDA_PREFIX_1': '/mnt/D/titus/miniconda'
'PWD': '/mnt/D/titus/src/8-bit'
'HOME': '/mnt/D/titus'
'CONDA_PYTHON_EXE': '/mnt/D/titus/miniconda/bin/python'
'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
'TMUX': '/tmp/tmux-1007/default,59286,1'
'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop'
'SSH_TTY': '/dev/pts/0'
'MAIL': '/var/mail/titus'
'SHELL': '/bin/bash'
'DBUS_SESSION_BUS_ADDRESS': 'unix:path=/run/user/1007/bus'
'XDG_RUNTIME_DIR': '/run/user/1007'
'PATH': '/mnt/D/titus/miniconda/envs/8-bit/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/mnt/D/titus/local/cuda-11.1/bin'
'LESSOPEN': '| /usr/bin/lesspipe %s'
'_': '/mnt/D/titus/miniconda/envs/8-bit/bin/python'
# we search for any variable that includes 'CONDA' but is not 'CONDA_PREFIX',
# as well as:
'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
"""
class InputAndExpectedOutput(NamedTuple):
input: str
output: str
HAPPY_PATH__LD_LIB_TEST_PATHS: List[InputAndExpectedOutput] = [
(
f"some/other/dir:dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f":some/other/dir:dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"some/other/dir:dir/with/{CUDA_RUNTIME_LIB}:",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"some/other/dir::dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"dir/with/{CUDA_RUNTIME_LIB}:some/other/dir",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"dir/with/{CUDA_RUNTIME_LIB}:other/dir/libcuda.so",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
]
@pytest.fixture(params=HAPPY_PATH__LD_LIB_TEST_PATHS)
def happy_path_path_string(tmpdir, request):
# create each candidate directory under the pytest tmpdir and place a fake
# CUDA runtime library file where the candidate path names one
# (assumes extract_candidate_paths yields the ':'-separated path entries)
for path in extract_candidate_paths(request.param):
full_path = os.path.join(str(tmpdir), path)
if CUDA_RUNTIME_LIB in path:
os.makedirs(os.path.dirname(full_path), exist_ok=True)
open(full_path, "a").close()
else:
os.makedirs(full_path, exist_ok=True)
return request.param
UNHAPPY_PATH__LD_LIB_TEST_PATHS = [
f"a/b/c/{CUDA_RUNTIME_LIB}:d/e/f/{CUDA_RUNTIME_LIB}",
f"a/b/c/{CUDA_RUNTIME_LIB}:d/e/f/{CUDA_RUNTIME_LIB}:g/h/j/{CUDA_RUNTIME_LIB}",
]
def test_full_system():
## this only tests the cuda version and not compute capability
# if CONDA_PREFIX exists, it has priority before all other env variables
# but it does not contain the library directly, so we need to look at a sub-folder
version = ""
if "CONDA_PREFIX" in os.environ:
ls_output, err = bnb.utils.execute_and_return(f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so')
major, minor, revision = (ls_output.split(" ")[-1].replace("libcudart.so.", "").split("."))
version = float(f"{major}.{minor}")
if version == "" and "LD_LIBRARY_PATH" in os.environ:
ld_path = os.environ["LD_LIBRARY_PATH"]
paths = ld_path.split(":")
version = ""
for p in paths:
if "cuda" in p:
idx = p.rfind("cuda-")
version = p[idx + 5 : idx + 5 + 4].replace("/", "")
version = float(version)
break
assert version > 0
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
binary_name = binary_name.replace("libbitsandbytes_cuda", "")
assert binary_name.startswith(str(version).replace(".", ""))
|
import bitsandbytes as bnb
import pytest
import torch
from bitsandbytes import functional as F
from bitsandbytes.autograd import get_inverse_transform_indices, undo_layout
from bitsandbytes.nn.modules import Linear8bitLt
# contributed by Alex Borzunov, see:
# https://github.com/bigscience-workshop/petals/blob/main/tests/test_linear8bitlt.py
@pytest.mark.skipif(
not torch.cuda.is_available() or torch.cuda.get_device_capability() < (7, 5),
reason="this test requires a turing-generation or newer GPU, see bitsandbytes docs",
)
def test_layout_exact_match():
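# transforming to the tiled GPU layout and undoing it with the inverse indices should be lossless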
x = (torch.randn(14336 * 3, 14336) * 10).to(torch.int8).cuda()
for tile_size, order in ((8, 32), "col_turing"), ((32, 32), "col_ampere"):
transform = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device)
tile_indices = get_inverse_transform_indices(transform, tile_size)
cxb = transform(x)
torch.cuda.synchronize()
restored_x = undo_layout(cxb, tile_indices)
torch.cuda.synchronize()
assert restored_x.is_contiguous()
assert torch.all(torch.eq(restored_x, x))
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
def test_linear_no_igemmlt():
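# with force_no_igemmlt the int8 fallback path should closely match a plain fp16 Linear in forward outputs and input gradients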
linear = torch.nn.Linear(1024, 3072)
x = torch.randn(3, 1024, dtype=torch.half)
linear_custom = Linear8bitLt(
linear.in_features,
linear.out_features,
linear.bias is not None,
has_fp16_weights=False,
threshold=6.0,
)
linear_custom.state.force_no_igemmlt = True
linear_custom.weight = bnb.nn.Int8Params(
linear.weight.data.clone(), requires_grad=False, has_fp16_weights=False
).to(linear.weight.dtype)
linear_custom.bias = linear.bias
linear_custom = linear_custom.cuda()
linear = linear.half().cuda()
x_ref = x.clone().cuda().requires_grad_(True)
x_ours = x.clone().cuda().requires_grad_(True)
fx_ref = linear(x_ref).float()
grad_proj = torch.randn_like(fx_ref)
(fx_ref * grad_proj).mean().backward()
fx_ours = linear_custom(x_ours).float()
(fx_ours * grad_proj).mean().backward()
assert torch.allclose(fx_ref, fx_ours, atol=0.02)
assert torch.allclose(x_ref.grad, x_ours.grad, atol=0.01)
assert not linear_custom.state.has_fp16_weights
assert linear_custom.state.CB is not None
assert linear_custom.state.CxB is None
|
from itertools import permutations, product
import pytest
import torch
import bitsandbytes as bnb
n = 1
k = 25
dim1 = torch.randint(16, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 96, size=(n,)).tolist()
dim3 = torch.randint(32, 96, size=(n,)).tolist()
dim4 = torch.randint(32, 96, size=(n,)).tolist()
funcs = [(torch.bmm, bnb.bmm_cublas), (torch.matmul, bnb.matmul_cublas)]
str_funcs = ["bmm", "matmul"]
req_grad = [(False, False), (True, False), (True, True), (False, True)]
req_grad_str = ["FF", "TF", "TT", "FT"]
transpose = [(False, False), (False, True), (True, True), (True, False)]
str_transpose = ["FF", "FT", "TT", "TF"]
dtype = [torch.float32, torch.float16]
values = list(
product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose)
)
str_values = list(
product(
dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose
)
)
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}".format(
*vals
)
for vals in str_values
]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose",
values,
ids=names,
)
def test_matmul(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
if not torch.cuda.is_available(): pytest.skip('No GPU found.')
if dim2 > 0:
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
# normal multiply
if funcs[0] in [torch.mm, torch.matmul]:
dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0])
B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1])
target = torch.randn(
size=(dim2, dim4), device="cuda", requires_grad=req_grad[1]
)
torch.nn.init.xavier_uniform_(B)
if not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
elif not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B.t())
elif transpose[0] and not transpose[1]:
out_torch = funcs[0](A.t(), B)
out_bnb = funcs[1](A.t(), B)
elif transpose[0] and transpose[1]:
out_torch = funcs[0](A.t(), B.t())
out_bnb = funcs[1](A.t(), B.t())
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.0175
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() < n * 0.001
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
# batched matrix multiply
if funcs[0] in [torch.bmm, torch.matmul]:
A = torch.randn(
size=(dim1, dim2, dim3),
device="cuda",
requires_grad=req_grad[0],
)
B = torch.randn(
size=(dim1, dim3, dim4),
device="cuda",
requires_grad=req_grad[1],
)
target = torch.randn(
size=(dim1, dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
)
torch.nn.init.xavier_uniform_(B)
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.01
torch.testing.assert_allclose(
out_bnb, out_torch, atol=0.027, rtol=0.2
)
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
if funcs[0] in [torch.matmul]:
dim1 = dim1 - (dim1 % 16)
A = torch.randn(
size=(dim1, dim2, dim3),
device="cuda",
requires_grad=req_grad[0],
)
dimB = (dim4, dim3) if transpose[1] else (dim3, dim4)
B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1])
target = torch.randn(
size=(dim1, dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
)
torch.nn.init.xavier_uniform_(B)
if transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B.t())
else:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.0175
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() < n * 0.001
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
n = 1
k = 3
dim1 = torch.randint(16, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 96, size=(n,)).tolist()
dim3 = torch.randint(32, 96, size=(n,)).tolist()
dim4 = torch.randint(32, 96, size=(n,)).tolist()
dim2.append(0)
decomp = [0.0, 6.0]
funcs = [(torch.matmul, bnb.matmul)]
str_funcs = ["matmul"]
req_grad = [(False, False), (True, False), (True, True), (False, True)]
req_grad = list(product([True, False], repeat=3))
req_grad_str = []
for c in req_grad:
    req_grad_str.append("".join("T" if v else "F" for v in c))
transpose = [(False, True), (False, False)]
str_transpose = ["NT", "NN"]
dtype = [torch.float16, torch.bfloat16, torch.float32]
has_fp16_weights = [True, False]
has_bias = [True, False]
values = list(
product(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
)
)
str_values = list(
product(
dim1,
dim2,
dim3,
dim4,
str_funcs,
dtype,
req_grad_str,
str_transpose,
decomp,
has_fp16_weights,
has_bias
)
)
names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_decomp_{}_has_fp16_weights_{}_has_bias_{}".format(*vals) for vals in str_values]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
if not torch.cuda.is_available(): pytest.skip('No GPU found.')
dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
    if not has_bias:
req_grad = list(req_grad)
req_grad[2] = False
for i in range(k):
# normal multiply
if funcs[0] in [torch.mm, torch.matmul]:
A = torch.randn(
size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
)
if decomp == 6.0:
with torch.no_grad():
A[:, outlier_dim] = 6.0
B = torch.randn(
size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
)
target = torch.randn(
size=(dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
dtype=dtype,
)
bias = None
bias2 = None
if has_bias:
bias = torch.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[2])
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print(f'abs error {err:.4f}')
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() <= n * 0.001
if has_fp16_weights:
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(
out_bnb, target
).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
if req_grad[2]:
torch.testing.assert_allclose(gradBias1, gradBias2)
|
import math
from itertools import product
import einops
import pytest
import torch
from torch import nn
import bitsandbytes as bnb
class MockArgs:
def __init__(self, initial_data):
for key in initial_data:
setattr(self, key, initial_data[key])
class MLP8bit(torch.nn.Module):
def __init__(self, dim1, dim2, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0):
super().__init__()
self.fc1 = bnb.nn.Linear8bitLt(
dim1, dim2, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward,
threshold=threshold
)
self.fc2 = bnb.nn.Linear8bitLt(
dim2, dim1, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward,
threshold=threshold
)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
def get_args():
args = MockArgs([])
args.quant_type = "vector"
args.use_8bit_training = "full"
args.clip_freq = 9999
return args
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
torch.testing.assert_allclose(a, b, rtol, atol)
class LinearFunction(torch.autograd.Function):
@staticmethod
def get_8bit_linear_trimmed(x, stochastic=False, trim_value=3.0):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
norm = math.sqrt(math.pi) / math.sqrt(2.0)
# std = torch.abs(x).mean()*norm
std = torch.std(x)
max1 = std * trim_value
x = x / max1 * 127
x = round_func(x)
x[x > 127] = 127
x[x < -127] = -127
x = x / 127 * max1
return x
def quant(x, quant_type, dim=1):
if quant_type == "linear":
max1 = torch.abs(x).max().float()
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type == "vector":
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type == "min-max":
maxA = torch.amax(x, dim=dim, keepdim=True).float()
minA = torch.amin(x, dim=dim, keepdim=True).float()
scale = (maxA - minA) / 2.0
xq = torch.round(127 * (x - minA - scale) / scale).to(torch.int8)
return xq, (minA.float(), scale.float())
else:
return None
def dequant(xq, S1, S2, dtype, quant_type):
if quant_type == "linear":
norm = S1 * S2 / (127 * 127)
# double cast needed to prevent overflows
return (xq.float() * norm).to(dtype)
elif quant_type == "vector":
x = xq.float()
if len(xq.shape) == 2 and len(S1.shape) == 3:
S1 = S1.squeeze(0)
if len(xq.shape) == 2 and len(S2.shape) == 3:
S2 = S2.squeeze(0)
# print(x.shape, S1.shape, S2.shape)
if len(S1.shape) == 2:
x *= S1.t() / 127
else:
x *= S1 / 127
x *= S2 / 127
return x.to(dtype)
else:
return None
def dequant_min_max(xq, A, B, SA, SB, dtype):
offset = B.float().t().sum(0) * (SA[0] + SA[1])
x = xq.float()
if len(xq.shape) == 2 and len(SB.shape) == 3:
SB = SB.squeeze(0)
if len(xq.shape) == 2 and len(SA.shape) == 3:
SA = SA.squeeze(0)
if len(SB.shape) == 2:
x *= SB.t() / 127
else:
x *= SB / 127
x *= SA[1] / 127
x += offset
return x.to(dtype)
def get_8bit_linear(x, stochastic=False):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
max1 = torch.abs(x).max()
x = x / max1 * 127
x = round_func(x) / 127 * max1
# x = torch.round(x)/128*max1
return x
@staticmethod
def get_8bit_vector_wise(x, dim, stochastic=False):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
max1[max1 == 0] = 1.0
x = (x * 127) / max1
x = round_func(x) / 127 * max1
return x
@staticmethod
def round_stoachastic(x):
sign = torch.sign(x)
absx = torch.abs(x)
decimal = absx - torch.floor(absx)
rdm = torch.rand_like(decimal)
return sign * (torch.floor(absx) + (rdm < decimal).to(x.dtype))
@staticmethod
def fake_8bit_storage(w, exponent_bits):
code = bnb.functional.create_dynamic_map(n=exponent_bits).to(w.device)
absmax, C = bnb.functional.quantize_blockwise(w.data, code=code)
out = bnb.functional.dequantize_blockwise(absmax, C, code)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_quantile(w, args):
code = bnb.functional.estimate_quantiles(w.data, offset=args.offset)
# C = bnb.functional.quantize_no_absmax(code, w)
# out = bnb.functional.dequantize_no_absmax(code, C, out=w.data)
# print(out)
# out = out.half()
code /= torch.max(torch.abs(code))
absmax, C = bnb.functional.quantize_blockwise(w.data, code=code)
out = bnb.functional.dequantize_blockwise(absmax, C, code)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_stoachstic(w):
rand = torch.rand(1024, device=w.device)
absmax, C = bnb.functional.quantize_blockwise(w.data, rand=rand)
out = bnb.functional.dequantize_blockwise(absmax, C)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_with_max(w, topk=8):
blocked_w = einops.rearrange(w.flatten(), "(h b) -> h b", b=256)
max_val, idx = torch.sort(torch.abs(blocked_w), dim=1, descending=True)
idx = idx[:, :topk]
max_val = max_val[:, :topk]
mask = torch.zeros_like(blocked_w)
mask.scatter_(dim=1, index=idx, src=torch.ones_like(max_val))
mask = mask.bool()
# 1. zero out max values
# 2. quantize + dequantize
# 3. write back max values
# 4. copy matrix back to weight
values = blocked_w[mask]
blocked_w[mask] = 0
code = bnb.functional.create_dynamic_map()
code = code.to(w.device)
absmax, C = bnb.functional.quantize_blockwise(blocked_w.data)
bnb.functional.dequantize_blockwise(absmax, C, out=blocked_w)
blocked_w[mask] = values
unblocked_w = blocked_w.flatten().view(w.shape)
w.copy_(unblocked_w)
return unblocked_w
@staticmethod
def forward(ctx, x, weight, bias=None, args=None):
if args.use_8bit_training != "off":
weight8, S1 = LinearFunction.quant(weight, args.quant_type, dim=1)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=2)
outputq = bnb.functional.igemm(x8, weight8.t())
output = LinearFunction.dequant(
outputq, S1, S2, x.dtype, args.quant_type
)
# if torch.rand(1) < 0.01:
# output32 = torch.matmul(x, weight.t())
# err = torch.abs(output-output32).float()
# relerr = err/(torch.abs(output32).float()+1e-8)
# print(f'{err.mean().item():.4f}, {relerr.mean().item():.4f}', args.quant_type, 'forward', proxy)
else:
# output = torch.matmul(x, weight.t())
output = torch.einsum("bsi,oi->bso", x, weight)
ctx.save_for_backward(x, weight, bias)
ctx.args = args
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
return output
@staticmethod
def backward(ctx, grad_output):
x, weight, bias = ctx.saved_tensors
args = ctx.args
stochastic = False
grad_input = grad_weight = grad_bias = None
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0)
# weight and x are already 8bit
# -> transform grad_output to 8-bit
if args.use_8bit_training == "forward+wgrad":
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=[0, 1]
)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=[0, 1])
grad_weight8 = bnb.functional.igemm(grad_output8, x8)
grad_weight = LinearFunction.dequant(
grad_weight8, S1, S2, grad_output.dtype, args.quant_type
)
# grad_weight32 = torch.einsum('bso,bsi->oi', grad_output, x)
grad_input = grad_output.matmul(weight)
elif args.use_8bit_training == "full":
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=[0, 1]
)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=[0, 1])
grad_weight8 = torch.zeros_like(weight, dtype=torch.int32)
bnb.functional.igemm(grad_output8, x8, out=grad_weight8)
grad_weight = LinearFunction.dequant(
grad_weight8, S1, S2, grad_output.dtype, args.quant_type
)
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=2
)
weight8, S3 = LinearFunction.quant(weight, args.quant_type, dim=0)
grad_input8 = bnb.functional.igemm(grad_output8, weight8)
grad_input = LinearFunction.dequant(
grad_input8, S1, S3, grad_output.dtype, args.quant_type
)
else:
grad_input = grad_output.matmul(weight)
grad_weight = torch.einsum("bsi,bso->oi", x, grad_output)
return grad_input, grad_weight, grad_bias, None
class Linear8bit(nn.Module):
def __init__(self, input_features, output_features, bias=True, args=None):
super().__init__()
self.input_features = input_features
self.output_features = output_features
self.args = args
self.weight = nn.Parameter(torch.empty(output_features, input_features))
if bias:
self.bias = nn.Parameter(torch.empty(output_features))
else:
self.register_parameter("bias", None)
torch.nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
torch.nn.init.zeros_(self.bias)
def forward(self, x):
self.args.training = self.training
return LinearFunction.apply(x, self.weight, self.bias, self.args)
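# Illustrative sketch (not part of the original tests): how Linear8bit and
# LinearFunction above are meant to be exercised. Assumes a CUDA device and a
# CUDA-enabled bitsandbytes build, since the quantized forward/backward call
# bnb.functional.igemm.
def _example_linear8bit_forward_backward():
    args = get_args()  # quant_type="vector", use_8bit_training="full"
    layer = Linear8bit(32, 64, args=args).cuda().half()
    x = torch.randn(4, 8, 32, device="cuda", dtype=torch.half, requires_grad=True)
    out = layer(x)  # int8 igemm forward with vector-wise (de)quantization
    out.mean().backward()  # int8 igemm gradients for weight and input
    return out, x.grad, layer.weight.grad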
threshold = [0.0, 3.0]
values = threshold
names = [f"threshold_{vals}" for vals in values]
@pytest.mark.parametrize("threshold", values, ids=names)
def test_linear8bitlt_inference(threshold):
l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold).cuda().half()
assert l1.weight.device.type == "cuda"
assert l1.weight.dtype == torch.float16
l1.eval()
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
if i == 1:
assert l1.state.CxB is not None
def test_linear8bitlt_accumulated_gradient():
l1 = torch.nn.Sequential(
*[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)]
)
l2 = torch.nn.Sequential(
*[torch.nn.Linear(32, 32).cuda().half() for i in range(2)]
)
l2[0].weight = torch.nn.Parameter(l1[0].weight.clone())
l2[0].bias = torch.nn.Parameter(l1[0].bias.clone())
l2[1].weight = torch.nn.Parameter(l1[1].weight.clone())
l2[1].bias = torch.nn.Parameter(l1[1].bias.clone())
opt1 = bnb.optim.Adam8bit(l1.parameters(), lr=0.001)
opt2 = bnb.optim.Adam8bit(l2.parameters(), lr=0.001)
acc_steps = 10
for i in range(10):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
o2 = l2(b1)
loss1 = o1.mean()
loss2 = o2.mean()
loss1.backward()
loss2.backward()
if i == 2:
assert l1[0].state.CxB is not None
assert l1[1].state.CxB is not None
if i > 0 and i % acc_steps == 0:
opt1.step()
opt1.zero_grad(True)
opt2.step()
opt2.zero_grad(True)
assert_all_approx_close(
l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2
)
assert_all_approx_close(
l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2
)
# we do this copy because otherwise we have small divergences over time that add up
l1[0].weight.data.copy_(l2[0].weight.data)
l1[1].weight.data.copy_(l2[1].weight.data)
else:
torch.testing.assert_allclose(l1[0].weight.grad, l2[0].weight.grad)
torch.testing.assert_allclose(l1[1].weight.grad, l2[1].weight.grad)
threshold = [0.0, 2.0]
values = threshold
names = [f"threshold_{vals}" for vals in values]
@pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
l1 = (
bnb.nn.Linear8bitLt(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
.cuda()
.half()
)
assert l1.weight.dtype == torch.int8
l1.eval()
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
assert o1.dtype == torch.float16
mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).cuda()
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
mlp = (
MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
.cuda()
.half()
)
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
mlp = (
MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
.half()
.cuda()
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
mlp = (
MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
.half()
.to("cuda")
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
assert mlp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
mlp = MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
w1, w2 = mlp.fc1.weight.clone().cuda(), mlp.fc2.weight.clone().cuda() # grab weights before quantization,
mlp = mlp.cuda().half() # and this line triggers quantization
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
assert mlp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
if memory_efficient_backward:
b1 = torch.randn(16, 8, 32, device="cuda", requires_grad=True, dtype=torch.half)
o1 = mlp(b1)
assert o1.dtype == torch.float16
assert o1.requires_grad
grad_proj = torch.randn_like(o1)
mlp.zero_grad()
(o1 * grad_proj).sum().backward()
grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()
scale = grad_ref.abs().mean()
torch.testing.assert_allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)
assert (idx == 0).sum().item() <= b1.numel() * 0.005
def test_linear8bitlt_fp32_bias():
# casts model to fp16 -> int8 automatically
l1 = bnb.nn.Linear8bitLt(32, 64, has_fp16_weights=False).cuda()
assert l1.weight.dtype == torch.int8
assert l1.bias.dtype == torch.float32
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
# casts bias to fp32
o1 = l1(b1)
assert l1.bias.dtype == torch.float16
# casts model to fp16 -> int8 automatically
l1 = bnb.nn.Linear8bitLt(32, 64, has_fp16_weights=False, bias=False).cuda()
assert l1.weight.dtype == torch.int8
assert l1.bias is None
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
assert l1.bias is None
|
import ctypes as ct
import os
import torch
from pathlib import Path
from warnings import warn
from bitsandbytes.cuda_setup.main import CUDASetup
setup = CUDASetup.get_instance()
if not setup.initialized:
setup.run_cuda_setup()
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
setup.print_log_stack()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs above to fix your environment!
        If you cannot find any issues and suspect a bug, please open an issue with details about your environment:
https://github.com/TimDettmers/bitsandbytes/issues''')
    lib.cadam32bit_g32  # probe a CUDA symbol; raises AttributeError on CPU-only builds
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import cuda_setup, utils
from .autograd._functions import (
MatmulLtState,
bmm_cublas,
matmul,
matmul_cublas,
mm_cublas,
)
from .cextension import COMPILED_WITH_CUDA
from .nn import modules
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes as ct
import itertools
import math
import operator
import random
import torch
from functools import reduce # Required in Python 3
from typing import Tuple
from torch import Tensor
from .cextension import COMPILED_WITH_CUDA, lib
# math.prod not compatible with python < 3.8
def prod(iterable):
return reduce(operator.mul, iterable, 1)
name2qmap = {}
if COMPILED_WITH_CUDA:
"""C FUNCTIONS FOR OPTIMIZERS"""
str2optimizer32bit = {}
str2optimizer32bit["adam"] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer32bit["momentum"] = (
lib.cmomentum32bit_g32,
lib.cmomentum32bit_g16,
)
str2optimizer32bit["rmsprop"] = (
lib.crmsprop32bit_g32,
lib.crmsprop32bit_g16,
)
str2optimizer32bit["lion"] = (
lib.clion32bit_g32,
lib.clion32bit_g16,
)
str2optimizer32bit["adagrad"] = (
lib.cadagrad32bit_g32,
lib.cadagrad32bit_g16,
)
str2optimizer32bit["lars"] = (
lib.cmomentum32bit_g32,
lib.cmomentum32bit_g16,
)
str2optimizer32bit["lamb"] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer8bit = {}
str2optimizer8bit["adam"] = (
lib.cadam_static_8bit_g32,
lib.cadam_static_8bit_g16,
)
str2optimizer8bit["momentum"] = (
lib.cmomentum_static_8bit_g32,
lib.cmomentum_static_8bit_g16,
)
str2optimizer8bit["rmsprop"] = (
lib.crmsprop_static_8bit_g32,
lib.crmsprop_static_8bit_g16,
)
str2optimizer8bit["lion"] = (
lib.clion_static_8bit_g32,
lib.clion_static_8bit_g16,
)
str2optimizer8bit["lamb"] = (
lib.cadam_static_8bit_g32,
lib.cadam_static_8bit_g16,
)
str2optimizer8bit["lars"] = (
lib.cmomentum_static_8bit_g32,
lib.cmomentum_static_8bit_g16,
)
str2optimizer8bit_blockwise = {}
str2optimizer8bit_blockwise["adam"] = (
lib.cadam_8bit_blockwise_fp32,
lib.cadam_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["momentum"] = (
lib.cmomentum_8bit_blockwise_fp32,
lib.cmomentum_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["rmsprop"] = (
lib.crmsprop_8bit_blockwise_fp32,
lib.crmsprop_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["lion"] = (
lib.clion_8bit_blockwise_fp32,
lib.clion_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["adagrad"] = (
lib.cadagrad_8bit_blockwise_fp32,
lib.cadagrad_8bit_blockwise_fp16,
)
class CUBLAS_Context:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.context = {}
# prev_device = torch.cuda.current_device()
# for i in range(torch.cuda.device_count()):
# torch.cuda.set_device(torch.device('cuda', i))
# self.context.append(ct.c_void_p(lib.get_context()))
# torch.cuda.set_device(prev_device)
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def get_context(self, device):
if device.index not in self.context:
prev_device = torch.cuda.current_device()
torch.cuda.set_device(device)
self.context[device.index] = ct.c_void_p(lib.get_context())
torch.cuda.set_device(prev_device)
return self.context[device.index]
class Cusparse_Context:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.context = ct.c_void_p(lib.get_cusparse())
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def create_linear_map(signed=True, total_bits=8, add_zero=True):
sign = (-1.0 if signed else 0.0)
total_values = 2**total_bits
if add_zero or total_bits < 8:
# add a zero
        # since we simulate fewer bits by having zeros in the data type, we
        # need to center the quantization around zero and as such lose
        # a single value
total_values = (2**total_bits if not signed else 2**total_bits-1)
values = torch.linspace(sign, 1.0, total_values)
gap = 256 - values.numel()
if gap == 0:
return values
else:
l = values.numel()//2
#return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist())
return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist())
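# Illustrative sketch (not in the original source): the linear map spans
# [-1, 1] for a signed code and is always padded with zeros to 256 entries
# so it can be used as an 8-bit quantization code.
def _example_linear_map():
    code8 = create_linear_map(signed=True, total_bits=8)
    assert code8.numel() == 256
    assert code8.min().item() == -1.0 and code8.max().item() == 1.0
    code4 = create_linear_map(signed=True, total_bits=4)  # 15 real values + zero padding
    assert code4.numel() == 256
    return code8, code4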
def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
e = exponent_bits
p = precision_bits
has_sign = 1 if signed else 0
assert e+p == total_bits-has_sign
# the exponent is biased to 2^(e-1) -1 == 0
evalues = []
pvalues = []
for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)):
evalues.append(2**val)
values = []
lst = list(itertools.product([0, 1], repeat=precision_bits))
#for ev in evalues:
bias = 2**(exponent_bits-1)-1
for evalue in range(2**(exponent_bits)):
for bit_pattern in lst:
value = (1 if evalue != 0 else 0)
for i, pval in enumerate(list(bit_pattern)):
value += pval*(2**-(i+1))
if evalue == 0:
# subnormals
value = value*2**-(bias-1)
else:
# normals
value = value*2**-(evalue-bias-2)
values.append(value)
if signed:
values.append(-value)
assert len(values) == 2**total_bits
values.sort()
if total_bits < 8:
gap = 256 - len(values)
for i in range(gap):
values.append(0)
values.sort()
code = torch.Tensor(values)
code /= code.max()
return code
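# Illustrative sketch (not in the original source): an E4M3-style signed 8-bit
# float code. The map always has 2**total_bits entries and is normalized so
# that its largest magnitude is 1.0.
def _example_fp8_map():
    code = create_fp8_map(signed=True, exponent_bits=4, precision_bits=3, total_bits=8)
    assert code.numel() == 256
    assert torch.isclose(code.abs().max(), torch.tensor(1.0))
    return code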
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
"""
Creates the dynamic quantiztion map.
The dynamic data type is made up of a dynamic exponent and
fraction. As the exponent increase from 0 to -7 the number
of bits available for the fraction shrinks.
This is a generalization of the dynamic type where a certain
number of the bits and be reserved for the linear quantization
region (the fraction). n determines the maximum number of
exponent bits.
For more details see
(8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
"""
data = []
# these are additional items that come from the case
# where all the exponent bits are zero and no
# indicator bit is present
non_sign_bits = total_bits - (1 if signed else 0)
additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
if not signed:
additional_items = 2 * additional_items
for i in range(max_exponent_bits):
fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
boundaries = torch.linspace(0.1, 1, fraction_items)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if additional_items > 0:
boundaries = torch.linspace(0.1, 1, additional_items + 1)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
data.append(0)
data.append(1.0)
gap = 256 - len(data)
for i in range(gap):
data.append(0)
data.sort()
return Tensor(data)
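# Illustrative sketch (not in the original source): the dynamic 8-bit map is
# symmetric around zero, always has 256 entries, and contains both 0 and 1.0.
def _example_dynamic_map():
    code = create_dynamic_map(signed=True)
    assert code.numel() == 256
    assert code.max().item() == 1.0 and bool((code == 0).any())
    return code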
def create_quantile_map(A, total_bits=8):
q = estimate_quantiles(A, num_quantiles=2**total_bits-1)
q = q.tolist()
q.append(0)
gap = 256 - len(q)
for i in range(gap):
q.append(0)
q.sort()
q = Tensor(q)
q = q/q.abs().max()
return q
def get_special_format_str():
if not torch.cuda.is_available(): return 'col_turing'
major, _minor = torch.cuda.get_device_capability()
if major <= 7:
return "col_turing"
if major == 8:
return "col_ampere"
return "col_turing"
def is_on_gpu(tensors):
on_gpu = True
for t in tensors:
if t is None: continue # NULL pointers are fine
on_gpu &= t.device.type == 'cuda'
return on_gpu
def get_ptr(A: Tensor) -> ct.c_void_p:
"""
Get the ctypes pointer from a PyTorch Tensor.
Parameters
----------
A : torch.tensor
The PyTorch tensor.
Returns
-------
ctypes.c_void_p
"""
if A is None:
return None
else:
return ct.c_void_p(A.data.data_ptr())
def pre_call(device):
prev_device = torch.cuda.current_device()
torch.cuda.set_device(device)
return prev_device
def post_call(prev_device):
torch.cuda.set_device(prev_device)
def get_transform_func(dtype, orderA, orderOut, transpose=False):
name = f'ctransform_{(8 if dtype == torch.int8 else 32)}_{orderA}_to_{orderOut}_{"t" if transpose else "n"}'
if not hasattr(lib, name):
print(name)
raise ValueError(
f"Transform function not supported: {orderA} to {orderOut} for data type {dtype} and transpose={transpose}"
)
else:
return getattr(lib, name)
def get_transform_buffer(
shape, dtype, device, to_order, from_order="row", transpose=False
):
# init_func = torch.empty
init_func = torch.zeros
dims = len(shape)
if dims == 2:
rows = shape[0]
elif dims == 3:
rows = shape[0] * shape[1]
cols = shape[-1]
state = (shape, to_order)
if transpose:
# swap dims
tmp = rows
rows = cols
cols = tmp
state = (shape[::-1], to_order)
if to_order == "row" or to_order == "col":
return init_func(shape, dtype=dtype, device=device), state
elif to_order == "col32":
# blocks of 32 columns (padded)
cols = 32 * ((cols + 31) // 32)
return init_func((rows, cols), dtype=dtype, device=device), state
elif to_order == "col_turing":
# blocks of 32 columns and 8 rows
cols = 32 * ((cols + 31) // 32)
rows = 8 * ((rows + 7) // 8)
return init_func((rows, cols), dtype=dtype, device=device), state
elif to_order == "col_ampere":
# blocks of 32 columns and 32 rows
cols = 32 * ((cols + 31) // 32)
rows = 32 * ((rows + 31) // 32)
return init_func((rows, cols), dtype=dtype, device=device), state
else:
raise NotImplementedError(f"To_order not supported: {to_order}")
def nvidia_transform(
A,
to_order,
from_order="row",
out=None,
transpose=False,
state=None,
ld=None,
):
if state is None:
state = (A.shape, from_order)
else:
from_order = state[1]
if out is None:
out, new_state = get_transform_buffer(
state[0], A.dtype, A.device, to_order, state[1]
)
else:
new_state = (state[1], to_order)
func = get_transform_func(A.dtype, from_order, to_order, transpose)
shape = state[0]
if len(shape) == 2:
dim1 = ct.c_int32(shape[0])
dim2 = ct.c_int32(shape[1])
elif ld is not None:
n = prod(shape)
dim1 = prod([shape[i] for i in ld])
dim2 = ct.c_int32(n // dim1)
dim1 = ct.c_int32(dim1)
else:
dim1 = ct.c_int32(shape[0] * shape[1])
dim2 = ct.c_int32(shape[2])
ptr = CUBLAS_Context.get_instance().get_context(A.device)
func(ptr, get_ptr(A), get_ptr(out), dim1, dim2)
return out, new_state
def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor:
'''
Estimates 256 equidistant quantiles on the input tensor eCDF.
Uses SRAM-Quantiles algorithm to quickly estimate 256 equidistant quantiles
via the eCDF of the input tensor `A`. This is a fast but approximate algorithm
and the extreme quantiles close to 0 and 1 have high variance / large estimation
    errors. These large errors can be avoided by using the offset variable which trims
    the distribution. The default offset value of 1/512 ensures minimum entropy encoding -- it
    trims 1/512 = ~0.2% from each side of the distribution. An offset value of 0.01 to 0.02
    usually has a much lower error but is not a minimum entropy encoding. Given an offset
    of 0.02, equidistant points in the range [0.02, 0.98] are used for the quantiles.
Parameters
----------
A : torch.Tensor
The input tensor. Any shape.
out : torch.Tensor
Tensor with the 256 estimated quantiles.
offset : float
The offset for the first and last quantile from 0 and 1. Default: 1/(2*num_quantiles)
num_quantiles : int
The number of equally spaced quantiles.
Returns
-------
torch.Tensor:
The 256 quantiles in float32 datatype.
'''
if A.numel() < 256: raise NotImplementedError(f'Quantile estimation needs at least 256 values in the Tensor, but Tensor had only {A.numel()} values.')
if num_quantiles > 256: raise NotImplementedError(f"Currently only a maximum of 256 equally spaced quantiles are supported, but the argument num_quantiles={num_quantiles}")
if num_quantiles < 256 and offset == 1/(512):
# override default arguments
offset = 1/(2*num_quantiles)
if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device)
is_on_gpu([A, out])
device = pre_call(A.device)
if A.dtype == torch.float32:
lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cestimate_quantiles_fp16(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
else:
raise NotImplementedError(f"Not supported data type {A.dtype}")
post_call(device)
if num_quantiles < 256:
step = round(256/num_quantiles)
idx = torch.linspace(0, 255, num_quantiles).long().to(A.device)
out = out[idx]
return out
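# Illustrative sketch (not in the original source): estimating a quantile code
# for a tensor. Requires a CUDA device and a CUDA-enabled build, since the
# SRAM-Quantiles kernel runs on the GPU.
def _example_estimate_quantiles():
    A = torch.randn(1024, device="cuda")
    q = estimate_quantiles(A)  # 256 approximate equidistant quantiles of the eCDF
    assert q.numel() == 256 and q.dtype == torch.float32
    return q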
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
"""
    Quantize tensor A in blocks of `blocksize` values (default 4096).
    Quantizes tensor A by dividing it into blocks of `blocksize` values.
    Then the absolute maximum value within these blocks is calculated
    for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
rand : torch.Tensor
The tensor for stochastic rounding.
    out : torch.Tensor
        The output tensor (8-bit).
    blocksize : int
        The block size used for quantization (default: 4096).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
if rand is not None:
is_on_gpu([code, A, out, absmax, rand])
assert blocksize==4096
assert rand.numel() >= 1024
rand_offset = random.randint(0, 1023)
if A.dtype == torch.float32:
lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
else:
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
assert rand is None
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out, (absmax, code)
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
    quant_state : tuple(torch.Tensor, torch.Tensor)
        Tuple of absmax values and the quantization code.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if out is None:
out = torch.zeros_like(A, dtype=torch.float32)
if quant_state is None:
quant_state = (absmax, code)
else:
absmax, code = quant_state
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
        if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
            raise ValueError(f"The blocksize {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
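# Illustrative sketch (not in the original source): a blockwise quantize ->
# dequantize round trip with the default dynamic code. Shown on a CPU float32
# tensor; assumes the compiled bitsandbytes native library is available.
def _example_blockwise_roundtrip():
    A = torch.randn(4096, dtype=torch.float32)
    qA, (absmax, code) = quantize_blockwise(A)
    A2 = dequantize_blockwise(qA, absmax=absmax, code=code)
    assert (A - A2).abs().mean().item() < 0.05  # small 8-bit quantization error
    return A2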
def quantize(A: Tensor, code: Tensor = None, out: Tensor = None) -> Tensor:
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
absmax = torch.abs(A).max()
inp = A / absmax
out = quantize_no_absmax(inp, code, out)
return out, (absmax, code)
def dequantize(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
) -> Tensor:
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
if quant_state is None:
quant_state = (absmax, code)
out = dequantize_no_absmax(A, quant_state[1], out)
return out * quant_state[0]
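# Illustrative sketch (not in the original source): a whole-tensor (single
# absmax) quantize -> dequantize round trip. Assumes a CUDA device and a
# CUDA-enabled build, since quantize_no_absmax/dequantize_no_absmax dispatch
# to GPU kernels.
def _example_quantize_roundtrip():
    A = torch.randn(1024, device="cuda")
    qA, state = quantize(A)  # state = (absmax, code)
    A2 = dequantize(qA, state)
    assert (A - A2).abs().mean().item() < 0.05
    return A2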
def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
'''
Quantizes input tensor to 8-bit.
Quantizes the 32-bit input tensor `A` to the 8-bit output tensor
`out` using the quantization map `code`.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
out : torch.Tensor, optional
The output tensor. Needs to be of type byte.
Returns
-------
torch.Tensor:
Quantized 8-bit tensor.
'''
if out is None: out = torch.zeros_like(A, dtype=torch.uint8)
is_on_gpu([A, out])
lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
return out
def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
'''
Dequantizes the 8-bit tensor to 32-bit.
Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via
the quantization map `code`.
Parameters
----------
A : torch.Tensor
The 8-bit input tensor.
code : torch.Tensor
The quantization map.
out : torch.Tensor
The 32-bit output tensor.
Returns
-------
torch.Tensor:
32-bit output tensor.
'''
if out is None: out = torch.zeros_like(A, dtype=torch.float32)
is_on_gpu([code, A, out])
lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
return out
def optimizer_update_32bit(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
"""
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
skip_zeros : bool
Whether to skip zero-valued gradients or not (default: False).
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementedError(
f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}'
)
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
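# Illustrative sketch (not in the original source): a single hand-rolled 32-bit
# momentum update. Assumes a CUDA device and a CUDA-enabled build; in practice
# these kernels are driven by the optimizer classes in bitsandbytes.optim.
def _example_optimizer_update_32bit():
    p = torch.randn(1024, device="cuda", dtype=torch.float32)
    g = torch.randn_like(p)
    state1 = torch.zeros_like(p)  # momentum buffer
    optimizer_update_32bit("momentum", g, p, state1, beta1=0.9, eps=1e-8, step=1, lr=1e-3)
    return p, state1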
def optimizer_update_8bit(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
max1: Tensor,
max2: Tensor,
new_max1: Tensor,
new_max2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
) -> None:
"""
Performs an inplace Adam update.
Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.
Uses AdamW formulation if weight decay > 0.0.
Parameters
----------
optimizer_name : str
The name of the optimizer. Choices {adam, momentum}
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Adam state 1.
state2 : torch.Tensor
Adam state 2.
beta1 : float
Adam beta1.
beta2 : float
Adam beta2.
eps : float
Adam epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
qmap1 : torch.Tensor
Quantization map for first Adam state.
qmap2 : torch.Tensor
Quantization map for second Adam state.
max1 : torch.Tensor
Max value for first Adam state update.
max2 : torch.Tensor
Max value for second Adam state update.
new_max1 : torch.Tensor
Max value for the next Adam update of the first state.
new_max2 : torch.Tensor
Max value for the next Adam update of the second state.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
def optimizer_update_8bit_blockwise(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
def percentile_clipping(
grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5
):
"""Applies percentile clipping
grad: torch.Tensor
The gradient tensor.
gnorm_vec: torch.Tensor
Vector of gradient norms. 100 elements expected.
step: int
The current optimiation steps (number of past gradient norms).
"""
is_on_gpu([grad, gnorm_vec])
if grad.dtype == torch.float32:
lib.cpercentile_clipping_g32(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
elif grad.dtype == torch.float16:
lib.cpercentile_clipping_g16(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
else:
raise ValueError(f"Gradient type {grad.dtype} not supported!")
current_gnorm = torch.sqrt(gnorm_vec[step % 100])
vals, idx = torch.sort(gnorm_vec)
clip_value = torch.sqrt(vals[percentile])
gnorm_scale = 1.0
if current_gnorm > clip_value:
gnorm_scale = clip_value / current_gnorm
return current_gnorm, clip_value, gnorm_scale
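# Illustrative sketch (not in the original source): deriving a gradient rescale
# factor from percentile clipping inside a training loop. Assumes CUDA gradients
# and a CUDA-enabled build; each parameter keeps its last 100 squared gradient
# norms in a per-parameter buffer (a hypothetical attribute used only here).
def _example_percentile_clipping(model, step):
    for p in model.parameters():
        if p.grad is None:
            continue
        if not hasattr(p, "gnorm_vec"):
            p.gnorm_vec = torch.zeros(100, device=p.grad.device)
        current_gnorm, clip_value, gnorm_scale = percentile_clipping(p.grad, p.gnorm_vec, step)
        p.grad.mul_(gnorm_scale)  # downscale gradients whose norm exceeds the percentile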
def histogram_scatter_add_2d(
histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor
):
assert len(histogram.shape) == 2
assert histogram.dtype == torch.float32
assert source.dtype == torch.float32
assert index1.dtype == torch.int32
assert index2.dtype == torch.int32
assert histogram.device.type == "cuda"
assert index1.device.type == "cuda"
assert index2.device.type == "cuda"
assert source.device.type == "cuda"
maxdim1 = ct.c_int32(histogram.shape[0])
n = ct.c_int32(index1.numel())
is_on_gpu([histogram, index1, index2, source])
lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n)
def check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8):
if not torch.cuda.is_initialized(): torch.cuda.init()
if A.dtype != expected_type or B.dtype != expected_type:
raise TypeError(
f"Expected torch.int8 input tensors A and B, but got {A.dtype} and {B.dtype}"
)
sA = A.shape
sB = B.shape
tA = transposed_A
tB = transposed_B
correct = True
if len(sA) == 2 and len(sB) == 2:
if not tA and not tB and A.shape[1] != B.shape[0]:
correct = False
elif tA and not tB and A.shape[0] != B.shape[0]:
correct = False
elif tA and tB and A.shape[0] != B.shape[1]:
correct = False
elif not tA and tB and A.shape[1] != B.shape[1]:
correct = False
elif len(sA) == 3 and len(sB) == 2:
if not tA and not tB and A.shape[2] != B.shape[0]:
correct = False
elif tA and not tB and A.shape[1] != B.shape[0]:
correct = False
elif tA and tB and A.shape[1] != B.shape[1]:
correct = False
elif not tA and tB and A.shape[2] != B.shape[1]:
correct = False
elif len(sA) == 3 and len(sB) == 3:
if not tA and not tB and A.shape[2] != B.shape[1]:
correct = False
elif tA and not tB and A.shape[1] != B.shape[1]:
correct = False
elif tA and tB and A.shape[1] != B.shape[2]:
correct = False
elif not tA and tB and A.shape[2] != B.shape[2]:
correct = False
if out is not None:
sout = out.shape
# special case common in backprop
if not correct and len(sA) == 3 and len(sB) == 3:
if (
sout[0] == sA[2]
and sout[1] == sB[2]
and sA[0] == sB[0]
and sA[1] == sB[1]
):
correct = True
else:
if len(sA) == 2 and len(sB) == 2:
if not tA and not tB:
sout = (sA[0], sB[1])
elif tA and tB:
sout = (sA[1], sB[0])
elif tA and not tB:
sout = (sA[1], sB[1])
elif not tA and tB:
sout = (sA[0], sB[0])
elif len(sA) == 3 and len(sB) == 2:
if not tA and not tB:
sout = (sA[0], sA[1], sB[1])
elif tA and tB:
sout = (sA[0], sA[2], sB[0])
elif tA and not tB:
sout = (sA[0], sA[2], sB[1])
elif not tA and tB:
sout = (sA[0], sA[1], sB[0])
elif len(sA) == 3 and len(sB) == 3:
if not tA and not tB:
sout = (sA[0], sA[1], sB[2])
elif tA and tB:
sout = (sA[0], sA[2], sB[1])
elif tA and not tB:
sout = (sA[0], sA[2], sB[2])
elif not tA and tB:
sout = (sA[0], sA[1], sB[1])
if not correct:
raise ValueError(
f"Tensor dimensions incorrect for matrix mulitiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}."
)
return sout
def igemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
):
sout = check_matmul(A, B, out, transposed_A, transposed_B)
if out is None:
out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
if len(A.shape) == 3 and len(B.shape) == 3:
if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:
return batched_igemm(A, B, out)
sA = A.shape
sB = B.shape
    if transposed_A and len(sA) == 2:
        sA = (sA[1], sA[0])
    elif transposed_A and len(sA) == 3:
        # transposing a batched matrix swaps the last two dimensions
        sA = (sA[0], sA[2], sA[1])
    if transposed_B and len(sB) == 2:
        sB = (sB[1], sB[0])
    elif transposed_B and len(sB) == 3:
        sB = (sB[0], sB[2], sB[1])
# this is a mess: cuBLAS expect column major, but PyTorch is row major.
# So to perform the matrix multiplication, we have to treat A, B, and C matrices
# (transpose of row major is column major)
# This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
# matrices in the input arguments for cuBLAS
# column major: A @ B = C: [m, k] @ [k, n] = [m, n]
# row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[(1 if transposed_B else 0)]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = m
ldb = sA[2]
ldc = m
ptr = CUBLAS_Context.get_instance().get_context(A.device)
# B^T @ A^T = C^T
# [km, nk -> mn]
is_on_gpu([B, A, out])
lib.cigemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc))
return out
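# Illustrative sketch (not in the original source): the row-major / column-major
# trick used above, checked with plain PyTorch. Reinterpreting a row-major
# matrix as column-major is a transpose, so computing B^T @ A^T yields
# (A @ B)^T, i.e. C already laid out the way cuBLAS returns it.
def _example_row_col_major_identity():
    A = torch.randint(-128, 127, (4, 3), dtype=torch.int32)
    B = torch.randint(-128, 127, (3, 5), dtype=torch.int32)
    C = A @ B
    assert torch.equal((B.t() @ A.t()).t(), C)
    return C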
def batched_igemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
):
if not len(A.shape) == 3 or not len(B.shape) == 3:
raise ValueError(
f"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}"
)
sout = check_matmul(A, B, out, transposed_A, transposed_B)
if out is None:
out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
if B.is_contiguous():
lda = B.stride()[1]
transposed_A = False
else:
s = B.stride()
if s[0] != B.shape[0]:
B = B.contiguous()
lda = B.stride()[1]
elif s[2] == B.shape[1]:
transposed_A = True
lda = B.stride()[2]
else:
if s[2] == 1:
B = B.contiguous()
lda = B.stride()[1]
elif s[1] == 1:
B = B.contiguous()
lda = B.stride()[1]
else:
B = B.contiguous()
lda = B.stride()[1]
if A.is_contiguous():
ldb = A.stride()[1]
transposed_B = False
else:
s = A.stride()
if s[0] != A.shape[0]:
A = A.contiguous()
ldb = A.stride()[1]
transposed_B = False
elif s[2] == A.shape[1]:
ldb = A.stride()[2]
transposed_B = True
else:
A = A.contiguous()
ldb = A.stride()[1]
transposed_B = False
# this is a mess: cuBLAS expect column major, but PyTorch is row major.
# So to perform the matrix multiplication, we have to treat A, B, and C matrices
# (transpose of row major is column major)
# This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
# matrices in the input arguments for cuBLAS
# column major: A @ B = C: [batch, m, k] @ [batch, k, n] = [batch, m, n]
# row major: B^T @ A^T = C^T: [batch, m, k] @ [batch, k, n] = [batch, m, n]
# column major with row major layout: B^T @ A^T = C^T: [batch, k, m] @ [batch, n, k] = [batch, n, m]
num_batch = A.shape[0]
n = A.shape[1]
m = B.shape[2]
k = B.shape[1]
ldc = m
strideA = B.shape[1] * B.shape[2]
strideB = A.shape[1] * A.shape[2]
strideC = A.shape[1] * B.shape[2]
ptr = CUBLAS_Context.get_instance().get_context(A.device)
is_on_gpu([B, A, out])
lib.cbatched_igemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc),
ct.c_long(strideA), ct.c_long(strideB), ct.c_long(strideC), ct.c_uint32(num_batch))
return out
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
shapeA = SA[0]
shapeB = SB[0]
dimsA = len(shapeA)
dimsB = len(shapeB)
assert dimsB == 2, 'Only two dimensional matrices are supported for argument B'
if dimsA == 2:
m = shapeA[0]
elif dimsA == 3:
m = shapeA[0] * shapeA[1]
rows = n = shapeB[0]
assert prod(list(shapeA)) > 0, f'Input tensor dimensions need to be > 0: {shapeA}'
# if the tensor is empty, return a transformed empty tensor with the right dimensions
if shapeA[0] == 0 and dimsA == 2:
return torch.empty((0, shapeB[0]), device=A.device, dtype=torch.float16)
elif shapeA[1] == 0 and dimsA == 3:
return torch.empty(tuple(shapeA[:2] + [shapeB[0]]), device=A.device, dtype=torch.float16)
if dimsA == 2 and out is None:
out, Sout = get_transform_buffer(
(shapeA[0], shapeB[0]), dtype, A.device, "col32", "row"
)
elif dimsA == 3 and out is None:
out, Sout = get_transform_buffer(
(shapeA[0], shapeA[1], shapeB[0]), dtype, A.device, "col32", "row"
)
assert dimsB != 3, "len(B.shape)==3 not supported"
assert A.device.type == "cuda"
assert B.device.type == "cuda"
assert A.dtype == torch.int8
assert B.dtype == torch.int8
assert out.dtype == dtype
assert SA[1] == "col32"
assert SB[1] in ["col_turing", "col_ampere"]
assert Sout[1] == "col32"
assert (
shapeA[-1] == shapeB[-1]
), f"Matmullt only supports A @ B^T. Inner matrix dimensions do not match: A @ B = {shapeA} @ {shapeB}"
formatB = SB[1]
prev_device = A.device
torch.cuda.set_device(A.device)
ptr = CUBLAS_Context.get_instance().get_context(A.device)
ptrA = get_ptr(A)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
k = shapeA[-1]
lda = ct.c_int32(m * 32)
if formatB == "col_turing":
# turing: tiles with rows filled up to multiple of 8 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 7) // 8) * 8 * 32)
else:
# ampere: tiles with rows filled up to multiple of 32 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 31) // 32) * 32 * 32)
ldc = ct.c_int32(m * 32)
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
has_error = 0
ptrRowScale = get_ptr(None)
is_on_gpu([A, B, out])
if formatB == 'col_turing':
if dtype == torch.int32:
has_error = lib.cigemmlt_turing_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_turing_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
elif formatB == "col_ampere":
if dtype == torch.int32:
has_error = lib.cigemmlt_ampere_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_ampere_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
if has_error == 1:
print(f'A: {shapeA}, B: {shapeB}, C: {Sout[0]}; (lda, ldb, ldc): {(lda, ldb, ldc)}; (m, n, k): {(m, n, k)}')
raise Exception('cublasLt ran into an error!')
torch.cuda.set_device(prev_device)
return out, Sout
def mm_dequant(
A,
quant_state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
assert A.dtype == torch.int32
if bias is not None: assert bias.dtype == torch.float16
out_shape = quant_state[0]
if len(out_shape) == 3:
out_shape = (out_shape[0] * out_shape[1], out_shape[2])
if out is None:
out = torch.empty(out_shape, dtype=torch.float16, device=A.device)
if new_row_stats is None:
new_row_stats = torch.empty(
out_shape[0], dtype=torch.float32, device=A.device
)
if new_col_stats is None:
new_col_stats = torch.empty(
out_shape[1], dtype=torch.float32, device=A.device
)
assert (
new_row_stats.shape[0] == row_stats.shape[0]
), f"{new_row_stats.shape} vs {row_stats.shape}"
assert (
new_col_stats.shape[0] == col_stats.shape[0]
), f"{new_col_stats.shape} vs {col_stats.shape}"
prev_device = pre_call(A.device)
ptrA = get_ptr(A)
ptrOut = get_ptr(out)
ptrRowStats = get_ptr(row_stats)
ptrColStats = get_ptr(col_stats)
ptrNewRowStats = get_ptr(new_row_stats)
ptrNewColStats = get_ptr(new_col_stats)
ptrBias = get_ptr(bias)
numRows = ct.c_int32(out_shape[0])
numCols = ct.c_int32(out_shape[1])
is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias])
lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols)
post_call(prev_device)
return out
def get_colrow_absmax(
A, row_stats=None, col_stats=None, nnz_block_ptr=None, threshold=0.0
):
assert A.dtype == torch.float16
device = A.device
cols = A.shape[-1]
if len(A.shape) == 3:
rows = A.shape[0] * A.shape[1]
else:
rows = A.shape[0]
col_tiles = (cols + 255) // 256
tiled_rows = ((rows + 15) // 16) * 16
if row_stats is None:
row_stats = torch.empty(
(rows,), dtype=torch.float32, device=device
).fill_(-50000.0)
if col_stats is None:
col_stats = torch.empty(
(cols,), dtype=torch.float32, device=device
).fill_(-50000.0)
if nnz_block_ptr is None and threshold > 0.0:
nnz_block_ptr = torch.zeros(
((tiled_rows * col_tiles) + 1,), dtype=torch.int32, device=device
)
ptrA = get_ptr(A)
ptrRowStats = get_ptr(row_stats)
ptrColStats = get_ptr(col_stats)
ptrNnzrows = get_ptr(nnz_block_ptr)
rows = ct.c_int32(rows)
cols = ct.c_int32(cols)
prev_device = pre_call(A.device)
is_on_gpu([A, row_stats, col_stats, nnz_block_ptr])
lib.cget_col_row_stats(ptrA, ptrRowStats, ptrColStats, ptrNnzrows, ct.c_float(threshold), rows, cols)
post_call(prev_device)
if threshold > 0.0:
nnz_block_ptr.cumsum_(0)
return row_stats, col_stats, nnz_block_ptr
class COOSparseTensor:
def __init__(self, rows, cols, nnz, rowidx, colidx, values):
assert rowidx.dtype == torch.int32
assert colidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert rowidx.numel() == nnz
assert colidx.numel() == nnz
self.rows = rows
self.cols = cols
self.nnz = nnz
self.rowidx = rowidx
self.colidx = colidx
self.values = values
class CSRSparseTensor:
def __init__(self, rows, cols, nnz, rowptr, colidx, values):
assert rowptr.dtype == torch.int32
assert colidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert colidx.numel() == nnz
assert rowptr.numel() == rows + 1
self.rows = rows
self.cols = cols
self.nnz = nnz
self.rowptr = rowptr
self.colidx = colidx
self.values = values
class CSCSparseTensor:
def __init__(self, rows, cols, nnz, colptr, rowidx, values):
assert colptr.dtype == torch.int32
assert rowidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert rowidx.numel() == nnz
assert colptr.numel() == cols + 1
self.rows = rows
self.cols = cols
self.nnz = nnz
self.colptr = colptr
self.rowidx = rowidx
self.values = values
def coo2csr(cooA):
values, counts = torch.unique(cooA.rowidx, return_counts=True)
values.add_(1)
rowptr = torch.zeros(
(cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device
)
rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)
rowptr.cumsum_(0)
return CSRSparseTensor(
cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values
)
def coo2csc(cooA):
val, col2rowidx = torch.sort(cooA.colidx)
rowidx = cooA.rowidx[col2rowidx]
values = cooA.values[col2rowidx]
colvalues, counts = torch.unique(val, return_counts=True)
colvalues.add_(1)
colptr = torch.zeros(
(cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device
)
colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)
colptr.cumsum_(0)
return CSCSparseTensor(
cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values
)
def coo_zeros(rows, cols, nnz, device, dtype=torch.half):
rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)
colidx = torch.zeros((nnz,), dtype=torch.int32, device=device)
values = torch.zeros((nnz,), dtype=dtype, device=device)
return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)
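# Minimal sketch of the sparse containers above (illustrative, not part of the original module):
# build a tiny COO matrix by hand and convert it to CSR and CSC.
def _example_sparse_formats():
    rowidx = torch.tensor([0, 1, 1], dtype=torch.int32)
    colidx = torch.tensor([2, 0, 3], dtype=torch.int32)
    values = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float16)
    coo = COOSparseTensor(2, 4, 3, rowidx, colidx, values)  # 2x4 matrix with 3 non-zeros
    csr = coo2csr(coo)  # rowptr has length rows + 1
    csc = coo2csc(coo)  # colptr has length cols + 1
    return csr, csc
# _example_sparse_formats()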
def double_quant(
A, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0
):
device = A.device
assert A.dtype == torch.half
assert device.type == "cuda"
prev_device = pre_call(A.device)
cols = A.shape[-1]
if len(A.shape) == 3:
rows = A.shape[0] * A.shape[1]
else:
rows = A.shape[0]
if row_stats is None or col_stats is None:
row_stats, col_stats, nnz_row_ptr = get_colrow_absmax(
A, threshold=threshold
)
if out_col is None:
out_col = torch.zeros(A.shape, device=device, dtype=torch.int8)
if out_row is None:
out_row = torch.zeros(A.shape, device=device, dtype=torch.int8)
coo_tensor = None
ptrA = get_ptr(A)
ptrColStats = get_ptr(col_stats)
ptrRowStats = get_ptr(row_stats)
ptrOutCol = get_ptr(out_col)
ptrOutRow = get_ptr(out_row)
is_on_gpu([A, col_stats, row_stats, out_col, out_row])
if threshold > 0.0:
nnz = nnz_row_ptr[-1].item()
if nnz > 0:
coo_tensor = coo_zeros(
A.shape[0], A.shape[1], nnz_row_ptr[-1].item(), device
)
ptrRowIdx = get_ptr(coo_tensor.rowidx)
ptrColIdx = get_ptr(coo_tensor.colidx)
ptrVal = get_ptr(coo_tensor.values)
ptrRowPtr = get_ptr(nnz_row_ptr)
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
ptrRowIdx,
ptrColIdx,
ptrVal,
ptrRowPtr,
ct.c_float(threshold),
ct.c_int32(rows),
ct.c_int32(cols),
)
val, idx = torch.sort(coo_tensor.rowidx)
coo_tensor.rowidx = val
coo_tensor.colidx = coo_tensor.colidx[idx]
coo_tensor.values = coo_tensor.values[idx]
else:
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
None,
None,
None,
None,
ct.c_float(0.0),
ct.c_int32(rows),
ct.c_int32(cols),
)
else:
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
None,
None,
None,
None,
ct.c_float(threshold),
ct.c_int32(rows),
ct.c_int32(cols),
)
post_call(prev_device)
return out_row, out_col, row_stats, col_stats, coo_tensor
def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
prev_device = pre_call(A.device)
if state is None: state = (A.shape, from_order)
else: from_order = state[1]
if out is None: out, new_state = get_transform_buffer(state[0], A.dtype, A.device, to_order, state[1], transpose)
else: new_state = (state[0], to_order) # (shape, order)
shape = state[0]
if len(shape) == 2:
dim1 = ct.c_int32(shape[0])
dim2 = ct.c_int32(shape[1])
else:
dim1 = ct.c_int32(shape[0] * shape[1])
dim2 = ct.c_int32(shape[2])
is_on_gpu([A, out])
if to_order == 'col32':
if transpose:
lib.ctransform_row2col32T(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2col32(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "col_turing":
if transpose:
lib.ctransform_row2turingT(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2turing(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "col_ampere":
if transpose:
lib.ctransform_row2ampereT(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2ampere(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "row":
if from_order == "col_turing":
lib.ctransform_turing2row(get_ptr(A), get_ptr(out), dim1, dim2)
elif from_order == "col_ampere":
lib.ctransform_ampere2row(get_ptr(A), get_ptr(out), dim1, dim2)
else:
raise NotImplementedError(f'Transform function not implemented: From {from_order} to {to_order}')
post_call(prev_device)
return out, new_state
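# Minimal end-to-end sketch of the int8 kernels above (illustrative, not part of the original
# module; assumes a CUDA GPU with compute capability >= 7.5). The usual call chain is
# double_quant -> transform -> igemmlt -> mm_dequant and computes A @ B^T.
def _example_int8_matmul():
    A = torch.randn(4, 32, dtype=torch.float16, device="cuda")
    B = torch.randn(16, 32, dtype=torch.float16, device="cuda")
    CA, CAt, SCA, SCAt, _ = double_quant(A)  # row-wise int8 quantization of A
    CB, CBt, SCB, SCBt, _ = double_quant(B)  # row-wise int8 quantization of B
    C32A, SA = transform(CA, "col32")  # activation layout expected by igemmlt
    CxB, SB = transform(CB, to_order=get_special_format_str())  # col_turing or col_ampere
    out32, Sout32 = igemmlt(C32A, CxB, SA, SB)  # int32 accumulation
    return mm_dequant(out32, Sout32, SCA, SCB)  # float16 result of shape (4, 16)
# _example_int8_matmul()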
def spmm_coo(cooA, B, out=None):
if out is None:
out = torch.empty(
(cooA.rows, B.shape[1]), device=B.device, dtype=B.dtype
)
nnz = cooA.nnz
assert cooA.rowidx.numel() == nnz
assert cooA.colidx.numel() == nnz
assert cooA.values.numel() == nnz
assert cooA.cols == B.shape[0]
transposed_B = False if B.is_contiguous() else True
ldb = B.stride()[(1 if transposed_B else 0)]
ldc = B.shape[1]
ptr = Cusparse_Context.get_instance().context
ptrRowidx = get_ptr(cooA.rowidx)
ptrColidx = get_ptr(cooA.colidx)
ptrValues = get_ptr(cooA.values)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
cnnz = ct.c_int32(cooA.nnz)
crowsA = ct.c_int32(cooA.rows)
ccolsA = ct.c_int32(cooA.cols)
ccolsB = ct.c_int32(B.shape[1])
cldb = ct.c_int32(ldb)
cldc = ct.c_int32(ldc)
is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])
lib.cspmm_coo(ptr, ptrRowidx, ptrColidx, ptrValues, cnnz, crowsA, ccolsA, ccolsB, cldb, ptrB, cldc, ptrC, ct.c_bool(transposed_B))
return out
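# Minimal usage sketch (illustrative, not part of the original module; assumes a CUDA device
# and a working cuSPARSE context). spmm_coo multiplies a COO sparse matrix with a dense matrix,
# which is how the sparse outlier part of LLM.int8() is evaluated.
def _example_spmm_coo():
    cooA = coo_zeros(8, 32, 4, torch.device("cuda"))
    cooA.rowidx[:] = torch.tensor([0, 1, 2, 3], dtype=torch.int32, device="cuda")
    cooA.colidx[:] = torch.tensor([5, 7, 11, 13], dtype=torch.int32, device="cuda")
    cooA.values[:] = 1.0
    B = torch.randn(32, 16, dtype=torch.float16, device="cuda")
    return spmm_coo(cooA, B)  # dense result of shape (8, 16)
# _example_spmm_coo()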
def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
if out is None:
out = torch.zeros(
(cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype
)
nnz = cooA.nnz
assert cooA.rowidx.numel() == nnz
assert cooA.colidx.numel() == nnz
assert cooA.values.numel() == nnz
assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"
transposed_B = False if B.is_contiguous() else True
ldb = B.stride()[(1 if transposed_B else 0)]
ldc = B.shape[1]
values, counts = torch.unique(cooA.rowidx, return_counts=True)
offset = counts.cumsum(0).int()
max_count, max_idx = torch.sort(counts, descending=True)
max_idx = max_idx.int()
max_count = max_count.int()
assert (
max_count[0] <= 32
), f"Current max count per row is 8 but found {max_count[0]}."
assert B.dtype in [torch.float16, torch.int8]
ptrOffset = get_ptr(offset)
ptrMaxCount = get_ptr(max_count)
ptrMaxIdx = get_ptr(max_idx)
ptrRowidx = get_ptr(cooA.rowidx)
ptrColidx = get_ptr(cooA.colidx)
ptrValues = get_ptr(cooA.values)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
ptrDequantStats = get_ptr(dequant_stats)
cnnz_rows = ct.c_int32(counts.numel())
cnnz = ct.c_int32(cooA.nnz)
crowsA = ct.c_int32(cooA.rows)
ccolsA = ct.c_int32(cooA.cols)
crowsB = ct.c_int32(B.shape[1])
ccolsB = ct.c_int32(B.shape[1])
cldb = ct.c_int32(ldb)
cldc = ct.c_int32(ldc)
# print(cooA.rowidx[:64])
# print(cooA.colidx[:64].sort()[0])
is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
if B.dtype == torch.float16:
lib.cspmm_coo_very_sparse_naive_fp16(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
elif B.dtype == torch.int8:
lib.cspmm_coo_very_sparse_naive_int8(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
# else: assertion error
return out
C = 127.0
def vectorwise_quant(x, dim=1, quant_type="vector"):
if quant_type == "linear":
max1 = torch.abs(x).max().float()
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type in ["vector", "row"]:
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
xq = torch.round(x * (C / max1)).to(torch.int8)
return xq, max1
elif quant_type == "zeropoint":
dtype = x.dtype
x = x.float()
dyna = x.max() - x.min()
if dyna == 0:
dyna = 1
qx = 255.0 / dyna
minx = x.min()
zpx = torch.round(minx * qx)
x = torch.round(qx * x - zpx) + zpx
return x, qx
elif quant_type in ["vector-zeropoint", "row-zeropoint"]:
dtype = x.dtype
x = x.float()
dyna = torch.amax(x, dim=dim, keepdim=True) - torch.amin(
x, dim=dim, keepdim=True
)
dyna[dyna == 0] = 1
qx = 255.0 / dyna
minx = torch.amin(x, dim=dim, keepdim=True)
zpx = torch.round(minx * qx)
x = torch.round(qx * x - zpx) + zpx
return x, qx
elif quant_type == "truncated-vector":
with torch.no_grad():
absx = torch.abs(x)
max1 = torch.amax(absx, dim=dim, keepdim=True)
max1 = max1 * 0.7
idx = absx > max1.expand_as(absx)
sign = torch.sign(x[idx])
x[idx] = max1.expand_as(absx)[idx] * sign
xq = torch.round(x / max1 * C).to(torch.int8)
return xq, max1
else:
return None
def vectorwise_dequant(xq, max1, quant_type="vector"):
if quant_type == "vector":
x = (xq / C * max1).to(torch.float32)
return x
else:
return None
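# Minimal round-trip sketch (illustrative, not part of the original module): quantize a tensor
# row-wise to int8 with vectorwise_quant and recover an approximation with vectorwise_dequant.
def _example_vectorwise_roundtrip():
    x = torch.randn(4, 8)
    xq, max1 = vectorwise_quant(x, dim=1, quant_type="vector")  # int8 codes + per-row absmax
    x_approx = vectorwise_dequant(xq, max1, quant_type="vector")  # float32 approximation of x
    return (x - x_approx).abs().max()
# _example_vectorwise_roundtrip()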
def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half, quant_type="vector"):
if quant_type == "linear":
norm = S1 * S2 / (C * C)
# double cast needed to prevent overflows
return (xq.float() * norm).to(dtype)
elif quant_type == "zeropoint":
norm = 1.0 / (S1 * S2)
return (xq.float() * norm).to(dtype)
elif quant_type == "row-zeropoint":
norm = 1.0 / (S1 * S2)
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
x *= norm
return x.to(dtype)
elif quant_type == "vector-zeropoint":
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
x *= 1.0 / S1
x *= 1.0 / S2.t()
return x.to(dtype)
elif quant_type == "row":
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
x *= S1 * S2 / (C * C)
return x.to(dtype)
elif quant_type in ["truncated-vector", "vector"]:
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
x *= S1 / C
x *= S2 / C
return x.to(dtype)
else:
return None
def dequant_min_max(xq, A, B, SA, SB, dtype=torch.half):
offset = B.float().t().sum(0) * (SA[0] + SA[1])
x = xq.float()
if len(xq.shape) == 2 and len(SB.shape) == 3:
SB = SB.squeeze(0)
if len(SB.shape) == 2:
x *= SB.t() / 127
else:
x *= SB / 127
x *= SA[1] / 127
x += offset
return x.to(dtype)
def extract_outliers(A, SA, idx):
shapeA = SA[0]
formatA = SA[1]
assert formatA in ["col_turing", "col_ampere"]
assert A.device.type == "cuda"
out = torch.zeros(
(shapeA[0], idx.numel()), dtype=torch.int8, device=A.device
)
idx_size = ct.c_int32(idx.numel())
rows = ct.c_int32(shapeA[0])
cols = ct.c_int32(shapeA[1])
ptrA = get_ptr(A)
ptrIdx = get_ptr(idx)
ptrOut = get_ptr(out)
prev_device = pre_call(A.device)
if formatA == 'col_turing':
lib.cextractOutliers_turing(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
elif formatA == "col_ampere":
lib.cextractOutliers_ampere(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
post_call(prev_device)
return out
|
import shlex
import subprocess
from typing import Tuple
def execute_and_return(command_string: str) -> Tuple[str, str]:
def _decode(subprocess_err_out_tuple):
return tuple(
to_decode.decode("UTF-8").strip()
for to_decode in subprocess_err_out_tuple
)
def execute_and_return_decoded_std_streams(command_string):
return _decode(
subprocess.Popen(
shlex.split(command_string),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()
)
std_out, std_err = execute_and_return_decoded_std_streams(command_string)
return std_out, std_err
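# Minimal usage sketch (illustrative, not part of the original module):
def _example_execute_and_return():
    std_out, std_err = execute_and_return("echo hello")
    return std_out  # "hello"
# _example_execute_and_return()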
|
import os
import sys
from warnings import warn
import torch
HEADER_WIDTH = 60
def print_header(
txt: str, width: int = HEADER_WIDTH, filler: str = "+"
) -> None:
txt = f" {txt} " if txt else ""
print(txt.center(width, filler))
def print_debug_info() -> None:
print(
"\nAbove we output some debug information. Please provide this info when "
f"creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose ...\n"
)
print_header("")
print_header("DEBUG INFORMATION")
print_header("")
print()
from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
from .cuda_setup.env_vars import to_be_ignored
from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
for k, v in os.environ.items():
if "/" in v and not to_be_ignored(k, v):
print(f"'{k}': '{v}'")
print_header("")
print(
"\nWARNING: Please be sure to sanitize sensible info from any such env vars!\n"
)
print_header("OTHER")
print(f"{COMPILED_WITH_CUDA = }")
cuda = get_cuda_lib_handle()
print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities(cuda)}")
print_header("")
print_header("DEBUG INFO END")
print_header("")
print(
"""
Running a quick check that:
+ library is importable
+ CUDA function is callable
"""
)
try:
from bitsandbytes.optim import Adam
p = torch.nn.Parameter(torch.rand(10, 10).cuda())
a = torch.rand(10, 10).cuda()
p1 = p.data.sum().item()
adam = Adam([p])
out = a * p
loss = out.sum()
loss.backward()
adam.step()
p2 = p.data.sum().item()
assert p1 != p2
print("SUCCESS!")
print("Installation was successful!")
sys.exit(0)
except ImportError:
print()
warn(
f"WARNING: {__package__} is currently running as CPU-only!\n"
"Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
f"If you think that this is so erroneously,\nplease report an issue!"
)
print_debug_info()
sys.exit(0)
except Exception as e:
print(e)
print_debug_info()
sys.exit(1)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import Int8Params, Linear8bitLt, StableEmbedding
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, TypeVar, Union, overload
import torch
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn
import bitsandbytes as bnb
from bitsandbytes.optim import GlobalOptimManager
T = TypeVar("T", bound="torch.nn.Module")
class StableEmbedding(torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
device=None,
dtype=None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
device,
dtype,
)
self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
def reset_parameters(self) -> None:
torch.nn.init.xavier_uniform_(self.weight)
self._fill_padding_idx_with_zero()
""" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
to make the Layer compatible with Pytorch < 1.9.
This means that if this changes in future PyTorch releases this need to change too
which is cumbersome. However, with this we can ensure compatibility with previous
PyTorch releases.
"""
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor) -> Tensor:
emb = F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# always apply layer norm in full precision
emb = emb.to(torch.get_default_dtype())
return self.norm(emb).to(self.weight.dtype)
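# Minimal usage sketch (illustrative, not part of the original module): StableEmbedding is a
# drop-in replacement for torch.nn.Embedding that adds layer norm, Xavier init, and 32-bit
# optimizer state for its weight, which stabilizes training with 8-bit optimizers.
def _example_stable_embedding():
    emb = StableEmbedding(1000, 64)
    tokens = torch.randint(0, 1000, (2, 16))
    return emb(tokens)  # shape (2, 16, 64)
# _example_stable_embedding()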
class Embedding(torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
def reset_parameters(self) -> None:
torch.nn.init.xavier_uniform_(self.weight)
self._fill_padding_idx_with_zero()
""" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
to make the Layer compatible with Pytorch < 1.9.
This means that if this changes in future PyTorch releases this need to change too
which is cumbersome. However, with this we can ensure compatibility with previous
PyTorch releases.
"""
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor) -> Tensor:
emb = F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb
class Int8Params(torch.nn.Parameter):
def __new__(
cls,
data=None,
requires_grad=True,
has_fp16_weights=False,
CB=None,
SCB=None,
):
cls.has_fp16_weights = has_fp16_weights
cls.CB = None
cls.SCB = None
if data is None:
data = torch.empty(0)
return torch.Tensor._make_subclass(cls, data, requires_grad)
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
# we store the 8-bit row-major weight
# we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
setattr(self, "CB", CB)
setattr(self, "SCB", SCB)
return self
@overload
def to(
self: T,
device: Optional[Union[int, device]] = ...,
dtype: Optional[Union[dtype, str]] = ...,
non_blocking: bool = ...,
) -> T:
...
@overload
def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
...
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(
*args, **kwargs
)
if (
device is not None
and device.type == "cuda"
and self.data.device.type == "cpu"
):
return self.cuda(device)
else:
new_param = Int8Params(
super().to(
device=device, dtype=dtype, non_blocking=non_blocking
),
requires_grad=self.requires_grad,
has_fp16_weights=self.has_fp16_weights,
)
new_param.CB = self.CB
new_param.SCB = self.SCB
return new_param
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
memory_efficient_backward=False, threshold=0.0, index=None):
super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
def init_8bit_state(self):
self.state.CB = self.weight.CB
self.state.SCB = self.weight.SCB
self.weight.CB = None
self.weight.SCB = None
def forward(self, x: torch.Tensor):
self.state.is_training = self.training
if self.weight.CB is not None:
self.init_8bit_state()
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
if not self.state.has_fp16_weights:
if self.state.CB is not None and self.state.CxB is not None:
# we converted 8-bit row major to turing/ampere format in the first inference pass
# we no longer need the row-major weight
del self.state.CB
self.weight.data = self.state.CxB
return out
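# Minimal usage sketch (illustrative, not part of the original module; assumes a CUDA GPU).
# Moving Linear8bitLt to CUDA quantizes the weight to int8 via Int8Params.cuda, and
# threshold > 0.0 enables the mixed-precision outlier decomposition.
def _example_linear8bitlt():
    layer = Linear8bitLt(64, 32, bias=True, has_fp16_weights=False, threshold=6.0).cuda()
    x = torch.randn(4, 64, dtype=torch.float16, device="cuda")
    return layer(x)  # float16 output of shape (4, 32)
# _example_linear8bitlt()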
|
import operator
import warnings
from dataclasses import dataclass
from functools import reduce # Required in Python 3
from typing import Tuple, Optional
import torch
import bitsandbytes.functional as F
# math.prod is not available in Python < 3.8
def prod(iterable):
return reduce(operator.mul, iterable, 1)
tensor = torch.Tensor
# The inverse transformations for the colTuring and colAmpere formats were contributed by Alex Borzunov:
# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py
"""
This class pools outlier dimensions across layers.
This is particularly important for small models where outlier features
are less systematic and occur with low frequency.
"""
class GlobalOutlierPooler:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.outliers = set()
self.model_dim = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def add_outliers(self, outlier_idx, feature_dim):
if self.model_dim is None:
self.model_dim = feature_dim
if feature_dim != self.model_dim:
return # we do not encode outliers for the 2nd FFN layer
self.outliers.update(outlier_idx.tolist())
def get_current_outlier_idx(self):
return torch.Tensor(list(self.outliers)).to(torch.int64)
def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]):
"""
Compute a permutation of indices that invert the specified (tiled) matrix transformation
:param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]
:param tile_size: higher-level tile dimensions, i.e. (8, 32) for Turing and (32, 32) for Ampere
:note: we assume that transform_tile applies to a cpu-based int8 tensor of shape tile_size
:example: transform_tile function for the turing layout (bitsandbytes.functional as F)
:returns: indices
"""
d1, d2 = tile_size
assert 0 < d1 * d2 < 2**64
tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2)
# encode each position in tile as a tuple of <= 8 unique bytes
permuted_tile_indices = torch.zeros_like(tile_indices)
for i in range(8):
# select i-th byte, apply transformation and trace where each index ended up
ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode="trunc") % 256
sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous()
assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), "int overflow"
permuted_tile_i = transform_tile(sample_tile_i)
ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128
permuted_tile_indices += ith_permuted_indices * (256**i)
if d1 * d2 < 256**i:
break # if all indices fit in i bytes, stop early
return permuted_tile_indices
def undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:
"""
Undo a tiled permutation such as turing or ampere layout
:param permuted_tensor: torch tensor in a permuted layout
:param tile_indices: reverse transformation indices, from get_inverse_transform_indices
:return: contiguous row-major tensor
"""
(rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape
assert rows % tile_rows == cols % tile_cols == 0, "tensor must contain a whole number of tiles"
tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()
outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda
outputs[tile_indices.flatten()] = tensor
outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows)
outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols)
return outputs.reshape(rows, cols).contiguous()
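# Minimal round-trip sketch (illustrative, not part of the original module; assumes a CUDA GPU):
# recover a row-major int8 weight from its turing/ampere layout with the two helpers above.
def _example_undo_layout():
    order = F.get_special_format_str()  # "col_turing" or "col_ampere"
    tile_size = (8, 32) if order == "col_turing" else (32, 32)
    transform_tile = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device)
    tile_indices = get_inverse_transform_indices(transform_tile, tile_size)
    W = torch.randint(-128, 127, (64, 64), dtype=torch.int8, device="cuda")
    CxW, _ = F.transform(W, to_order=order)
    W_back = undo_layout(CxW, tile_indices.cuda())
    return torch.equal(W, W_back)  # True when the layout round-trip is exact
# _example_undo_layout()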
class MatMul8bit(torch.autograd.Function):
@staticmethod
def forward(ctx, A, B, out=None, quant_type="vector", precision=None):
if precision is None:
precision = [8, 8, 8]
if precision[0] != 8:
with torch.no_grad():
output = torch.matmul(A, B)
else:
if len(B.shape) == 2:
dim = 0
else:
dim = 1
qA, SA = F.vectorwise_quant(A, dim=-1, quant_type=quant_type)
qB, SB = F.vectorwise_quant(B, dim=dim, quant_type=quant_type)
iout = F.igemm(qA, qB)
output = F.vectorwise_mm_dequant(iout, SA, SB, A.dtype, quant_type)
if A.requires_grad or B.requires_grad:
ctx.save_for_backward(A, B)
ctx.quant_type = quant_type
ctx.precision = precision
return output
@staticmethod
def backward(ctx, grad_output):
A, B = ctx.saved_tensors
quant_type = ctx.quant_type
precision = ctx.precision
grad_A = grad_B = None
if B.requires_grad:
if len(A.shape) == 3:
dims = [0, 1]
# bsi -> ibs
permute_dim = [0, 2, 1]
else:
dims = [0]
# bs -> sb
permute_dim = [1, 0]
if precision[1] != 8:
with torch.no_grad():
grad_B = torch.matmul(A.permute(permute_dim), grad_output)
else:
if len(B.shape) == 2 and len(A.shape) == 3:
grad_output = grad_output.contiguous()
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
qgrad_output, S1 = F.vectorwise_quant(
grad_output.view(-1, grad_output.shape[2]),
dim=0,
quant_type=quant_type,
)
if not A.is_contiguous():
A = A.contiguous()
qA, S2 = F.vectorwise_quant(
A.view(-1, A.shape[2]), dim=0, quant_type=quant_type
)
igrad_B = F.igemm(qA.t(), qgrad_output)
grad_B = F.vectorwise_mm_dequant(
igrad_B, S2.t(), S1, grad_output.dtype, quant_type
)
else:
qgrad_output, S1 = F.vectorwise_quant(
grad_output, dim=dims, quant_type=quant_type
)
qA, S2 = F.vectorwise_quant(
A, dim=dims, quant_type=quant_type
)
igrad_B = F.igemm(qA.permute(permute_dim), qgrad_output)
grad_B = F.vectorwise_mm_dequant(
igrad_B,
S2.permute(permute_dim),
S1,
grad_output.dtype,
quant_type,
)
if A.requires_grad:
if len(grad_output.shape) == 3:
dims = [2]
else:
dims = [1]
if len(B.shape) == 3:
# bio -> boi
permute_dim = [0, 2, 1]
dim_B = dims
else:
# io -> oi
permute_dim = [1, 0]
dim_B = [1]
if precision[2] != 8:
with torch.no_grad():
grad_A = torch.matmul(grad_output, B.permute(permute_dim))
else:
qgrad_output, S1 = F.vectorwise_quant(
grad_output, dim=dims, quant_type=quant_type
)
qB, S3 = F.vectorwise_quant(B, dim=dim_B, quant_type=quant_type)
igrad_A = F.igemm(qgrad_output, qB.permute(permute_dim))
grad_A = F.vectorwise_mm_dequant(
igrad_A,
S1,
S3.permute(permute_dim),
grad_output.dtype,
quant_type,
)
return grad_A, grad_B, None, None, None
mm_cublas = MatMul8bit.apply
bmm_cublas = MatMul8bit.apply
matmul_cublas = MatMul8bit.apply
@dataclass
class MatmulLtState:
tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
def reset_grads(self):
self.CB = None
self.CxB = None
self.SB = None
self.SCB = None
self.CxBt = None
self.SBt = None
self.CBt = None
def get_tile_size(self):
assert self.formatB in (
"col_turing",
"col_ampere",
), f"please find this assert and manually enter tile size for {self.formatB}"
return (8, 32) if self.formatB == "col_turing" else (32, 32)
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt
# default to PyTorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
if A.shape[-1] == B.shape[0]:
return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)
# 1. Quantize A
# 2. Quantize B
# 3. Matmul
# 4. Mixed-precision decomposition matmul
# 5. Save state
formatB = state.formatB
input_shape = A.shape
if state.outlier_pool is None:
state.outlier_pool = GlobalOutlierPooler.get_instance()
# Cast A to fp16
if A.dtype != torch.float16:
warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
# 1. Quantize A
if len(A.shape) == 3:
A = A.view(-1, A.shape[-1]).contiguous()
CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A.to(torch.float16), threshold=state.threshold)
if state.threshold > 0.0 and coo_tensorA is not None:
if state.has_fp16_weights:
idx = torch.unique(coo_tensorA.colidx).long()
CA[:, idx] = 0
CAt[:, idx] = 0
subA = A[:, idx]
state.subB = B[:, idx].t().contiguous()
state.idx = idx
else:
if state.CxB is None and using_igemmlt:
# B is in 8-bit row-major format; we can transform it back to 16-bit to extract outlier dimensions
# we also need to convert it to the turing/ampere format
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
else:
if not state.has_fp16_weights and state.CxB is None and using_igemmlt:
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
subA = None
# 2. Quantize B
if state.has_fp16_weights:
has_grad = True if (getattr(B, "grad", None) is not None) else False
is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
if is_transposed:
B = B.contiguous()
if (state.is_training and not has_grad) or state.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
state.SCB,
state.SCBt,
coo_tensorB,
) = F.double_quant(B.to(torch.float16))
if using_igemmlt:
state.CxB, state.SB = F.transform(CB, to_order=formatB)
else:
state.CB = CB
else:
has_grad = False
if coo_tensorA is not None and not state.has_fp16_weights:
# extract outliers
outlier_idx = torch.unique(coo_tensorA.colidx)
state.idx = outlier_idx
# state.outlier_pool.add_outliers(outlier_idx, A.shape[-1])
# if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]:
# # do not use pool for 2nd FFN layer
# state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device)
# else:
# state.idx = outlier_idx
if state.CxB is not None:
outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int())
else:
outliers = state.CB[:, state.idx.long()].clone()
state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
CA[:, state.idx.long()] = 0
CAt[:, state.idx.long()] = 0
subA = A[:, state.idx.long()]
shapeB = state.SB[0] if state.SB else B.shape
if len(input_shape) == 3:
output_shape = (input_shape[0], input_shape[1], shapeB[0])
else:
output_shape = (input_shape[0], shapeB[0])
# 3. Matmul
if using_igemmlt:
C32A, SA = F.transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB)
if bias is None or bias.dtype == torch.float16:
# we apply the fused bias here
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias)
output = output.to(A.dtype)
else: # apply bias separately
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None)
output = output.to(A.dtype).add_(bias)
else:
A_wo_outliers = A.clone()
if state.idx is not None:
A_wo_outliers[:, state.idx.long()] = 0
output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype))
output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0))
if bias is not None:
output = output.add_(bias)
# 4. Mixed-precision decomposition matmul
if coo_tensorA is not None and subA is not None:
output += torch.matmul(subA, state.subB)
# 5. Save state
ctx.state = state
ctx.formatB = formatB
ctx.grad_shape = input_shape
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (CAt, subA)
ctx.tensor_states = (SCAt, state.idx)
else:
ctx.tensors = [None, None]
ctx.tensor_states = (None, None)
ctx.save_for_backward(None, None)
clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
return clone_func(output.view(output_shape))
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
CAt, subA = ctx.tensors
SCAt, idx = ctx.tensor_states
formatB = ctx.formatB
state = ctx.state
grad_A = grad_B = grad_bias = None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# Cast grad_output to fp16
if len(grad_output.shape) == 3:
grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))
if req_gradB:
CxAt, SAt = F.transform(CAt, formatB, transpose=True)
C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True)
gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt)
grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt)
if state.threshold > 0.0 and subA is not None:
grad_B[:, idx] += torch.matmul(grad_output.t(), subA)
if req_gradA:
if state.CBt is not None:
C32grad, Sgrad = F.transform(Cgrad, "col32")
if state.CxBt is None:
state.CxBt, state.SBt = F.transform(state.CBt, to_order=formatB, transpose=True)
gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt)
grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CB is not None:
CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CxB is not None:
if state.tile_indices is None:
order, tile_size = state.formatB, state.get_tile_size()
transform = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device)
with torch.no_grad():
state.tile_indices = get_inverse_transform_indices(transform, tile_size).to(state.CxB.device)
CB = (
undo_layout(state.CxB, state.tile_indices)
.to(ctx.dtype_A)
.mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
)
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
else:
raise Exception("State must contain either CBt or CB or CxB matrix for backward")
return grad_A, grad_B, None, grad_bias, None
def matmul(
A: tensor,
B: tensor,
out: tensor = None,
state: MatmulLtState = None,
threshold=0.0,
bias=None
):
state = state or MatmulLtState()
if threshold > 0.0:
state.threshold = threshold
return MatMul8bitLt.apply(A, B, out, bias, state)
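# Minimal usage sketch (illustrative, not part of the original module; assumes a CUDA GPU).
# matmul dispatches to MatMul8bitLt; threshold > 0.0 turns on the mixed-precision
# decomposition for outlier features.
def _example_matmul_8bit():
    A = torch.randn(4, 64, dtype=torch.float16, device="cuda", requires_grad=True)
    B = torch.randn(32, 64, dtype=torch.float16, device="cuda", requires_grad=True)
    out = matmul(A, B, threshold=6.0)  # computes A @ B^T in 8-bit, shape (4, 32)
    out.sum().backward()  # gradients flow back through the int8 path
    return A.grad, B.grad
# _example_matmul_8bit()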
|
from ._functions import undo_layout, get_inverse_transform_indices
|
import os
from typing import Dict
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
"_", # current Python interpreter
}
return env_var in ignorable
def might_contain_a_path(candidate: str) -> bool:
return "/" in candidate
def is_active_conda_env(env_var: str) -> bool:
return "CONDA_PREFIX" == env_var
def is_other_conda_env_var(env_var: str) -> bool:
return "CONDA" in env_var
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
return is_active_conda_env(env_var) or (
might_contain_a_path(value) and not
is_other_conda_env_var(env_var) and not
to_be_ignored(env_var, value)
)
def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
return {
env_var: value
for env_var, value in os.environ.items()
if is_relevant_candidate_env_var(env_var, value)
}
|
"""
extract factors the build is dependent on:
[X] compute capability
[ ] TODO: Q - What if we have multiple GPUs of different makes?
- CUDA version
- Software:
- CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
- CuBLAS-LT: full-build 8-bit optimizer
- no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
evaluation:
- if paths faulty, return meaningful error
- else:
- determine CUDA version
- determine capabilities
- based on that set the default path
"""
import ctypes as ct
import os
import errno
import torch
from warnings import warn
from pathlib import Path
from typing import Set, Union
from .env_vars import get_potentially_lib_path_containing_env_vars
CUDA_RUNTIME_LIB: str = "libcudart.so"
class CUDASetup:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def generate_instructions(self):
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
package_dir = Path(__file__).parent.parent
binary_path = package_dir / binary_name
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. CUDA driver not installed')
self.add_log_entry('2. CUDA not installed')
self.add_log_entry('3. You have multiple conflicting CUDA libraries')
self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
self.print_log_stack()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
self.print_log_stack()
def add_log_entry(self, msg, is_warning=False):
self.cuda_setup_log.append((msg, is_warning))
def print_log_stack(self):
for msg, is_warning in self.cuda_setup_log:
if is_warning:
warn(msg)
else:
print(msg)
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
return {
path / CUDA_RUNTIME_LIB
for path in candidate_paths
if (path / CUDA_RUNTIME_LIB).is_file()
}
def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
"""
Searches a given environmental var for the CUDA runtime library,
i.e. `libcudart.so`.
"""
return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
return get_cuda_runtime_lib_paths(
resolve_paths_list(paths_list_candidate)
)
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
"We'll flip a coin and try one of these, in order to fail forward.\n"
"Either way, this might cause trouble in the future:\n"
"If you get `CUDA error: invalid device function` errors, the above "
"might be the cause and the solution is to make sure only one "
f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
"""
Searches for CUDA installations in the following order of priority:
1. active conda env
2. LD_LIBRARY_PATH
3. any other env vars, while ignoring those that
- are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
- don't contain the path separator `/`
If multiple libraries are found in part 3, we optimistically try one,
while giving a warning message.
"""
candidate_env_vars = get_potentially_lib_path_containing_env_vars()
if "CONDA_PREFIX" in candidate_env_vars:
conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
warn_in_case_of_duplicates(conda_cuda_libs)
if conda_cuda_libs:
return next(iter(conda_cuda_libs))
CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
if "LD_LIBRARY_PATH" in candidate_env_vars:
lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
warn_in_case_of_duplicates(lib_ld_cuda_libs)
if lib_ld_cuda_libs:
return next(iter(lib_ld_cuda_libs))
CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
remaining_candidate_env_vars = {
env_var: value for env_var, value in candidate_env_vars.items()
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}
cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
if len(cuda_runtime_libs) == 0:
CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environment variable path. Searching /usr/local/cuda/lib64...')
cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
warn_in_case_of_duplicates(cuda_runtime_libs)
return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
def check_cuda_result(cuda, result_val):
# 3. Check for CUDA errors
if result_val != 0:
error_str = ct.c_char_p()
cuda.cuGetErrorString(result_val, ct.byref(error_str))
if error_str.value is not None:
CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
else:
CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
def get_cuda_version(cuda, cudart_path):
if cuda is None: return None
try:
cudart = ct.CDLL(cudart_path)
except OSError:
CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
return None
version = ct.c_int()
try:
check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
except AttributeError as e:
CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: It seems that your CUDA installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
version = int(version.value)
major = version//1000
minor = (version-(major*1000))//10
if major < 11:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!')
return f'{major}{minor}'
def get_cuda_lib_handle():
# 1. find libcuda.so library (GPU driver) (/usr/lib)
try:
cuda = ct.CDLL("libcuda.so")
except OSError:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
return None
check_cuda_result(cuda, cuda.cuInit(0))
return cuda
def get_compute_capabilities(cuda):
"""
1. find libcuda.so library (GPU driver) (/usr/lib)
init_device -> init variables -> call function by reference
2. call extern C function to determine CC
(https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
3. Check for CUDA errors
https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
# bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
"""
nGpus = ct.c_int()
cc_major = ct.c_int()
cc_minor = ct.c_int()
device = ct.c_int()
check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
ccs = []
for i in range(nGpus.value):
check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
ref_major = ct.byref(cc_major)
ref_minor = ct.byref(cc_minor)
# 2. call extern C function to determine CC
check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
ccs.append(f"{cc_major.value}.{cc_minor.value}")
return ccs
# def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
def get_compute_capability(cuda):
"""
Extracts the highest compute capability from all available GPUs, as compute
capabilities are downwards compatible. If no GPUs are detected, it returns
None.
"""
if cuda is None: return None
# TODO: handle different compute capabilities; for now, take the max
ccs = get_compute_capabilities(cuda)
if ccs: return ccs[-1]
def evaluate_cuda_setup():
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
print('')
print('='*35 + 'BUG REPORT' + '='*35)
print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
print('='*80)
if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
cuda_setup = CUDASetup.get_instance()
cudart_path = determine_cuda_runtime_lib_path()
cuda = get_cuda_lib_handle()
cc = get_compute_capability(cuda)
cuda_version_string = get_cuda_version(cuda, cudart_path)
failure = False
if cudart_path is None:
failure = True
cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
else:
cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
if cc == '' or cc is None:
failure = True
cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
else:
cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
if cuda is None:
failure = True
else:
cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
    # 7.5 is the minimum compute capability for cublasLt
has_cublaslt = is_cublasLt_compatible(cc)
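    # e.g. (for illustration) cc '7.0' (V100) selects the *_nocublaslt.so binary below,
    # while cc '8.0' (A100) can use the cublasLt-enabled libbitsandbytes_cuda<version>.so build.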
# TODO:
    # (1) CUDA missing cases (no CUDA installed, but the CUDA driver / nvidia-smi is accessible)
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if failure:
binary_name = "libbitsandbytes_cpu.so"
elif has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
return binary_name, cudart_path, cuda, cc, cuda_version_string
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class RMSprop(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class RMSprop8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class RMSprop32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class Lion(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Lion8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Lion32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer2State
class LAMB(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
class LAMB8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
class LAMB32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class SGD(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class SGD8bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class SGD32bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim import Optimizer
from bitsandbytes.optim.optimizer import Optimizer1State
class LARS(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class LARS8bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class LARS32bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class PytorchLARS(Optimizer):
def __init__(
self,
params,
lr=0.01,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
max_unorm=0.02,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
max_unorm=max_unorm,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening"
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
max_unorm = group["max_unorm"]
lr = group["lr"]
for p in group["params"]:
if p.grad is None:
continue
state = self.state[p]
d_p = p.grad
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
buf = state.get("momentum_buffer", None)
if buf is None:
buf = torch.clone(d_p).detach()
state["momentum_buffer"] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
update = d_p + buf * momentum
else:
update = buf
update_scale = 1.0
if max_unorm > 0.0:
assert p.dtype == torch.float32
pnorm = torch.norm(p.detach())
unorm = torch.norm(update)
if unorm > max_unorm * pnorm:
update_scale = max_unorm * pnorm / unorm
p.add_(update, alpha=-lr * update_scale)
return loss
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.cextension import COMPILED_WITH_CUDA
from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit
from .adam import Adam, Adam8bit, Adam32bit
from .adamw import AdamW, AdamW8bit, AdamW32bit
from .lamb import LAMB, LAMB8bit, LAMB32bit
from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS
from .optimizer import GlobalOptimManager
from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit
from .lion import Lion, Lion8bit, Lion32bit
from .sgd import SGD, SGD8bit, SGD32bit
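# Usage sketch (illustrative; assumes a CUDA build of bitsandbytes is installed):
#
#   import bitsandbytes as bnb
#   optimizer = bnb.optim.Adam8bit(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
#
# The 8-bit variants keep their optimizer state in blockwise-quantized uint8 buffers,
# while parameters and gradients stay in their original precision.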
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class Adagrad(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adagrad8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=8,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
assert block_wise
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adagrad32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer2State
class AdamW(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AdamW8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AdamW32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import torch
import torch.distributed as dist
import bitsandbytes.functional as F
from bitsandbytes.optim.optimizer import Optimizer2State
class Adam(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adam8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adam32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AnalysisAdam(torch.optim.Optimizer):
"""Adam that performs 8-bit vs 32-bit error analysis.
This implementation is modified from torch.optim.Adam based on:
    `Fixing Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
bnb_analysis="dynamic-blockwise",
savedir=None,
):
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
super().__init__(params, defaults)
self.analysis = bnb_analysis
self.savedir = savedir
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p_id, p in enumerate(group["params"]):
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
assert not amsgrad
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
state["abserrors"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
state["relerrors"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
state["counts"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
state["step"] += 1
beta1, beta2 = group["betas"]
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = (
group["lr"] * math.sqrt(bias_correction2) / bias_correction1
)
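                # Adam bias correction folded into the step size:
                # step_size = lr * sqrt(1 - beta2^t) / (1 - beta1^t), so early steps are
                # scaled up to compensate for the zero-initialized moment estimates.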
e = state["abserrors"]
rele = state["relerrors"]
counts = state["counts"]
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
update_fp32 = exp_avg / denom
if (
p_data_fp32.numel() <= 8192
or p_data_fp32.numel() > 50000 * 1000
):
# embedding layer or too small
p_data_fp32 += -step_size * update_fp32
else:
if self.analysis == "dynamic-blockwise":
code1 = F.create_dynamic_map(signed=True).to(p.device)
code2 = F.create_dynamic_map(signed=False).to(p.device)
C1, S1 = F.quantize_blockwise(exp_avg, code=code1)
state1 = F.dequantize_blockwise(C1, S1)
C2, S2 = F.quantize_blockwise(exp_avg_sq, code=code2)
state2 = F.dequantize_blockwise(C2, S2)
elif self.analysis == "dynamic":
code1 = F.create_dynamic_map(signed=True).to(p.device)
code2 = F.create_dynamic_map(signed=False).to(p.device)
C1, S1 = F.quantize(exp_avg, code=code1)
state1 = F.dequantize(C1, S1)
C2, S2 = F.quantize(exp_avg_sq, code=code2)
state2 = F.dequantize(C2, S2)
elif self.analysis == "linear":
code1 = F.create_linear_map(signed=True).to(p.device)
code2 = F.create_linear_map(signed=False).to(p.device)
C1, S1 = F.quantize(exp_avg, code=code1)
state1 = F.dequantize(C1, S1)
C2, S2 = F.quantize(exp_avg_sq, code=code2)
state2 = F.dequantize(C2, S2)
elif self.analysis == "quantile":
code1 = F.estimate_quantiles(exp_avg)
code2 = F.estimate_quantiles(exp_avg_sq)
C1 = F.quantize_no_absmax(exp_avg, code=code1)
state1 = F.dequantize_no_absmax(C1, code1)
C2 = F.quantize_no_absmax(exp_avg_sq, code=code2)
state2 = F.dequantize_no_absmax(C2, code2)
elif self.analysis == "my-quantization-routine":
pass
# 1. get code
# 2. quantize
# 3. dequantize
# Error will be calculated automatically!
else:
raise ValueError(
f"Invalid analysis value: {self.analysis}!"
)
denom = state2.sqrt().add_(group["eps"])
update_8bit = state1 / denom
abserr = torch.abs(update_8bit - update_fp32)
relerr = abserr / torch.abs(update_fp32 + 1e-6)
C1, C2 = C1.int(), C2.int()
F.histogram_scatter_add_2d(e, C1.int(), C2.int(), abserr)
F.histogram_scatter_add_2d(rele, C1.int(), C2.int(), relerr)
F.histogram_scatter_add_2d(
counts, C1.int(), C2.int(), torch.ones_like(abserr)
)
p_data_fp32 += -step_size * update_fp32
if not dist.is_initialized() or dist.get_rank() == 0:
                        if self.savedir is not None and self.savedir != "" and state["step"] % 100 == 0:
if not os.path.exists(self.savedir):
os.makedirs(self.savedir)
shapestr = "_".join(
[str(dim) for dim in p_data_fp32.shape]
)
pathe = os.path.join(
self.savedir, f"{p_id}_{shapestr}_abserr.pkl"
)
pathrele = os.path.join(
self.savedir, f"{p_id}_{shapestr}_relerr.pkl"
)
pathcounts = os.path.join(
self.savedir, f"{p_id}_{shapestr}_counts.pkl"
)
torch.save(e, pathe)
torch.save(rele, pathrele)
torch.save(counts, pathcounts)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
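# Usage sketch (illustrative): AnalysisAdam performs the regular 32-bit update while also
# simulating the chosen 8-bit quantization and accumulating absolute/relative errors per
# (code1, code2) bucket, optionally dumping them to `savedir` every 100 steps on rank 0.
#
#   opt = AnalysisAdam(model.parameters(), lr=1e-3,
#                      bnb_analysis='dynamic-blockwise', savedir='./adam_analysis')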
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import abc as container_abcs
from collections import defaultdict
from copy import deepcopy
from itertools import chain
import torch
import bitsandbytes.functional as F
class MockArgs:
def __init__(self, initial_data):
for key in initial_data:
setattr(self, key, initial_data[key])
class GlobalOptimManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
self.module_weight_config_triple = []
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def register_parameters(self, params):
param_groups = list(params)
if not isinstance(param_groups[0], dict):
param_groups = [{"params": param_groups}]
for group_index, group in enumerate(param_groups):
for p_index, p in enumerate(group["params"]):
if id(p) in self.pid2config:
self.index2config[(group_index, p_index)] = self.pid2config[
id(p)
]
def override_config(
self, parameters, key=None, value=None, key_value_dict=None
):
"""
Overrides initial optimizer config for specific parameters.
        The key-values of the optimizer config for the input parameters are overridden.
        These can be regular optimizer hyperparameters such as "betas" or "lr", or
        8-bit specific parameters such as "optim_bits" and "percentile_clipping".
Parameters
----------
parameters : torch.Tensor or list(torch.Tensors)
The input parameters.
key : str
            The hyperparameter to override.
value : object
            The value for the hyperparameter.
key_value_dict : dict
A dictionary with multiple key-values to override.
"""
self.uses_config_override = True
if isinstance(parameters, torch.nn.Parameter):
parameters = [parameters]
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
if key is not None and value is not None:
assert key_value_dict is None
key_value_dict = {key: value}
if key_value_dict is not None:
for p in parameters:
if id(p) in self.pid2config:
self.pid2config[id(p)].update(key_value_dict)
else:
self.pid2config[id(p)] = key_value_dict
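    # Usage sketch for override_config (illustrative; `model.emb` is a hypothetical module):
    # register parameters before constructing the bitsandbytes optimizer, then override
    # the config for the parameters that should keep 32-bit optimizer states.
    #
    #   mng = GlobalOptimManager.get_instance()
    #   mng.register_parameters(model.parameters())
    #   mng.override_config(model.emb.weight, 'optim_bits', 32)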
def register_module_override(self, module, param_name, config):
self.module_weight_config_triple.append((module, param_name, config))
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
def fill_qmap(self):
self.name2qmap["dynamic"] = F.create_dynamic_map(signed=True)
self.name2qmap["udynamic"] = F.create_dynamic_map(signed=False)
def __setstate__(self, state):
super().__setstate__(state)
def load_state_dict(self, state_dict):
r"""Loads the optimizer state.
Args:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict["param_groups"]
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of "
"parameter groups"
)
param_lens = (len(g["params"]) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
# Update the state
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable(g["params"] for g in saved_groups),
chain.from_iterable(g["params"] for g in groups),
)
}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
if param.is_floating_point() and value.dtype != torch.uint8:
value = value.to(param.dtype)
return value
elif isinstance(value, dict):
for k, v in value.items():
if k in self.non_castable_tensor_keys:
value[k] = v.to(param.device)
else:
value[k] = cast(param, v)
return value
elif isinstance(value, container_abcs.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = defaultdict(dict)
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
# Update parameter groups, setting their 'params' value
def update_group(group, new_group):
new_group["params"] = group["params"]
return new_group
param_groups = [
update_group(g, ng) for g, ng in zip(groups, saved_groups)
]
self.__setstate__({"state": state, "param_groups": param_groups})
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
self.state[p][k] = v.to(p.device)
def check_overrides(self):
for module, attr, config in self.mng.module_weight_config_triple:
pmodule = getattr(module, attr)
assert pmodule is not None
assert isinstance(pmodule, torch.Tensor) or isinstance(
                pmodule, torch.nn.Parameter
)
found = False
for gindex, group in enumerate(self.param_groups):
if found:
break
for pindex, p in enumerate(group["params"]):
if found:
break
if id(p) == id(pmodule):
# found the matching parameter
# init override
self.mng.pid2config[id(p)] = config
self.mng.index2config[
(gindex, pindex)
] = self.mng.pid2config[id(p)]
found = True
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
self.update_step(group, p, gindex, pindex)
return loss
def get_config(self, gindex, pindex, group):
config = {}
config["betas"] = group["betas"]
config["eps"] = group["eps"]
config["weight_decay"] = group["weight_decay"]
config["lr"] = group["lr"]
config["optim_bits"] = self.args.optim_bits
config["min_8bit_size"] = self.args.min_8bit_size
config["percentile_clipping"] = self.args.percentile_clipping
config["block_wise"] = self.args.block_wise
config["max_unorm"] = self.args.max_unorm
config["skip_zeros"] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
def init_state(self, group, p, gindex, pindex):
raise NotImplementedError("init_state method needs to be overridden")
def update_step(self, group, p, gindex, pindex):
raise NotImplementedError(
"The update_step method needs to be overridden"
)
class Optimizer2State(Optimizer8bit):
def __init__(
self,
optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if isinstance(betas, str):
# format: '(beta1, beta2)'
betas = betas.replace("(", "").replace(")", "").strip().split(",")
betas = [float(b) for b in betas]
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
config = self.get_config(gindex, pindex, group)
if config["optim_bits"] == 32:
dtype = torch.float32
elif config["optim_bits"] == 8:
dtype = torch.uint8
else:
raise NotImplementedError(
f'Amount of optimizer bits not supported: {config["optim_bits"]}'
)
if p.numel() < config["min_8bit_size"]:
dtype = torch.float32
state = self.state[p]
state["step"] = 0
if dtype == torch.float32 or (
dtype == torch.uint8 and p.numel() < 4096
):
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
state["state2"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
elif dtype == torch.uint8:
if state["step"] == 0:
if "dynamic" not in self.name2qmap:
self.fill_qmap()
self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
p.device
)
self.name2qmap["udynamic"] = self.name2qmap["udynamic"].to(
p.device
)
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap1"] = self.name2qmap["dynamic"]
state["state2"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap2"] = self.name2qmap["udynamic"]
if config["block_wise"]:
n = p.numel()
blocks = n // 2048
blocks += 1 if n % 2048 > 0 else 0
state["absmax1"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
state["absmax2"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
state = self.state[p]
grad = p.grad
config = self.get_config(gindex, pindex, group)
state["step"] += 1
step = state["step"]
if config["percentile_clipping"] < 100:
current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(
grad, state["gnorm_vec"], step, config["percentile_clipping"]
)
else:
gnorm_scale = 1.0
if state["state1"].dtype == torch.float:
F.optimizer_update_32bit(
self.optimizer_name,
grad,
p,
state["state1"],
config["betas"][0],
config["eps"],
step,
config["lr"],
state["state2"],
config["betas"][1],
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
skip_zeros=config["skip_zeros"],
)
elif state["state1"].dtype == torch.uint8 and not config["block_wise"]:
F.optimizer_update_8bit(
self.optimizer_name,
grad,
p,
state["state1"],
state["state2"],
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
state["qmap2"],
state["max1"],
state["max2"],
state["new_max1"],
state["new_max2"],
config["weight_decay"],
gnorm_scale=gnorm_scale,
unorm_vec=state["unorm_vec"]
if config["max_unorm"] > 0.0
else None,
max_unorm=config["max_unorm"],
)
# swap maxes
state["max1"], state["new_max1"] = state["new_max1"], state["max1"]
state["max2"], state["new_max2"] = state["new_max2"], state["max2"]
elif state["state1"].dtype == torch.uint8 and config["block_wise"]:
F.optimizer_update_8bit_blockwise(
self.optimizer_name,
grad,
p,
state["state1"],
state["state2"],
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
state["qmap2"],
state["absmax1"],
state["absmax2"],
config["weight_decay"],
gnorm_scale=gnorm_scale,
skip_zeros=config["skip_zeros"],
)
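# Note on Optimizer2State.update_step (Optimizer1State below mirrors the same structure
# with a single state buffer): the update dispatches on the state dtype and config into
# one of three kernels: 32-bit float states, 8-bit states with per-tensor max scaling
# (max1/new_max1 are swapped after each step), or 8-bit states with blockwise absmax
# scaling (one scale per 2048-value block).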
class Optimizer1State(Optimizer8bit):
def __init__(
self,
optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.0),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
config = self.get_config(gindex, pindex, group)
if config["optim_bits"] == 32:
dtype = torch.float32
elif config["optim_bits"] == 8:
dtype = torch.uint8
else:
raise NotImplementedError(
f'Amount of optimizer bits not supported: {config["optim_bits"]}'
)
if p.numel() < config["min_8bit_size"]:
dtype = torch.float32
state = self.state[p]
state["step"] = 0
if dtype == torch.float32 or (
dtype == torch.uint8 and p.numel() < 4096
):
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
elif dtype == torch.uint8:
if state["step"] == 0:
if "dynamic" not in self.name2qmap:
self.fill_qmap()
self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
p.device
)
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap1"] = self.name2qmap["dynamic"]
if config["block_wise"]:
n = p.numel()
blocks = n // 2048
blocks += 1 if n % 2048 > 0 else 0
state["absmax1"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
state = self.state[p]
grad = p.grad
config = self.get_config(gindex, pindex, group)
state["step"] += 1
step = state["step"]
if config["percentile_clipping"] < 100:
current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(
grad, state["gnorm_vec"], step, config["percentile_clipping"]
)
else:
gnorm_scale = 1.0
if state["state1"].dtype == torch.float:
F.optimizer_update_32bit(
self.optimizer_name,
grad,
p,
state["state1"],
config["betas"][0],
config["eps"],
step,
config["lr"],
None,
0.0,
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
skip_zeros=config["skip_zeros"],
)
elif state["state1"].dtype == torch.uint8 and not config["block_wise"]:
F.optimizer_update_8bit(
self.optimizer_name,
grad,
p,
state["state1"],
None,
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
None,
state["max1"],
None,
state["new_max1"],
None,
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
)
state["max1"], state["new_max1"] = state["new_max1"], state["max1"]
elif state["state1"].dtype == torch.uint8 and config["block_wise"]:
F.optimizer_update_8bit_blockwise(
self.optimizer_name,
grad,
p,
state["state1"],
None,
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
None,
state["absmax1"],
None,
config["weight_decay"],
gnorm_scale=gnorm_scale,
skip_zeros=config["skip_zeros"],
)
|
from setuptools import setup, find_packages
setup(
name = 'anymal-belief-state-encoder-decoder-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.20',
license='MIT',
description = 'Anymal Belief-state Encoder Decoder - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/anymal-belief-state-encoder-decoder-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention gating',
'belief state',
'robotics'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from anymal_belief_state_encoder_decoder_pytorch.networks import Student, Teacher, MLP, Anymal
from anymal_belief_state_encoder_decoder_pytorch.ppo import PPO, MockEnv
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import GRUCell
from torch.distributions import Categorical
from torch.optim import Adam
from einops import rearrange
from einops_exts import check_shape
from einops.layers.torch import Rearrange
from anymal_belief_state_encoder_decoder_pytorch.running import RunningStats
# helper functions
def exists(val):
return val is not None
# freezing of neural networks (teacher needs to be frozen)
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
# in the paper
# the network attention gates the exteroception, and then sums it to the belief state
# todo: make sure the padding is on the right side
def sum_with_zeropad(x, y):
x_dim, y_dim = x.shape[-1], y.shape[-1]
if x_dim == y_dim:
return x + y
if x_dim < y_dim:
x = F.pad(x, (y_dim - x_dim, 0))
if y_dim < x_dim:
y = F.pad(y, (x_dim - y_dim, 0))
return x + y
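# e.g. a belief state of dim 120 summed with gated latent exteroception of dim 96 (4 legs x 24):
# the smaller tensor is left-padded with 24 zeros before the elementwise sum.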
# add basic MLP
class MLP(nn.Module):
def __init__(
self,
dims,
activation = nn.LeakyReLU,
final_activation = False
):
super().__init__()
assert isinstance(dims, (list, tuple))
assert len(dims) > 2, 'must have at least 3 dimensions (input, *hiddens, output)'
dim_pairs = list(zip(dims[:-1], dims[1:]))
*dim_pairs, dim_out_pair = dim_pairs
layers = []
for dim_in, dim_out in dim_pairs:
layers.extend([
nn.Linear(dim_in, dim_out),
activation()
])
layers.append(nn.Linear(*dim_out_pair))
if final_activation:
layers.append(activation())
self.net = nn.Sequential(*layers)
def forward(self, x):
if isinstance(x, (tuple, list)):
x = torch.cat(x, dim = -1)
return self.net(x)
class Student(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52, # in paper, height samples was marked as 208, but wasn't sure if that was per leg, or (4 legs x 52) = 208
latent_extero_dim = 24,
extero_encoder_hidden = (80, 60),
belief_state_encoder_hiddens = (64, 64),
extero_gate_encoder_hiddens = (64, 64),
belief_state_dim = 120, # should be equal to teacher's extero_dim + privileged_dim (part of the GRU's responsibility is to maintain a hidden state that forms an opinion on the privileged information)
gru_num_layers = 2,
gru_hidden_size = 50,
mlp_hidden = (256, 160, 128),
num_legs = 4,
privileged_dim = 50,
privileged_decoder_hiddens = (64, 64),
extero_decoder_hiddens = (64, 64),
):
super().__init__()
assert belief_state_dim > (num_legs * latent_extero_dim)
self.num_legs = num_legs
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
# encoding of exteroception
self.extero_encoder = MLP((extero_dim, *extero_encoder_hidden, latent_extero_dim))
# GRU related parameters
gru_input_dim = (latent_extero_dim * num_legs) + proprio_dim
gru_input_dims = (gru_input_dim, *((gru_hidden_size,) * (gru_num_layers - 1)))
self.gru_cells = nn.ModuleList([GRUCell(input_dim, gru_hidden_size) for input_dim in gru_input_dims])
self.gru_hidden_size = gru_hidden_size
# belief state encoding
self.belief_state_encoder = MLP((gru_hidden_size, *belief_state_encoder_hiddens, belief_state_dim))
# attention gating of exteroception
self.to_latent_extero_attn_gate = MLP((gru_hidden_size, *extero_gate_encoder_hiddens, latent_extero_dim * num_legs))
# belief state decoder
self.privileged_decoder = MLP((gru_hidden_size, *privileged_decoder_hiddens, privileged_dim))
self.extero_decoder = MLP((gru_hidden_size, *extero_decoder_hiddens, extero_dim * num_legs))
self.to_extero_attn_gate = MLP((gru_hidden_size, *extero_gate_encoder_hiddens, extero_dim * num_legs))
# final MLP to action logits
self.to_logits = MLP((
belief_state_dim + proprio_dim,
*mlp_hidden
))
self.to_action_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], num_actions)
)
def get_gru_hiddens(self):
device = next(self.parameters()).device
        return torch.zeros((len(self.gru_cells), self.gru_hidden_size), device = device)
def forward(
self,
proprio,
extero,
hiddens = None,
        return_estimated_info = False, # for returning estimated privileged info + exteroceptive info, for reconstruction loss
return_action_categorical_dist = False
):
check_shape(proprio, 'b d', d = self.proprio_dim)
check_shape(extero, 'b n d', n = self.num_legs, d = self.extero_dim)
latent_extero = self.extero_encoder(extero)
latent_extero = rearrange(latent_extero, 'b ... -> b (...)')
# RNN
if not exists(hiddens):
prev_hiddens = (None,) * len(self.gru_cells)
else:
prev_hiddens = hiddens.unbind(dim = -2)
gru_input = torch.cat((proprio, latent_extero), dim = -1)
next_hiddens = []
for gru_cell, prev_hidden in zip(self.gru_cells, prev_hiddens):
gru_input = gru_cell(gru_input, prev_hidden)
next_hiddens.append(gru_input)
gru_output = gru_input
next_hiddens = torch.stack(next_hiddens, dim = -2)
# attention gating of exteroception
latent_extero_attn_gate = self.to_latent_extero_attn_gate(gru_output)
gated_latent_extero = latent_extero * latent_extero_attn_gate.sigmoid()
# belief state and add gated exteroception
belief_state = self.belief_state_encoder(gru_output)
belief_state = sum_with_zeropad(belief_state, gated_latent_extero)
# to action logits
belief_state_with_proprio = torch.cat((
proprio,
belief_state,
), dim = 1)
logits = self.to_logits(belief_state_with_proprio)
pi_logits = self.to_action_head(logits)
return_action = Categorical(pi_logits.softmax(dim = -1)) if return_action_categorical_dist else pi_logits
if not return_estimated_info:
return return_action, next_hiddens
# belief state decoding
# for reconstructing privileged and exteroception information from hidden belief states
recon_privileged = self.privileged_decoder(gru_output)
recon_extero = self.extero_decoder(gru_output)
extero_attn_gate = self.to_extero_attn_gate(gru_output)
gated_extero = rearrange(extero, 'b ... -> b (...)') * extero_attn_gate.sigmoid()
recon_extero = recon_extero + gated_extero
recon_extero = rearrange(recon_extero, 'b (n d) -> b n d', n = self.num_legs)
# whether to return raw policy logits or action probs wrapped with Categorical
return return_action, next_hiddens, (recon_privileged, recon_extero)
class Teacher(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52, # in paper, height samples was marked as 208, but wasn't sure if that was per leg, or (4 legs x 52) = 208
latent_extero_dim = 24,
extero_encoder_hidden = (80, 60),
privileged_dim = 50,
latent_privileged_dim = 24,
privileged_encoder_hidden = (64, 32),
mlp_hidden = (256, 160, 128),
num_legs = 4
):
super().__init__()
self.num_legs = num_legs
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
self.privileged_dim = privileged_dim
self.extero_encoder = MLP((extero_dim, *extero_encoder_hidden, latent_extero_dim))
self.privileged_encoder = MLP((privileged_dim, *privileged_encoder_hidden, latent_privileged_dim))
self.to_logits = MLP((
latent_extero_dim * num_legs + latent_privileged_dim + proprio_dim,
*mlp_hidden
))
self.to_action_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], num_actions)
)
self.to_value_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], 1),
Rearrange('... 1 -> ...')
)
def forward(
self,
proprio,
extero,
privileged,
return_value_head = False,
return_action_categorical_dist = False
):
check_shape(proprio, 'b d', d = self.proprio_dim)
check_shape(extero, 'b n d', n = self.num_legs, d = self.extero_dim)
check_shape(privileged, 'b d', d = self.privileged_dim)
latent_extero = self.extero_encoder(extero)
latent_extero = rearrange(latent_extero, 'b ... -> b (...)')
latent_privileged = self.privileged_encoder(privileged)
latent = torch.cat((
proprio,
latent_extero,
latent_privileged,
), dim = -1)
logits = self.to_logits(latent)
pi_logits = self.to_action_head(logits)
if not return_value_head:
return pi_logits
value_logits = self.to_value_head(logits)
return_action = Categorical(pi_logits.softmax(dim = -1)) if return_action_categorical_dist else pi_logits
return return_action, value_logits
# manages both teacher and student under one module
class Anymal(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52,
privileged_dim = 50,
num_legs = 4,
latent_extero_dim = 24,
latent_privileged_dim = 24,
teacher_extero_encoder_hidden = (80, 60),
teacher_privileged_encoder_hidden = (64, 32),
student_extero_gate_encoder_hiddens = (64, 64),
student_belief_state_encoder_hiddens = (64, 64),
student_belief_state_dim = 120,
student_gru_num_layers = 2,
student_gru_hidden_size = 50,
student_privileged_decoder_hiddens = (64, 64),
student_extero_decoder_hiddens = (64, 64),
student_extero_encoder_hidden = (80, 60),
mlp_hidden = (256, 160, 128),
recon_loss_weight = 0.5
):
super().__init__()
self.proprio_dim = proprio_dim
self.num_legs = num_legs
self.extero_dim = extero_dim
self.student = Student(
num_actions = num_actions,
proprio_dim = proprio_dim,
extero_dim = extero_dim,
latent_extero_dim = latent_extero_dim,
extero_encoder_hidden = student_extero_encoder_hidden,
belief_state_encoder_hiddens = student_belief_state_encoder_hiddens,
extero_gate_encoder_hiddens = student_extero_gate_encoder_hiddens,
belief_state_dim = student_belief_state_dim,
gru_num_layers = student_gru_num_layers,
gru_hidden_size = student_gru_hidden_size,
mlp_hidden = mlp_hidden,
num_legs = num_legs,
privileged_dim = privileged_dim,
privileged_decoder_hiddens = student_privileged_decoder_hiddens,
extero_decoder_hiddens = student_extero_decoder_hiddens,
)
self.teacher = Teacher(
num_actions = num_actions,
proprio_dim = proprio_dim,
extero_dim = extero_dim,
latent_extero_dim = latent_extero_dim,
extero_encoder_hidden = teacher_extero_encoder_hidden,
privileged_dim = privileged_dim,
latent_privileged_dim = latent_privileged_dim,
privileged_encoder_hidden = teacher_privileged_encoder_hidden,
mlp_hidden = mlp_hidden,
num_legs = num_legs
)
self.recon_loss_weight = recon_loss_weight
def get_observation_running_stats(self):
return RunningStats(self.proprio_dim), RunningStats((self.num_legs, self.extero_dim))
def init_student_with_teacher(self):
self.student.extero_encoder.load_state_dict(self.teacher.extero_encoder.state_dict())
self.student.to_logits.load_state_dict(self.teacher.to_logits.state_dict())
self.student.to_action_head.load_state_dict(self.teacher.to_action_head.state_dict())
def forward_teacher(self, *args, return_value_head = False, **kwargs):
return self.teacher(*args, return_value_head = return_value_head, **kwargs)
def forward_student(self, *args, **kwargs):
return self.student(*args, **kwargs)
# main forward for training the student with teacher as guide
def forward(
self,
proprio,
extero,
privileged,
teacher_states = None,
hiddens = None,
noise_strength = 0.1
):
self.teacher.eval()
freeze_all_layers_(self.teacher)
with torch.no_grad():
teacher_proprio, teacher_extero = teacher_states if exists(teacher_states) else (proprio, extero)
teacher_action_logits = self.forward_teacher(teacher_proprio, teacher_extero, privileged)
noised_extero = extero + torch.rand_like(extero) * noise_strength
student_action_logits, hiddens, recons = self.student(proprio, noised_extero, hiddens = hiddens, return_estimated_info = True)
# calculate reconstruction loss of privileged and denoised exteroception
(recon_privileged, recon_extero) = recons
recon_loss = F.mse_loss(recon_privileged, privileged) + F.mse_loss(recon_extero, extero)
# calculate behavior loss, which is also squared distance?
behavior_loss = F.mse_loss(teacher_action_logits, student_action_logits) # why not kl div on action probs?
loss = behavior_loss + recon_loss * self.recon_loss_weight
return loss, hiddens
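# Usage sketch (illustrative, using the default dimensions above):
#
#   anymal = Anymal(num_actions = 10)
#   proprio = torch.randn(1, 133)
#   extero = torch.randn(1, 4, 52)
#   privileged = torch.randn(1, 50)
#   loss, hiddens = anymal(proprio, extero, privileged)  # noisy-student distillation loss
#   loss.backward()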
|
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from collections import deque
from einops import rearrange
from anymal_belief_state_encoder_decoder_pytorch import Anymal
class ExperienceDataset(Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __len__(self):
return len(self.data[0])
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind], self.data))
def create_dataloader(data, batch_size):
ds = ExperienceDataset(data)
return DataLoader(ds, batch_size = batch_size, drop_last = True)
class StudentTrainer(nn.Module):
def __init__(
self,
*,
anymal,
env,
epochs = 2,
lr = 5e-4,
max_timesteps = 10000,
update_timesteps = 5000,
minibatch_size = 16,
truncate_tpbtt = 10
):
super().__init__()
self.env = env
self.anymal = anymal
self.optimizer = Adam(anymal.student.parameters(), lr = lr)
self.epochs = epochs
self.max_timesteps = max_timesteps
self.update_timesteps = update_timesteps
self.minibatch_size = minibatch_size
self.truncate_tpbtt = truncate_tpbtt
self.running_proprio, self.running_extero = anymal.get_observation_running_stats()
def learn_from_memories(
self,
memories,
next_states,
noise_strength = 0.
):
device = next(self.parameters()).device
# retrieve and prepare data from memory for training
states = []
teacher_states = []
hiddens = []
dones = []
for (state, teacher_state, hidden, done) in memories:
states.append(state)
teacher_states.append(teacher_state)
hiddens.append(hidden)
dones.append(torch.Tensor([done]))
states = tuple(zip(*states))
teacher_states = tuple(zip(*teacher_states))
# convert values to torch tensors
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
states = map(to_torch_tensor, states)
teacher_states = map(to_torch_tensor, teacher_states)
hiddens = to_torch_tensor(hiddens)
dones = to_torch_tensor(dones)
# prepare dataloader for policy phase training
dl = create_dataloader([*states, *teacher_states, hiddens, dones], self.minibatch_size)
current_hiddens = self.anymal.student.get_gru_hiddens()
current_hiddens = rearrange(current_hiddens, 'l d -> 1 l d')
for _ in range(self.epochs):
for ind, (proprio, extero, privileged, teacher_proprio, teacher_extero, episode_hiddens, done) in enumerate(dl):
straight_through_hiddens = current_hiddens - current_hiddens.detach() + episode_hiddens
loss, current_hiddens = self.anymal(
proprio,
extero,
privileged,
teacher_states = (teacher_proprio, teacher_extero),
hiddens = straight_through_hiddens,
noise_strength = noise_strength
)
loss.backward(retain_graph = True)
tbptt_limit = not ((ind + 1) % self.truncate_tpbtt)
if tbptt_limit: # how far back in time should the gradients go for recurrence
self.optimizer.step()
self.optimizer.zero_grad()
current_hiddens = current_hiddens.detach()
                # detach hiddens depending on whether it is a new episode or not
# todo: restructure dataloader to load one episode per batch rows
maybe_detached_hiddens = []
for current_hidden, done in zip(current_hiddens.unbind(dim = 0), dones.unbind(dim = 0)):
                    maybe_detached_hiddens.append(current_hidden.detach() if done else current_hidden)
current_hiddens = torch.stack(maybe_detached_hiddens)
def forward(
self,
noise_strength = 0.
):
device = next(self.parameters()).device
time = 0
done = False
states = self.env.reset()
memories = deque([])
hidden = self.anymal.student.get_gru_hiddens()
hidden = rearrange(hidden, 'l d -> 1 l d')
self.running_proprio.clear()
self.running_extero.clear()
for timestep in range(self.max_timesteps):
time += 1
states = list(map(lambda t: t.to(device), states))
anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), states))
# teacher needs to have normalized observations
(proprio, extero, privileged) = states
self.running_proprio.push(proprio)
self.running_extero.push(extero)
teacher_states = (
self.running_proprio.norm(proprio),
self.running_extero.norm(extero)
)
teacher_anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), teacher_states))
# add states to memories
memories.append((
states,
teacher_states,
rearrange(hidden, '1 ... -> ...'),
done
))
dist, hidden = self.anymal.forward_student(
*anymal_states[:-1],
hiddens = hidden,
return_action_categorical_dist = True
)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.item()
next_states, _, done, _ = self.env.step(action)
states = next_states
if time % self.update_timesteps == 0:
self.learn_from_memories(memories, next_states, noise_strength = noise_strength)
memories.clear()
if done:
break
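# hypothetical usage sketch (an assumption, not part of the original file) - distill the
# privileged teacher into the student for a handful of episodes. `anymal` and `env` are
# assumed to be constructed elsewhere with the interfaces used above
#
# trainer = StudentTrainer(anymal = anymal, env = env, epochs = 2, minibatch_size = 16)
# for _ in range(10):
#     trainer(noise_strength = 0.1) # runs one episode of noisy student training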
|
from collections import namedtuple, deque
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from anymal_belief_state_encoder_decoder_pytorch import Anymal
from anymal_belief_state_encoder_decoder_pytorch.networks import unfreeze_all_layers_
from einops import rearrange
# they use basic PPO for training the teacher with privileged information
# then they use noisy student training, with the trained "oracle" teacher as the guide
# ppo data
Memory = namedtuple('Memory', ['state', 'action', 'action_log_prob', 'reward', 'done', 'value'])
class ExperienceDataset(Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __len__(self):
return len(self.data[0])
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind], self.data))
def create_shuffled_dataloader(data, batch_size):
ds = ExperienceDataset(data)
return DataLoader(ds, batch_size = batch_size, shuffle = True)
# ppo helper functions
def normalize(t, eps = 1e-5):
return (t - t.mean()) / (t.std() + eps)
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
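# note: mirroring the clipped ppo policy objective, the new value estimate is not allowed
# to move more than `clip` away from the old value, and the larger (worse) of the clipped
# and unclipped squared errors is taken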
# mock environment
class MockEnv(object):
def __init__(
self,
proprio_dim,
extero_dim,
privileged_dim,
num_legs = 4
):
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
self.privileged_dim = privileged_dim
self.num_legs = num_legs
def rand_state(self):
return (
torch.randn((self.proprio_dim,)),
torch.randn((self.num_legs, self.extero_dim,)),
torch.randn((self.privileged_dim,))
)
def reset(self):
return self.rand_state()
def step(self, action):
reward = torch.randn((1,))
done = torch.tensor([False])
return self.rand_state(), reward, done, None
# main ppo class
class PPO(nn.Module):
def __init__(
self,
*,
env,
anymal,
epochs = 2,
lr = 5e-4,
betas = (0.9, 0.999),
eps_clip = 0.2,
beta_s = 0.005,
value_clip = 0.4,
max_timesteps = 10000,
update_timesteps = 5000,
lam = 0.95,
gamma = 0.99,
minibatch_size = 8300
):
super().__init__()
assert isinstance(anymal, Anymal)
self.env = env
self.anymal = anymal
self.minibatch_size = minibatch_size
self.optimizer = Adam(anymal.teacher.parameters(), lr = lr, betas = betas)
self.epochs = epochs
self.max_timesteps = max_timesteps
self.update_timesteps = update_timesteps
self.beta_s = beta_s
self.eps_clip = eps_clip
self.value_clip = value_clip
self.lam = lam
self.gamma = gamma
# in the paper, they state that the observations fed to the teacher are normalized
# by a running mean
self.running_proprio, self.running_extero = anymal.get_observation_running_stats()
def learn_from_memories(
self,
memories,
next_states
):
device = next(self.parameters()).device
# retrieve and prepare data from memory for training
states = []
actions = []
old_log_probs = []
rewards = []
masks = []
values = []
for mem in memories:
states.append(mem.state)
actions.append(torch.tensor(mem.action))
old_log_probs.append(mem.action_log_prob)
rewards.append(mem.reward)
masks.append(1 - float(mem.done))
values.append(mem.value)
states = tuple(zip(*states))
# calculate generalized advantage estimate
next_states = map(lambda t: t.to(device), next_states)
next_states = map(lambda t: rearrange(t, '... -> 1 ...'), next_states)
_, next_value = self.anymal.forward_teacher(*next_states, return_value_head = True)
next_value = next_value.detach()
values = values + [next_value]
returns = []
gae = 0
for i in reversed(range(len(rewards))):
delta = rewards[i] + self.gamma * values[i + 1] * masks[i] - values[i]
gae = delta + self.gamma * self.lam * masks[i] * gae
returns.insert(0, gae + values[i])
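# note: the loop above is generalized advantage estimation (GAE)
# delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
# A_t = delta_t + gamma * lam * mask_t * A_{t+1}
# the stored returns are A_t + V(s_t), which later serve as the value targets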
# convert values to torch tensors
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
states = map(to_torch_tensor, states)
actions = to_torch_tensor(actions)
old_log_probs = to_torch_tensor(old_log_probs)
old_values = to_torch_tensor(values[:-1])
old_values = rearrange(old_values, '... 1 -> ...')
rewards = torch.tensor(returns).float().to(device)
# prepare dataloader for policy phase training
dl = create_shuffled_dataloader([*states, actions, old_log_probs, rewards, old_values], self.minibatch_size)
# policy phase training, similar to original PPO
for _ in range(self.epochs):
for proprio, extero, privileged, actions, old_log_probs, rewards, old_values in dl:
dist, values = self.anymal.forward_teacher(
proprio, extero, privileged,
return_value_head = True,
return_action_categorical_dist = True
)
action_log_probs = dist.log_prob(actions)
entropy = dist.entropy()
ratios = (action_log_probs - old_log_probs).exp()
advantages = normalize(rewards - old_values.detach())
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropy
value_loss = clipped_value_loss(values, rewards, old_values, self.value_clip)
(policy_loss.mean() + value_loss.mean()).backward()
self.optimizer.step()
self.optimizer.zero_grad()
# does one episode's worth of learning
def forward(self):
device = next(self.parameters()).device
unfreeze_all_layers_(self.anymal)
time = 0
states = self.env.reset() # states assumed to be (proprioception, exteroception, privileged information)
memories = deque([])
self.running_proprio.clear()
self.running_extero.clear()
for timestep in range(self.max_timesteps):
time += 1
states = list(map(lambda t: t.to(device), states))
proprio, extero, privileged = states
# update running means for observations, for teacher
self.running_proprio.push(proprio)
self.running_extero.push(extero)
# normalize observation states for teacher (proprio and extero)
states = (
self.running_proprio.norm(proprio),
self.running_extero.norm(extero),
privileged
)
anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), states))
dist, values = self.anymal.forward_teacher(
*anymal_states,
return_value_head = True,
return_action_categorical_dist = True
)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.item()
next_states, reward, done, _ = self.env.step(action)
memory = Memory(states, action, action_log_prob, reward, done, values)
memories.append(memory)
states = next_states
if time % self.update_timesteps == 0:
self.learn_from_memories(memories, next_states)
memories.clear()
if done:
break
print('trained for 1 episode')
|
import torch
from torch import nn
class RunningStats(nn.Module):
def __init__(self, shape, eps = 1e-5):
super().__init__()
shape = shape if isinstance(shape, tuple) else (shape,)
self.shape = shape
self.eps = eps
self.n = 0
self.register_buffer('old_mean', torch.zeros(shape), persistent = False)
self.register_buffer('new_mean', torch.zeros(shape), persistent = False)
self.register_buffer('old_std', torch.zeros(shape), persistent = False)
self.register_buffer('new_std', torch.zeros(shape), persistent = False)
def clear(self):
self.n = 0
def push(self, x):
self.n += 1
if self.n == 1:
self.old_mean.copy_(x.data)
self.new_mean.copy_(x.data)
self.old_std.zero_()
self.new_std.zero_()
return
self.new_mean.copy_(self.old_mean + (x - self.old_mean) / self.n)
self.new_std.copy_(self.old_std + (x - self.old_mean) * (x - self.new_mean))
self.old_mean.copy_(self.new_mean)
self.old_std.copy_(self.new_std)
def mean(self):
return self.new_mean if self.n else torch.zeros_like(self.new_mean)
def variance(self):
return (self.new_std / (self.n - 1)) if self.n > 1 else torch.zeros_like(self.new_std)
def rstd(self):
return torch.rsqrt(self.variance() + self.eps)
def norm(self, x):
return (x - self.mean()) * self.rstd()
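# minimal usage sketch (an assumption, not part of the original file) - push a stream of
# observations, then normalize a new observation with the accumulated statistics
#
# stats = RunningStats(4)
# for _ in range(10):
#     stats.push(torch.randn(4))
# normed = stats.norm(torch.randn(4))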
|
from setuptools import setup, find_packages
setup(
name = 'bottleneck-transformer-pytorch',
packages = find_packages(),
version = '0.1.4',
license='MIT',
description = 'Bottleneck Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/bottleneck-transformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'image classification',
'vision'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from bottleneck_transformer_pytorch.bottleneck_transformer_pytorch import BottleStack, BottleBlock
|
import math
import torch
from torch import nn, einsum
from einops import rearrange
# translated from tensorflow code
# https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
# positional embedding helpers
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def rel_to_abs(x):
b, h, l, _, device, dtype = *x.shape, x.device, x.dtype
dd = {'device': device, 'dtype': dtype}
col_pad = torch.zeros((b, h, l, 1), **dd)
x = torch.cat((x, col_pad), dim = 3)
flat_x = rearrange(x, 'b h l c -> b h (l c)')
flat_pad = torch.zeros((b, h, l - 1), **dd)
flat_x_padded = torch.cat((flat_x, flat_pad), dim = 2)
final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)
final_x = final_x[:, :, :l, (l-1):]
return final_x
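# note: rel_to_abs takes logits indexed by relative position (b, h, l, 2l - 1) and converts
# them to logits indexed by absolute position (b, h, l, l), using the usual pad-and-reshape
# skewing trick rather than an explicit gather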
def relative_logits_1d(q, rel_k):
b, heads, h, w, dim = q.shape
logits = einsum('b h x y d, r d -> b h x y r', q, rel_k)
logits = rearrange(logits, 'b h x y r -> b (h x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, heads, h, w, w)
logits = expand_dim(logits, dim = 3, k = h)
return logits
# positional embeddings
class AbsPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.height = nn.Parameter(torch.randn(height, dim_head) * scale)
self.width = nn.Parameter(torch.randn(width, dim_head) * scale)
def forward(self, q):
emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d')
emb = rearrange(emb, ' h w d -> (h w) d')
logits = einsum('b h i d, j d -> b h i j', q, emb)
return logits
class RelPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.fmap_size = fmap_size
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
h, w = self.fmap_size
q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)')
q = rearrange(q, 'b h x y d -> b h y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)')
return rel_logits_w + rel_logits_h
# classes
class Attention(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
heads = 4,
dim_head = 128,
rel_pos_emb = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
rel_pos_class = AbsPosEmb if not rel_pos_emb else RelPosEmb
self.pos_emb = rel_pos_class(fmap_size, dim_head)
def forward(self, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
sim = sim + self.pos_emb(q)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return out
class BottleBlock(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
dim_out,
proj_factor,
downsample,
heads = 4,
dim_head = 128,
rel_pos_emb = False,
activation = nn.ReLU()
):
super().__init__()
# shortcut
if dim != dim_out or downsample:
kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0)
self.shortcut = nn.Sequential(
nn.Conv2d(dim, dim_out, kernel_size, stride = stride, padding = padding, bias = False),
nn.BatchNorm2d(dim_out),
activation
)
else:
self.shortcut = nn.Identity()
# contraction and expansion
attn_dim_in = dim_out // proj_factor
attn_dim_out = heads * dim_head
self.net = nn.Sequential(
nn.Conv2d(dim, attn_dim_in, 1, bias = False),
nn.BatchNorm2d(attn_dim_in),
activation,
Attention(
dim = attn_dim_in,
fmap_size = fmap_size,
heads = heads,
dim_head = dim_head,
rel_pos_emb = rel_pos_emb
),
nn.AvgPool2d((2, 2)) if downsample else nn.Identity(),
nn.BatchNorm2d(attn_dim_out),
activation,
nn.Conv2d(attn_dim_out, dim_out, 1, bias = False),
nn.BatchNorm2d(dim_out)
)
# init last batch norm gamma to zero
nn.init.zeros_(self.net[-1].weight)
# final activation
self.activation = activation
def forward(self, x):
shortcut = self.shortcut(x)
x = self.net(x)
x = x + shortcut
return self.activation(x)
# main bottle stack
class BottleStack(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
dim_out = 2048,
proj_factor = 4,
num_layers = 3,
heads = 4,
dim_head = 128,
downsample = True,
rel_pos_emb = False,
activation = nn.ReLU()
):
super().__init__()
fmap_size = pair(fmap_size)
self.dim = dim
self.fmap_size = fmap_size
layers = []
for i in range(num_layers):
is_first = i == 0
dim = (dim if is_first else dim_out)
layer_downsample = is_first and downsample
fmap_divisor = (2 if downsample and not is_first else 1)
layer_fmap_size = tuple(map(lambda t: t // fmap_divisor, fmap_size))
layers.append(BottleBlock(
dim = dim,
fmap_size = layer_fmap_size,
dim_out = dim_out,
proj_factor = proj_factor,
heads = heads,
dim_head = dim_head,
downsample = layer_downsample,
rel_pos_emb = rel_pos_emb,
activation = activation
))
self.net = nn.Sequential(*layers)
def forward(self, x):
_, c, h, w = x.shape
assert c == self.dim, f'channels of feature map {c} must match channels given at init {self.dim}'
assert h == self.fmap_size[0] and w == self.fmap_size[1], f'height and width ({h} {w}) of feature map must match the fmap_size given at init {self.fmap_size}'
return self.net(x)
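# minimal usage sketch (an assumption, not part of the original file) - a 3 layer bottleneck
# transformer stack over a 32x32 feature map, downsampled once to 16x16
#
# layer = BottleStack(dim = 256, fmap_size = 32, dim_out = 2048, proj_factor = 4, downsample = True, heads = 4, dim_head = 128, rel_pos_emb = True)
# fmap = torch.randn(1, 256, 32, 32)
# out = layer(fmap) # (1, 2048, 16, 16)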
|
from setuptools import setup, find_packages
setup(
name = 'block-recurrent-transformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.4.3',
license='MIT',
description = 'Block Recurrent Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/block-recurrent-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'recurrence'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'memorizing-transformers-pytorch>=0.4.0',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from accelerate import Accelerator
from block_recurrent_transformer_pytorch import BlockRecurrentTransformer, RecurrentTrainerWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 250
GENERATE_LENGTH = 2048
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# accelerator
accelerator = Accelerator()
device = accelerator.device
acc_print = accelerator.print
# instantiate palm
model = BlockRecurrentTransformer(
num_tokens = 256,
dim = 512,
depth = 6,
dim_head = 64,
heads = 8,
max_seq_len = 1024,
block_width = 512,
num_state_vectors = 512,
recurrent_layers = (4,),
use_flash_attn = True
)
train_wrapper = RecurrentTrainerWrapper(
model,
xl_memories_dropout = 0.1,
state_dropout = 0.1,
)
model.to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = train_wrapper(next(train_loader))
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
acc_print(f"training loss: {loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = train_wrapper(next(val_loader))
acc_print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
acc_print("%s \n\n %s" % (prime, "*" * 100))
sample = train_wrapper.generate(inp[None, ...], length = GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
acc_print(output_str, "\n")
|
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from block_recurrent_transformer_pytorch.block_recurrent_transformer_pytorch import BlockRecurrentTransformer, RecurrentTrainerWrapper
|
import math
from random import random
from functools import wraps, partial
from itertools import zip_longest
from collections import namedtuple, defaultdict
from packaging import version
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Optional, List, Tuple
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def is_empty(t: torch.Tensor):
return t.numel() == 0
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def all_unique(arr):
return len(arr) == len(set(arr))
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def compact(arr):
return [*filter(exists, arr)]
def and_reduce(arr: List[torch.Tensor]):
if len(arr) == 0:
return None
head, *rest = arr
for t in rest:
head = head & t
return head
def safe_cat(*args, dim = 1):
args = compact(args)
if len(args) == 0:
return None
return torch.cat(args, dim = dim)
def divisible_by(numer, denom):
return (numer % denom) == 0
def l2norm(t):
return F.normalize(t, dim = -1)
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
# bias-less layernorm
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
width,
scale_base = 512,
theta = 10000
):
super().__init__()
self.width = width
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq, persistent = False)
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale, persistent = False)
self.register_buffer('cached_freqs', None, persistent = False)
self.register_buffer('cached_scales', None, persistent = False)
@property
def device(self):
return next(self.buffers()).device
def forward(self):
device, seq_len = self.device, self.width
if exists(self.cached_freqs):
cached_seq_len = self.cached_freqs.shape[-2]
if cached_seq_len >= seq_len:
return self.cached_freqs[:seq_len], self.cached_scales[:seq_len]
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
self.register_buffer('cached_freqs', freqs, persistent = False)
self.register_buffer('cached_scales', scale, persistent = False)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, pos, scale = 1.):
scale = default(scale, 1.)
seq_len = t.shape[-2]
assert pos.shape[-2] >= seq_len
pos = pos[-seq_len:]
if isinstance(scale, torch.Tensor):
assert scale.shape[-2] >= seq_len
scale = scale[-seq_len:]
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
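# note: per pair of feature dimensions this applies the usual rotary map
# (x1, x2) -> (x1 * cos - x2 * sin, x2 * cos + x1 * sin)
# with the xpos length-extrapolation scale multiplied onto queries and its inverse onto keys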
# memory management
class MemoryManager(nn.Module):
def __init__(
self,
dim,
*,
layers = 1,
mem_lengths = 512,
compress_factors = 1
):
super().__init__()
mem_lengths = cast_tuple(mem_lengths)
compress_factors = cast_tuple(compress_factors)
assert all([mem_length > 0 for mem_length in mem_lengths])
assert len(mem_lengths) == len(compress_factors)
assert layers >= 1
self.mem_lengths = mem_lengths
self.compress_factors = compress_factors
self.layers = nn.ModuleList([])
for _ in range(layers):
compress_fns = nn.ModuleList([])
for compress_factor in compress_factors:
compress_fn = nn.Identity()
if compress_factor > 1:
compress_fn = nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Conv1d(
dim * 2,
dim * 2,
compress_factor,
stride = compress_factor,
groups = 2
),
Rearrange('b d n -> b n d'),
)
compress_fns.append(compress_fn)
self.layers.append(compress_fns)
def forward(
self,
past_memories: List[torch.Tensor],
new_memories: List[torch.Tensor]
):
next_memories = []
for past_memory, new_memory, compress_fns in zip_longest(past_memories, new_memories, self.layers):
# edge case if neither memories exist
if not (exists(past_memory) or exists(new_memory)):
next_memories.append(None)
continue
next_memory = None
for mem_length, compress_factor, compress_fn in zip(self.mem_lengths, self.compress_factors, compress_fns):
# first get the memories for the given compression factor "current_memory"
current_memory = None
if exists(past_memory):
past_memory, current_memory = past_memory[..., :-mem_length, :], past_memory[..., -mem_length:, :]
# compress the new memories coming in, based on the compression factors set at init
if (not is_empty(new_memory)) and compress_factor > 1:
# make sure memory length is divisible by compression factor
new_mem_length = new_memory.shape[-2]
curtailed_length = (new_mem_length // compress_factor) * compress_factor
curtailed_slice = slice(-curtailed_length, None) if curtailed_length > 0 else slice(0, 0)
new_memory = new_memory[..., curtailed_slice, :]
# compress the memory pushed to the next stage
if new_memory.shape[-2] > 0:
new_memory = rearrange(new_memory, 'm b n d -> b n (m d)')
new_memory = compress_fn(new_memory)
new_memory = rearrange(new_memory, 'b n (m d) -> m b n d', m = 2)
# fifo memory queue
# add the new memory on the right
current_memory = safe_cat(current_memory, new_memory, dim = -2)
# "new" memory is new with respect to the next compressed segment
new_memory, current_memory = current_memory[..., :-mem_length, :], current_memory[..., -mem_length:, :]
# concat the new memory to the left into the past
next_memory = safe_cat(current_memory, next_memory, dim = -2)
next_memories.append(next_memory)
return next_memories
# maybe flash attention, if using pytorch 2.0
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# state container
class StateContainer(nn.Module):
def __init__(
self,
dim,
*,
num_state_vectors,
dim_head = 64,
heads = 8,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False
):
super().__init__()
assert num_state_vectors > 0
self.heads = heads
inner_dim = dim_head * heads
self.state_norm = LayerNorm(dim)
self.q_to_state = nn.Linear(dim, inner_dim, bias = False)
self.q_from_state = nn.Linear(dim, inner_dim, bias = False)
self.state_to_q = nn.Linear(dim, inner_dim, bias = False)
self.state_to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.init_state = nn.Parameter(torch.randn(num_state_vectors, dim))
self.state_pos_ids = nn.Parameter(torch.randn(num_state_vectors, dim))
self.to_state_out = nn.Linear(inner_dim * 2, dim, bias = False)
self.to_state_cross_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.state_self_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.from_state_cross_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
# gating related parameters - using the fixed simple config
self.state_out_to_gate = nn.Linear(dim, dim)
self.learned_ema_beta = nn.Parameter(torch.randn(dim))
# since each read should be followed by a write, just store cache in the container
self.cache = None
self.next_read_state = None
def set_next_read_state(
self,
states
):
if not exists(states):
states = self.init_state
self.next_read_state = (states,)
def read(self, x):
assert exists(self.next_read_state), 'states to be read must be set with .set_next_read_state'
states, = self.next_read_state
self.next_read_state = None
# pre norm state for attention
normed_states = self.state_norm(states)
# add the positional ids - as stated in the paper, this is critical for it to work
normed_states = normed_states + self.state_pos_ids
# get queries for cross attention, which they do not share, although they share key / values. another intriguing detail
q_to_state = self.q_to_state(x)
q_to_state = rearrange(q_to_state, '... n (h d) -> ... h n d', h = self.heads)
# self attention qkv for states
state_k, state_v = self.state_to_kv(normed_states).chunk(2, dim = -1)
# cross attend to the past states key values
to_state_out = self.to_state_cross_attn(q_to_state, state_k, state_v)
to_state_out = rearrange(to_state_out, 'b h n d -> b n (h d)')
# cache for next write
self.cache = (states, normed_states, state_k, state_v)
return to_state_out
def write(
self,
*,
memories
):
assert exists(self.cache)
k, v = memories
batch = k.shape[0]
# get cached values from the previous read
states, normed_states, state_k, state_v = self.cache
self.cache = None
# derive queries
q_from_state = self.q_from_state(normed_states)
q_from_state = rearrange(q_from_state, '... n (h d) -> ... h n d', h = self.heads)
state_q = self.state_to_q(normed_states)
state_q_einsum = 'n (h d)' if state_q.ndim == 2 else 'b n (h d)'
state_q = repeat(state_q, f'{state_q_einsum} -> b h n d', h = self.heads, b = batch)
# states must also undergo self attention
if q_from_state.ndim == 3:
q_from_state = repeat(q_from_state, '... -> b ...', b = batch)
state_out = self.state_self_attn(state_q, state_k, state_v)
from_state_out = self.from_state_cross_attn(q_from_state, k, v)
state_out = torch.cat((state_out, from_state_out), dim = -1)
state_out = rearrange(state_out, 'b h n d -> b n (h d)')
state_out = self.to_state_out(state_out)
# use the best performing configuration
# fixed simple gate - nothing more than a learned EMA with some resemblance to highway networks
z = self.state_out_to_gate(state_out)
learned_ema_decay = self.learned_ema_beta.sigmoid()
# set new state with the learned EMA gating
return learned_ema_decay * z + (1 - learned_ema_decay) * states
def forward(self, x):
raise NotImplementedError
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
use_flash_attn = False
):
super().__init__()
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = repeat(k, 'b ... -> b h ...', h = q.shape[1])
if v.ndim == 3:
v = repeat(v, 'b ... -> b h ...', h = q.shape[1])
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
masks = []
if self.causal:
i, j = q_len, k_len
causal_mask = torch.ones((i, j), dtype = torch.bool, device = q.device).triu(j - i + 1)
masks.append(~causal_mask)
if exists(mask):
if mask.ndim != 2:
mask = repeat(mask, 'w ... -> (b w) ...', b = q.shape[0] // mask.shape[0])
masks.append(mask)
attn_mask = and_reduce(masks)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = attn_mask
)
return out
def forward(self, q, k, v, mask = None, use_flash_attn = None):
use_flash_attn = default(use_flash_attn, self.use_flash_attn)
b, n, device = q.shape[0], q.shape[-2], q.device
q, ps = pack_one(q, '* h n d')
k, _ = pack_one(k, '* n d')
v, _ = pack_one(v, '* n d')
if use_flash_attn:
out = self.flash_attn(q, k, v, mask = mask)
return unpack_one(out, ps, '* h n d')
scale = q.shape[-1] ** -0.5
k_einsum = 'b j d' if k.ndim == 3 else 'b h j d'
v_einsum = 'b j d' if v.ndim == 3 else 'b h j d'
# similarity
sim = einsum(f"b h i d, {k_einsum} -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
if mask.ndim != 2:
mask = repeat(mask, 'w ... -> (b w) ...', b = b)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = q.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum(f"b h i j, {v_einsum} -> b h i d", attn, v)
return unpack_one(out, ps, '* h n d')
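# minimal usage sketch (an assumption, not part of the original file) of Attend in isolation
#
# attend = Attend(causal = True)
# q = torch.randn(1, 8, 16, 64) # (batch, heads, seq, dim head)
# k = torch.randn(1, 16, 64) # single head keys / values, broadcast across query heads
# v = torch.randn(1, 16, 64)
# out = attend(q, k, v) # (1, 8, 16, 64)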
# geglu feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4):
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Linear(inner_dim, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim_head,
causal = False,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False
):
super().__init__()
self.causal = causal
self.qk_rmsnorm = qk_rmsnorm
self.qk_rmsnorm_scale = qk_rmsnorm_scale
self.attend = Attend(causal = causal, use_flash_attn = use_flash_attn)
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
def forward(
self,
q, k, v,
mask = None,
rotary_pos_emb = None,
xpos_scale = None
):
scale = q.shape[-1] ** -0.5
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
scale = self.qk_rmsnorm_scale
if self.qk_rmsnorm:
q = q * self.q_scale
k = k * self.k_scale
# rotary positional embedding with xpos for length extrapolation
if exists(rotary_pos_emb):
q = apply_rotary_pos_emb(q, rotary_pos_emb, xpos_scale)
k = apply_rotary_pos_emb(k, rotary_pos_emb, xpos_scale ** -1)
# attention
out = self.attend(q, k, v, mask = mask)
return out
class AttentionBlock(nn.Module):
def __init__(
self,
dim,
block_width,
dim_head = 64,
heads = 8,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False,
num_state_vectors = 0,
num_external_state_reads = 0,
state_read_before_write = True # this will be defaulted to on as in the paper, but will be turned off in the case the researcher wants to test out reading the state at a lower layer
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.norm = LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.block_width = block_width
self.is_recurrent_layer = num_state_vectors > 0
# decide how many states this attention layer is going to read from
num_state_reads = int(self.is_recurrent_layer and state_read_before_write) + num_external_state_reads
self.to_out = nn.Linear(inner_dim * (1 + num_state_reads), dim, bias = False)
if not self.is_recurrent_layer:
return
self.state_read_before_write = state_read_before_write
self.state_container = StateContainer(
dim,
dim_head = dim_head,
heads = heads,
num_state_vectors = num_state_vectors,
qk_rmsnorm = qk_rmsnorm,
qk_rmsnorm_scale = qk_rmsnorm_scale,
use_flash_attn = use_flash_attn
)
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
x,
rotary_pos_emb = None,
xpos_scale = None,
attn_mask = None,
xl_memories: Optional[torch.Tensor] = None,
read_from_state_containers: List[StateContainer] = []
):
batch, seq_len, _, width, device = *x.shape, self.block_width, self.device
# pre normalization
x = self.norm(x)
# queries, keys, values and split out heads
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
split_head = partial(rearrange, pattern = 'b n (h d) -> b h n d', h = self.heads)
q = split_head(q)
# save the last key / values as memories for recurrence
memories = torch.stack((k, v))
mem_len = 0
if exists(xl_memories):
# if past memories are passed in, concat as the first bucket
mem_len = xl_memories.shape[-2]
past_k, past_v = xl_memories
k = torch.cat((past_k, k), dim = 1)
v = torch.cat((past_v, v), dim = 1)
# handle cropping of attention mask and positional embeddings
if exists(attn_mask):
attn_mask = attn_mask[:seq_len, :seq_len]
attn_mask = F.pad(attn_mask, (mem_len, 0), value = True)
# attention, but of course
out = self.attn(
q, k, v,
rotary_pos_emb = rotary_pos_emb,
xpos_scale = xpos_scale,
mask = attn_mask
)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# early return if not a recurrent layer
if not self.is_recurrent_layer and len(read_from_state_containers) == 0:
return self.to_out(out), memories, None
# whether to read from own state container, default to on, but may pass in more
if self.is_recurrent_layer and self.state_read_before_write:
read_from_state_containers = [self.state_container, *read_from_state_containers]
for read_state_container in read_from_state_containers:
# read from the states ...
to_state_out = read_state_container.read(x)
# and concat it to the output of self-attention
out = torch.cat((out, to_state_out), dim = -1)
new_states = None
if self.is_recurrent_layer:
# then write to the states as well if need be
new_states = self.state_container.write(memories = memories)
return self.to_out(out), memories, new_states
# classes
@beartype
class BlockRecurrentTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
dim_head = 64,
heads = 8,
all_layers_qk_rmsnorm = False,
ff_mult = 4,
max_seq_len = 1024,
block_width = 512,
recurrent_layers: Optional[Tuple[int, ...]] = None,
read_recurrent_layers: Optional[Tuple[int, ...]] = None,
num_state_vectors = None,
ignore_index = -100,
use_flash_attn = False,
use_compressed_mem = False,
compressed_mem_factor = 4
):
super().__init__()
num_state_vectors = default(num_state_vectors, block_width)
# set recurrent layers
recurrent_layers = default(recurrent_layers, (depth // 2,)) # default to one recurrent layer at the middle of the network
assert all([0 < layer <= depth for layer in recurrent_layers]), f'recurrent layers must range from 1 to the depth {depth}'
assert all_unique(recurrent_layers), 'recurrent layers must be all unique. no duplicate layers'
self.recurrent_layers = recurrent_layers
# set read recurrent layers
read_recurrent_layers = default(read_recurrent_layers, recurrent_layers)
assert all([read_layer <= write_layer for read_layer, write_layer in zip(read_recurrent_layers, recurrent_layers)]), 'the recurrent read layer must be always less than or equal to the write layer'
assert all([0 < layer <= depth for layer in read_recurrent_layers])
assert len(read_recurrent_layers) == len(recurrent_layers)
self.read_recurrent_layers = read_recurrent_layers
# token embedding
self.token_emb = nn.Embedding(num_tokens, dim)
self.rotary_pos_emb = RotaryEmbedding(dim = dim_head, width = (2 if not use_compressed_mem else 3) * block_width)
self.layers = nn.ModuleList([])
self.write_to_read_map = {write_layer: read_layer for write_layer, read_layer in zip(recurrent_layers, read_recurrent_layers)}
self.read_state_router = defaultdict(list)
for layer in range(1, depth + 1):
is_recurrent_layer = layer in self.recurrent_layers
layer_num_state_vectors = num_state_vectors if is_recurrent_layer else 0
num_external_state_reads = sum([int(layer == read_layer) for read_layer in read_recurrent_layers])
# only layers with xl memories
# or has recurrence in horizontal direction
# use qk rmsnorm (in the paper, they use cosine sim attention, but i think qk rmsnorm is more proven given the ViT-22B paper)
# one can also override to use all qk rmsnorm by setting all_layers_qk_rmsnorm = True
qk_rmsnorm = all_layers_qk_rmsnorm or is_recurrent_layer
attn_block = AttentionBlock(
dim,
block_width = block_width,
dim_head = dim_head,
heads = heads,
qk_rmsnorm = qk_rmsnorm,
num_state_vectors = layer_num_state_vectors,
use_flash_attn = use_flash_attn,
num_external_state_reads = num_external_state_reads,
state_read_before_write = False,
)
ff_block = FeedForward(dim, mult = ff_mult)
if is_recurrent_layer:
read_layer = self.write_to_read_map[layer]
self.read_state_router[read_layer].append(attn_block.state_container)
self.layers.append(nn.ModuleList([
attn_block,
ff_block
]))
# (compressed) memory management
self.mem_manager = MemoryManager(
dim = dim_head,
layers = depth,
mem_lengths = block_width if not use_compressed_mem else (block_width, block_width // 2),
compress_factors = 1 if not use_compressed_mem else (1, compressed_mem_factor)
)
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias = False)
)
self.max_seq_len = max_seq_len
self.block_width = block_width
assert divisible_by(max_seq_len, block_width)
self.ignore_index = ignore_index
self.register_buffer('cached_causal_attn_mask', None, persistent = False)
@property
def device(self):
return next(self.parameters()).device
def get_causal_attn_mask(self, width):
if exists(self.cached_causal_attn_mask):
cached_mask = self.cached_causal_attn_mask
cached_width = cached_mask.shape[-2]
padding = (width - cached_width) // 2
j_slice = Ellipsis if padding == 0 else slice(padding, -padding)
return cached_mask[:cached_width, j_slice]
device = self.device
causal_mask = torch.ones((width, width), device = device, dtype = torch.bool).triu(1)
return ~causal_mask
@torch.no_grad()
@eval_decorator
def generate(
self,
prime,
length = None,
xl_memories: List[torch.Tensor] = [],
states: List[torch.Tensor] = [],
temperature = 1.,
filter_thres = 0.9,
return_memories_and_states = False
):
length = default(length, self.max_seq_len + 1)
start_len = prime.shape[-1]
assert start_len < self.max_seq_len
assert length <= (self.max_seq_len + 1)
assert start_len < length
output = prime
memories = []
for ind in range(length - start_len):
logits, next_memories, next_states = self.forward(
output,
xl_memories = xl_memories,
states = states
)
logits = logits[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature)
sampled = rearrange(sampled, 'b -> b 1')
output = torch.cat((output, sampled), dim = -1)
if divisible_by(output.shape[-1] - 1, self.max_seq_len): # on the sampling of the last token in the current window, set new memories and states
memories = next_memories
states = next_states
output = output[:, start_len:]
if return_memories_and_states:
return output, memories, states
return output
def forward(
self,
x,
return_loss = False,
xl_memories: List[torch.Tensor] = [],
states: List[torch.Tensor] = [],
return_memories_and_states = None # can force to either return memory + state or not. by default will only return when number of tokens == max_seq_len
):
device = x.device
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
# get sequence length i and j for dynamic pos bias
assert x.shape[-1] <= self.max_seq_len
w = self.block_width
# token embedding
x = self.token_emb(x)
# dynamic pos bias
attn_mask = self.get_causal_attn_mask(w)
rotary_pos_emb, xpos_scale = self.rotary_pos_emb()
# only return memories and state if at the full block width, but can be overridden
return_memories_and_states = default(return_memories_and_states, self.max_seq_len == x.shape[-2])
# ready output tensor, to be concatted to block by block
batch, _, dim = x.shape
out = torch.empty(batch, 0, dim, dtype = x.dtype, device = self.device)
# split input into blocks of width w
input_blocks = x.split(w, dim = -2)
# process each block at a time
for input_block in input_blocks:
input_block_length = input_block.shape[-2]
# ready xl memories and states
iter_xl_memories = iter(xl_memories)
iter_states = iter(states)
next_xl_memories = []
next_states = []
# set the states on the appropriate state containers
for attn, _ in self.layers:
if not attn.is_recurrent_layer:
continue
attn.state_container.set_next_read_state(next(iter_states, None))
# go through layers
for ind, (attn, ff) in enumerate(self.layers):
# determine if the layer requires transformer xl memories
layer = ind + 1
# whether to pass in xl memories
attn_kwargs = dict(
rotary_pos_emb = rotary_pos_emb,
xpos_scale = xpos_scale,
attn_mask = attn_mask,
xl_memories = next(iter_xl_memories, None),
read_from_state_containers = self.read_state_router[layer]
)
# attention layer
residual = input_block
attn_branch_out, layer_xl_memories, layer_next_states = attn(input_block, **attn_kwargs)
if exists(layer_xl_memories):
next_xl_memories.append(layer_xl_memories)
if exists(layer_next_states):
next_states.append(layer_next_states)
input_block = attn_branch_out + residual
# feedforward layer
input_block = ff(input_block) + input_block
# concat to output
out = torch.cat((out, input_block), dim = -2)
# set new xl memories and states
states = next_states
if input_block_length == w:
xl_memories = self.mem_manager(xl_memories, next_xl_memories)
# project to logits
logits = self.to_logits(out)
# detach the states and memories
returned_next_states = list(map(torch.detach, states)) if return_memories_and_states else None
returned_next_xl_memories = list(map(torch.detach, xl_memories)) if return_memories_and_states else None
# whether to return logits
if not return_loss:
return logits, returned_next_xl_memories, returned_next_states
# cross entropy loss
logits = rearrange(logits, 'b n c -> b c n')
loss = F.cross_entropy(logits, labels, ignore_index = self.ignore_index)
return loss, returned_next_xl_memories, returned_next_states
# recurrent trainer wrapper
@beartype
class RecurrentTrainerWrapper(nn.Module):
def __init__(
self,
transformer: BlockRecurrentTransformer,
xl_memories_dropout = 0.,
state_dropout = 0.
):
super().__init__()
self.transformer = transformer
self.seq_len = transformer.max_seq_len
self.xl_memories_dropout = xl_memories_dropout
self.state_dropout = state_dropout
@eval_decorator
@torch.no_grad()
def generate(
self,
prime,
length,
**kwargs
):
seq_len = self.seq_len
start_len = prime.shape[-1]
assert start_len < length
output = prime
current_len = start_len
memories = []
states = []
# determine lengths
has_remainder = not divisible_by(length, seq_len)
remainder_amount = length % seq_len
total_segments = math.ceil(length / seq_len)
if not has_remainder:
lengths = (*((seq_len + 1,) * (total_segments - 1)), seq_len)
elif remainder_amount == 1:
lengths = (seq_len + 1,) * (total_segments - 1)
else:
lengths = (*((seq_len + 1,) * (total_segments - 1)), remainder_amount)
# loop through lengths
for next_length in lengths:
segment_output, memories, states = self.transformer.generate(
output[:, -current_len:],
length = next_length,
xl_memories = memories,
states = states,
return_memories_and_states = True,
**kwargs
)
output = torch.cat((output, segment_output), dim = -1)
current_len = 1
return output[:, start_len:]
def forward(
self,
x,
return_memories_and_states = False
):
total_seq_len, seq_len = x.shape[1], self.seq_len
assert divisible_by(total_seq_len - 1, seq_len), f'length of sequence ({total_seq_len}) must be equal to a multiple of {seq_len} + 1 (one extra token) during training'
segments = total_seq_len // seq_len
total_loss = 0.
memories = []
states = []
for ind in range(segments):
start = ind * seq_len
end = start + seq_len + 1
if self.training and random() < self.xl_memories_dropout:
memories.clear()
if self.training and random() < self.state_dropout:
states.clear()
loss, memories, states = self.transformer(
x[:, start:end],
xl_memories = memories,
states = states,
return_loss = True
)
total_loss = total_loss + (loss / segments)
if return_memories_and_states:
return total_loss, memories, states
return total_loss
|
from setuptools import setup, find_packages
setup(
name = 'adan-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Adan - (ADAptive Nesterov momentum algorithm) Optimizer in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/Adan-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'optimizer',
],
install_requires=[
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import math
import torch
from torch.optim import Optimizer
def exists(val):
return val is not None
class Adan(Optimizer):
def __init__(
self,
params,
lr = 1e-3,
betas = (0.02, 0.08, 0.01),
eps = 1e-8,
weight_decay = 0,
restart_cond: callable = None
):
assert len(betas) == 3
defaults = dict(
lr = lr,
betas = betas,
eps = eps,
weight_decay = weight_decay,
restart_cond = restart_cond
)
super().__init__(params, defaults)
def step(self, closure = None):
loss = None
if exists(closure):
loss = closure()
for group in self.param_groups:
lr = group['lr']
beta1, beta2, beta3 = group['betas']
weight_decay = group['weight_decay']
eps = group['eps']
restart_cond = group['restart_cond']
for p in group['params']:
if not exists(p.grad):
continue
data, grad = p.data, p.grad.data
assert not grad.is_sparse
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['prev_grad'] = torch.zeros_like(grad)
state['m'] = torch.zeros_like(grad)
state['v'] = torch.zeros_like(grad)
state['n'] = torch.zeros_like(grad)
step, m, v, n, prev_grad = state['step'], state['m'], state['v'], state['n'], state['prev_grad']
if step > 0:
prev_grad = state['prev_grad']
# main algorithm
m.mul_(1 - beta1).add_(grad, alpha = beta1)
grad_diff = grad - prev_grad
v.mul_(1 - beta2).add_(grad_diff, alpha = beta2)
next_n = (grad + (1 - beta2) * grad_diff) ** 2
n.mul_(1 - beta3).add_(next_n, alpha = beta3)
# bias correction terms
step += 1
correct_m, correct_v, correct_n = map(lambda n: 1 / (1 - (1 - n) ** step), (beta1, beta2, beta3))
# gradient step
def grad_step_(data, m, v, n):
weighted_step_size = lr / (n * correct_n).sqrt().add_(eps)
denom = 1 + weight_decay * lr
data.addcmul_(weighted_step_size, (m * correct_m + (1 - beta2) * v * correct_v), value = -1.).div_(denom)
grad_step_(data, m, v, n)
# restart condition
if exists(restart_cond) and restart_cond(state):
m.data.copy_(grad)
v.zero_()
n.data.copy_(grad ** 2)
grad_step_(data, m, v, n)
# set new incremented step
prev_grad.copy_(grad)
state['step'] = step
return loss
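# minimal usage sketch (an assumption, not part of the original file) - one Adan update on a
# tiny linear layer; `x` and `y` stand in for any batch of inputs and targets
#
# model = torch.nn.Linear(4, 1)
# optim = Adan(model.parameters(), lr = 1e-3, betas = (0.02, 0.08, 0.01), weight_decay = 0.02)
# loss = ((model(x) - y) ** 2).mean()
# loss.backward()
# optim.step()
# optim.zero_grad()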
|
from adan_pytorch.adan import Adan
|
from setuptools import setup, find_packages
setup(
name = 'bidirectional-cross-attention',
packages = find_packages(exclude=[]),
version = '0.0.4',
license='MIT',
description = 'Bidirectional Cross Attention',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/bidirectional-cross-attention',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
import torch
from torch import nn
from einops import rearrange
from torch import einsum
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def stable_softmax(t, dim = -1):
t = t - t.amax(dim = dim, keepdim = True)
return t.softmax(dim = dim)
# bidirectional cross attention - have two sequences attend to each other with 1 attention step
class BidirectionalCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
context_dim = None,
dropout = 0.,
talking_heads = False,
prenorm = False,
):
super().__init__()
context_dim = default(context_dim, dim)
self.norm = nn.LayerNorm(dim) if prenorm else nn.Identity()
self.context_norm = nn.LayerNorm(context_dim) if prenorm else nn.Identity()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.dropout = nn.Dropout(dropout)
self.context_dropout = nn.Dropout(dropout)
self.to_qk = nn.Linear(dim, inner_dim, bias = False)
self.context_to_qk = nn.Linear(context_dim, inner_dim, bias = False)
self.to_v = nn.Linear(dim, inner_dim, bias = False)
self.context_to_v = nn.Linear(context_dim, inner_dim, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.context_to_out = nn.Linear(inner_dim, context_dim)
self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False) if talking_heads else nn.Identity()
self.context_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) if talking_heads else nn.Identity()
def forward(
self,
x,
context,
mask = None,
context_mask = None,
return_attn = False,
rel_pos_bias = None
):
b, i, j, h, device = x.shape[0], x.shape[-2], context.shape[-2], self.heads, x.device
x = self.norm(x)
context = self.context_norm(context)
# get shared query/keys and values for sequence and context
qk, v = self.to_qk(x), self.to_v(x)
context_qk, context_v = self.context_to_qk(context), self.context_to_v(context)
# split out head
qk, context_qk, v, context_v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (qk, context_qk, v, context_v))
# get similarities
sim = einsum('b h i d, b h j d -> b h i j', qk, context_qk) * self.scale
# relative positional bias, if supplied
if exists(rel_pos_bias):
sim = sim + rel_pos_bias
# mask
if exists(mask) or exists(context_mask):
mask = default(mask, torch.ones((b, i), device = device, dtype = torch.bool))
context_mask = default(context_mask, torch.ones((b, j), device = device, dtype = torch.bool))
attn_mask = rearrange(mask, 'b i -> b 1 i 1') * rearrange(context_mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# get attention along both sequence length and context length dimensions
# shared similarity matrix
attn = stable_softmax(sim, dim = -1)
context_attn = stable_softmax(sim, dim = -2)
# dropouts
attn = self.dropout(attn)
context_attn = self.context_dropout(context_attn)
# talking heads
attn = self.talking_heads(attn)
context_attn = self.context_talking_heads(context_attn)
# src sequence aggregates values from context, context aggregates values from src sequence
out = einsum('b h i j, b h j d -> b h i d', attn, context_v)
context_out = einsum('b h j i, b h j d -> b h i d', context_attn, v)
# merge heads and combine out
out, context_out = map(lambda t: rearrange(t, 'b h n d -> b n (h d)'), (out, context_out))
out = self.to_out(out)
context_out = self.context_to_out(context_out)
if return_attn:
return out, context_out, attn, context_attn
return out, context_out
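# minimal usage sketch (an assumption, not part of the original file) - two sequences of
# different dimensions attend to each other in one shared attention step
#
# joint_attn = BidirectionalCrossAttention(dim = 512, heads = 8, context_dim = 384, prenorm = True)
# seq = torch.randn(1, 256, 512)
# context = torch.randn(1, 128, 384)
# seq_out, context_out = joint_attn(seq, context) # (1, 256, 512), (1, 128, 384)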
|
from bidirectional_cross_attention.bidirectional_cross_attention import BidirectionalCrossAttention
|
from setuptools import setup, find_packages
setup(
name = 'byol-pytorch',
packages = find_packages(exclude=['examples']),
version = '0.6.0',
license='MIT',
description = 'Self-supervised contrastive learning made simple',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/byol-pytorch',
keywords = [
'self-supervised learning',
'artificial intelligence'
],
install_requires=[
'torch>=1.6',
'torchvision>=0.8'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
from byol_pytorch.byol_pytorch import BYOL
|
import copy
import random
from functools import wraps
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms as T
# helper functions
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
# MLP class for projector and predictor
def MLP(dim, projection_size, hidden_size=4096):
return nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def SimSiamMLP(dim, projection_size, hidden_size=4096):
return nn.Sequential(
nn.Linear(dim, hidden_size, bias=False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, hidden_size, bias=False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size, bias=False),
nn.BatchNorm1d(projection_size, affine=False)
)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projector and predictor nets
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size, layer = -2, use_simsiam_mlp = False):
super().__init__()
self.net = net
self.layer = layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.use_simsiam_mlp = use_simsiam_mlp
self.hidden = {}
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, input, output):
device = input[0].device
self.hidden[device] = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
create_mlp_fn = MLP if not self.use_simsiam_mlp else SimSiamMLP
projector = create_mlp_fn(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
self.hidden.clear()
_ = self.net(x)
hidden = self.hidden[x.device]
self.hidden.clear()
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x, return_projection = True):
representation = self.get_representation(x)
if not return_projection:
return representation
projector = self._get_projector(representation)
projection = projector(representation)
return projection, representation
# main class
class BYOL(nn.Module):
def __init__(
self,
net,
image_size,
hidden_layer = -2,
projection_size = 256,
projection_hidden_size = 4096,
augment_fn = None,
augment_fn2 = None,
moving_average_decay = 0.99,
use_momentum = True
):
super().__init__()
self.net = net
# default SimCLR augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
T.RandomResizedCrop((image_size, image_size)),
T.Normalize(
mean=torch.tensor([0.485, 0.456, 0.406]),
std=torch.tensor([0.229, 0.224, 0.225])),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer, use_simsiam_mlp=not use_momentum)
self.use_momentum = use_momentum
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size)
# get device of network and make wrapper same device
device = get_module_device(net)
self.to(device)
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, image_size, image_size, device=device))
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)
def forward(
self,
x,
return_embedding = False,
return_projection = True
):
assert not (self.training and x.shape[0] == 1), 'you must have greater than 1 sample when training, due to the batchnorm in the projection layer'
if return_embedding:
return self.online_encoder(x, return_projection = return_projection)
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one, _ = self.online_encoder(image_one)
online_proj_two, _ = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder() if self.use_momentum else self.online_encoder
target_proj_one, _ = target_encoder(image_one)
target_proj_two, _ = target_encoder(image_two)
target_proj_one.detach_()
target_proj_two.detach_()
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
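
# usage sketch (hedged) - a minimal self-supervised loop with a torchvision resnet, mirroring how
# the class above is meant to be driven; the backbone, optimizer and batch size are illustrative

def _byol_example():
    from torchvision import models

    resnet = models.resnet50(pretrained = True)
    learner = BYOL(resnet, image_size = 256, hidden_layer = 'avgpool')
    opt = torch.optim.Adam(learner.parameters(), lr = 3e-4)

    for _ in range(2):  # a couple of dummy steps on random images
        images = torch.randn(4, 3, 256, 256)
        loss = learner(images)
        opt.zero_grad()
        loss.backward()
        opt.step()
        learner.update_moving_average()  # keep the target encoder as an EMA of the online encoder

# _byol_example()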
|
import os
import argparse
import multiprocessing
from pathlib import Path
from PIL import Image
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from byol_pytorch import BYOL
import pytorch_lightning as pl
# test model, a resnet 50
resnet = models.resnet50(pretrained=True)
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
parser.add_argument('--image_folder', type=str, required = True,
help='path to your folder of images for self-supervised learning')
args = parser.parse_args()
# constants
BATCH_SIZE = 32
EPOCHS = 1000
LR = 3e-4
NUM_GPUS = 2
IMAGE_SIZE = 256
IMAGE_EXTS = ['.jpg', '.png', '.jpeg']
NUM_WORKERS = multiprocessing.cpu_count()
# pytorch lightning module
class SelfSupervisedLearner(pl.LightningModule):
def __init__(self, net, **kwargs):
super().__init__()
self.learner = BYOL(net, **kwargs)
def forward(self, images):
return self.learner(images)
def training_step(self, images, _):
loss = self.forward(images)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=LR)
def on_before_zero_grad(self, _):
if self.learner.use_momentum:
self.learner.update_moving_average()
# images dataset
def expand_greyscale(t):
return t.expand(3, -1, -1)
class ImagesDataset(Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.paths = []
for path in Path(f'{folder}').glob('**/*'):
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTS:
self.paths.append(path)
print(f'{len(self.paths)} images found')
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Lambda(expand_greyscale)
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
img = img.convert('RGB')
return self.transform(img)
# main
if __name__ == '__main__':
ds = ImagesDataset(args.image_folder, IMAGE_SIZE)
train_loader = DataLoader(ds, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True)
model = SelfSupervisedLearner(
resnet,
image_size = IMAGE_SIZE,
hidden_layer = 'avgpool',
projection_size = 256,
projection_hidden_size = 4096,
moving_average_decay = 0.99
)
trainer = pl.Trainer(
gpus = NUM_GPUS,
max_epochs = EPOCHS,
accumulate_grad_batches = 1,
sync_batchnorm = True
)
trainer.fit(model, train_loader)
|
from all_normalization_transformer import TransformerLM
from all_normalization_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 3e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = TransformerLM(
num_tokens = 256,
dim = 512,
depth = 12,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
only_norm = True,
shared_kv = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer (fromstring is deprecated); copy so the array is writable for torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:SEQ_LEN]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def default(value, default):
return value if value is not None else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > 1.0 - thres
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('src_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, src_mask=input_mask, **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
input_mask = F.pad(input_mask, (1, 0), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, *args, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
m = kwargs.pop('input_mask', None)
xi, xo = x[:, :-1], x[:, 1:]
if m is not None:
assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
kwargs.update(input_mask = m[:, :-1])
out = self.net(xi, *args, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
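
# usage sketch (hedged) - wrapping the package's TransformerLM for training loss and sampling;
# model sizes and sequence lengths here are illustrative only

def _autoregressive_wrapper_example():
    from all_normalization_transformer import TransformerLM

    lm = TransformerLM(num_tokens = 256, dim = 64, depth = 2, max_seq_len = 128, heads = 4, causal = True)
    wrapper = AutoregressiveWrapper(lm)

    seq = torch.randint(0, 256, (2, 128))
    loss = wrapper(seq)                     # next-token cross entropy on seq[:, :-1] -> seq[:, 1:]
    loss.backward()

    prime = torch.randint(0, 256, (64,))
    sampled = wrapper.generate(prime, 64)   # (64,) sampled continuation
    return sampled

# _autoregressive_wrapper_example()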
|
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helpers
def cum_mean(t):
device = t.device
running_num = torch.arange(t.shape[-1], device=t.device) + 1
return t.cumsum(dim=-1) / running_num
def normalize(t, eps=1e-8):
t -= t.mean(dim=-1, keepdim=True)
s = (t ** 2).mean(dim=-1, keepdim=True)
return t * torch.rsqrt(s + eps)
def causal_normalize(t, eps=1e-8):
t -= cum_mean(t).diagonal(dim1=-2, dim2=-1)[..., None]
s = cum_mean(t ** 2).diagonal(dim1=-2, dim2=-1)[..., None]
return t * torch.rsqrt(s + eps)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class PostNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.fn(x)
return self.norm(x)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
            nn.Linear(dim, dim * mult),
            nn.GELU(),
            nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, causal = False, shared_kv = False):
super().__init__()
self.causal = causal
self.heads = heads
self.scale = dim ** -0.5
self.shared_kv = shared_kv
self.num_qkv = 3 if not shared_kv else 2
self.to_qkv = nn.Linear(dim, dim * self.num_qkv, bias = False)
self.to_out = nn.Linear(dim, dim)
self.norm_g = nn.Parameter(torch.ones(1, heads, 1, 1))
self.norm_b = nn.Parameter(torch.zeros(1, heads, 1, 1))
def forward(self, x):
b, n, _, h, device = *x.shape, self.heads, x.device
qkv = self.to_qkv(x)
qkv = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv = self.num_qkv, h = h)
if self.shared_kv:
q, k = qkv
v = k
else:
q, k, v = qkv
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
if self.causal:
mask = torch.ones(n, n, device = device).triu_(1).bool()
dots.masked_fill_(mask, 0.)
normalize_fn = causal_normalize if self.causal else normalize
normed_attn = normalize_fn(dots)
attn = normed_attn * self.norm_g + self.norm_b
if self.causal:
attn.masked_fill_(mask, 0.)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PostNorm(dim, Attention(dim, heads, causal = causal, shared_kv = shared_kv))),
Residual(PreNorm(dim, FeedForward(dim))) if not only_norm else nn.Identity(),
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class TransformerLM(nn.Module):
def __init__(self, *, num_tokens, dim, depth, max_seq_len, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.transformer = Transformer(dim, depth, heads, causal = causal, only_norm = only_norm, shared_kv = shared_kv)
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x, **kwargs):
_, n = x.shape
x = self.token_emb(x)
x += self.pos_emb(torch.arange(n, device=x.device))
x = self.transformer(x)
x = self.to_logits(x)
return x
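
# quick shape test (sketch), following the smoke-test convention used elsewhere in this collection

def _transformer_lm_test():
    model = TransformerLM(
        num_tokens = 256,
        dim = 64,
        depth = 2,
        max_seq_len = 128,
        heads = 4,
        causal = True,
        only_norm = True,
        shared_kv = True
    )
    x = torch.randint(0, 256, (2, 128))
    logits = model(x)
    print(logits.shape)  # expected: torch.Size([2, 128, 256])

# _transformer_lm_test()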
|
from all_normalization_transformer.all_normalization_transformer import TransformerLM
from all_normalization_transformer.autoregressive_wrapper import AutoregressiveWrapper
|
from setuptools import setup, find_packages
exec(open('audiolm_pytorch/version.py').read())
setup(
name = 'audiolm-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'AudioLM - Language Modeling Approach to Audio Generation from Google Research - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/audiolm-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'audio generation'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6.1',
'ema-pytorch>=0.2.2',
'encodec',
'fairseq',
'joblib',
'lion-pytorch',
'local-attention>=1.8.4',
'scikit-learn',
'sentencepiece',
'torch>=1.12',
'torchaudio',
'transformers',
'tqdm',
'vector-quantize-pytorch>=1.7.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
__version__ = '1.4.1'
|
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
from beartype import beartype
from beartype.typing import Union, List
# fewer warning messages, since only the encoder is used
transformers.logging.set_verbosity_error()
# helper functions
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config = config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
raise ValueError(f'unknown t5 name {name}')
return config.d_model
# encoding text
@beartype
def t5_encode_text(
texts: Union[str, List[str]],
name = DEFAULT_T5_NAME,
output_device = None
):
if isinstance(texts, str):
texts = [texts]
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = 'pt',
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.inference_mode():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask[..., None].bool()
if not exists(output_device):
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
    encoded_text = encoded_text.to(output_device)
    attn_mask = attn_mask.to(output_device)
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
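
# usage sketch (hedged) - encodes a small batch of strings with the default T5 encoder;
# the first call downloads the pretrained weights from the huggingface hub

def _t5_encode_example():
    texts = ['a squirrel gathering acorns', 'rain on a tin roof']
    embeds = t5_encode_text(texts)   # (batch, seq, d_model), padded positions zeroed out
    print(embeds.shape, get_encoded_dim(DEFAULT_T5_NAME))

# _t5_encode_example()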
|
from pathlib import Path
import torch
from torch import nn, einsum
from torchaudio.functional import resample
from einops import rearrange, repeat, pack, unpack
from audiolm_pytorch.utils import curtail_to_multiple
# suppress a few warnings
def noop(*args, **kwargs):
pass
import warnings
import logging
logging.root.setLevel(logging.ERROR)
warnings.warn = noop
# import fairseq and joblib for hubert
import joblib
import fairseq
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class HubertWithKmeans(nn.Module):
"""
checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
or you can train your own
"""
def __init__(
self,
checkpoint_path,
kmeans_path,
target_sample_hz = 16000,
seq_len_multiple_of = None,
output_layer = 9
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
self.output_layer = output_layer
model_path = Path(checkpoint_path)
kmeans_path = Path(kmeans_path)
assert model_path.exists(), f'path {checkpoint_path} does not exist'
assert kmeans_path.exists(), f'path {kmeans_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
kmeans = joblib.load(kmeans_path)
self.kmeans = kmeans
self.register_buffer(
'cluster_centers',
torch.from_numpy(kmeans.cluster_centers_)
)
@property
def groups(self):
return 1
@property
def codebook_size(self):
return self.kmeans.n_clusters
@property
def downsample_factor(self):
# todo: double check
return 320
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
batch, device = wav_input.shape[0], wav_input.device
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model(
wav_input,
features_only = True,
mask = False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
output_layer = self.output_layer
)['x']
batched_cluster_centers = repeat(self.cluster_centers, 'c d -> b c d', b = embed.shape[0])
dists = -torch.cdist(embed, batched_cluster_centers, p = 2)
clusters = dists.argmax(dim = -1)
        if not flatten:
            return clusters
        return rearrange(clusters, 'b ... -> b (...)')
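
# usage sketch (hedged) - the checkpoint and kmeans paths below are placeholders; download them as
# described in the class docstring. the waveform is random noise purely to illustrate shapes

def _hubert_kmeans_example():
    wav2vec = HubertWithKmeans(
        checkpoint_path = './hubert_base_ls960.pt',                 # placeholder path
        kmeans_path = './hubert_base_ls960_L9_km500_model.bin'      # placeholder path
    )
    wav = torch.randn(1, 16000)            # one second of 16kHz audio
    semantic_token_ids = wav2vec(wav)      # (1, frames) cluster ids
    print(semantic_token_ids.shape)

# _hubert_kmeans_example()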
|
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from audiolm_pytorch.audiolm_pytorch import AudioLM
from audiolm_pytorch.soundstream import SoundStream, AudioLMSoundStream, MusicLMSoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.audiolm_pytorch import SemanticTransformer, CoarseTransformer, FineTransformer
from audiolm_pytorch.audiolm_pytorch import FineTransformerWrapper, CoarseTransformerWrapper, SemanticTransformerWrapper
from audiolm_pytorch.vq_wav2vec import FairseqVQWav2Vec
from audiolm_pytorch.hubert_kmeans import HubertWithKmeans
from audiolm_pytorch.trainer import SoundStreamTrainer, SemanticTransformerTrainer, FineTransformerTrainer, CoarseTransformerTrainer
from audiolm_pytorch.audiolm_pytorch import get_embeds
|
import functools
from itertools import cycle
from pathlib import Path
from functools import partial, wraps
from itertools import zip_longest
from typing import Optional
import torch
from torch import nn, einsum
from torch.autograd import grad as torch_grad
import torch.nn.functional as F
from torch.linalg import vector_norm
import torchaudio.transforms as T
from torchaudio.functional import resample
from einops import rearrange, reduce, pack, unpack
from vector_quantize_pytorch import GroupedResidualVQ
from local_attention import LocalMHA
from local_attention.transformer import FeedForward, DynamicPositionBias
from audiolm_pytorch.utils import curtail_to_multiple
from audiolm_pytorch.version import __version__
from packaging import version
parsed_version = version.parse(__version__)
import pickle
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(t, l = 1):
return ((t,) * l) if not isinstance(t, tuple) else t
def filter_by_keys(fn, d):
return {k: v for k, v in d.items() if fn(k)}
def map_keys(fn, d):
return {fn(k): v for k, v in d.items()}
# gan losses
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def gradient_penalty(wave, output, weight = 10):
batch_size, device = wave.shape[0], wave.device
gradients = torch_grad(
outputs = output,
inputs = wave,
grad_outputs = torch.ones_like(output),
create_graph = True,
retain_graph = True,
only_inputs = True
)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((vector_norm(gradients, dim = 1) - 1) ** 2).mean()
# better sequential
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
# discriminators
class MultiScaleDiscriminator(nn.Module):
def __init__(
self,
channels = 16,
layers = 4,
groups = (4, 16, 64, 256),
chan_max = 1024,
input_channels = 1
):
super().__init__()
self.init_conv = nn.Conv1d(input_channels, channels, 15, padding = 7)
self.conv_layers = nn.ModuleList([])
curr_channels = channels
for _, group in zip(range(layers), groups):
chan_out = min(curr_channels * 4, chan_max)
self.conv_layers.append(nn.Sequential(
nn.Conv1d(curr_channels, chan_out, 41, stride = 4, padding = 20, groups = group),
leaky_relu()
))
curr_channels = chan_out
self.final_conv = nn.Sequential(
nn.Conv1d(curr_channels, curr_channels, 5, padding = 2),
leaky_relu(),
nn.Conv1d(curr_channels, 1, 3, padding = 1),
)
def forward(
self,
x,
return_intermediates = False
):
x = self.init_conv(x)
intermediates = []
for layer in self.conv_layers:
x = layer(x)
intermediates.append(x)
out = self.final_conv(x)
if not return_intermediates:
return out
return out, intermediates
# autoregressive squeeze excitation
# https://arxiv.org/abs/1709.01507
class SqueezeExcite(nn.Module):
def __init__(self, dim, reduction_factor = 4, dim_minimum = 8):
super().__init__()
dim_inner = max(dim_minimum, dim // reduction_factor)
self.net = nn.Sequential(
nn.Conv1d(dim, dim_inner, 1),
nn.SiLU(),
nn.Conv1d(dim_inner, dim, 1),
nn.Sigmoid()
)
def forward(self, x):
seq, device = x.shape[-2], x.device
# cumulative mean - since it is autoregressive
cum_sum = x.cumsum(dim = -2)
denom = torch.arange(1, seq + 1, device = device).float()
cum_mean = cum_sum / rearrange(denom, 'n -> n 1')
        # sigmoid gate (squeeze-excite style), computed from the cumulative mean
gate = self.net(cum_mean)
return x * gate
# complex stft discriminator
class ModReLU(nn.Module):
"""
https://arxiv.org/abs/1705.09792
https://github.com/pytorch/pytorch/issues/47052#issuecomment-718948801
"""
def __init__(self):
super().__init__()
self.b = nn.Parameter(torch.tensor(0.))
def forward(self, x):
return F.relu(torch.abs(x) + self.b) * torch.exp(1.j * torch.angle(x))
class ComplexConv2d(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel_size,
stride = 1,
padding = 0
):
super().__init__()
conv = nn.Conv2d(dim, dim_out, kernel_size, dtype = torch.complex64)
self.weight = nn.Parameter(torch.view_as_real(conv.weight))
self.bias = nn.Parameter(torch.view_as_real(conv.bias))
self.stride = stride
self.padding = padding
def forward(self, x):
weight, bias = map(torch.view_as_complex, (self.weight, self.bias))
x = x.to(weight.dtype)
return F.conv2d(x, weight, bias, stride = self.stride, padding = self.padding)
def ComplexSTFTResidualUnit(chan_in, chan_out, strides):
kernel_sizes = tuple(map(lambda t: t + 2, strides))
paddings = tuple(map(lambda t: t // 2, kernel_sizes))
return nn.Sequential(
Residual(Sequential(
ComplexConv2d(chan_in, chan_in, 3, padding = 1),
ModReLU(),
ComplexConv2d(chan_in, chan_in, 3, padding = 1)
)),
ComplexConv2d(chan_in, chan_out, kernel_sizes, stride = strides, padding = paddings)
)
class ComplexSTFTDiscriminator(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = ((1, 2), (2, 2), (1, 2), (2, 2), (1, 2), (2, 2)),
chan_mults = (1, 2, 4, 4, 8, 8),
input_channels = 1,
n_fft = 1024,
hop_length = 256,
win_length = 1024,
stft_normalized = False,
logits_abs = True
):
super().__init__()
self.init_conv = ComplexConv2d(input_channels, channels, 7, padding = 3)
layer_channels = tuple(map(lambda mult: mult * channels, chan_mults))
layer_channels = (channels, *layer_channels)
layer_channels_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
curr_channels = channels
self.layers = nn.ModuleList([])
for layer_stride, (chan_in, chan_out) in zip(strides, layer_channels_pairs):
self.layers.append(ComplexSTFTResidualUnit(chan_in, chan_out, layer_stride))
self.final_conv = ComplexConv2d(layer_channels[-1], 1, (16, 1)) # todo: remove hardcoded 16
# stft settings
self.stft_normalized = stft_normalized
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
# how to output the logits into real space
self.logits_abs = logits_abs
def forward(self, x, return_intermediates = False):
x = rearrange(x, 'b 1 n -> b n')
        '''
        reference (https://arxiv.org/pdf/2107.03312.pdf):
        "The STFT-based discriminator is illustrated in Figure 4 and operates on a single scale,
        computing the STFT with a window length of W = 1024 samples and a hop length of H = 256 samples"
        '''
x = torch.stft(
x,
self.n_fft,
hop_length = self.hop_length,
win_length = self.win_length,
normalized = self.stft_normalized,
return_complex = True
)
x = rearrange(x, 'b ... -> b 1 ...')
intermediates = []
x = self.init_conv(x)
intermediates.append(x)
for layer in self.layers:
x = layer(x)
intermediates.append(x)
complex_logits = self.final_conv(x)
if self.logits_abs:
complex_logits = complex_logits.abs()
else:
complex_logits = torch.view_as_real(complex_logits)
if not return_intermediates:
return complex_logits
return complex_logits, intermediates
# sound stream
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class CausalConv1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, pad_mode = 'reflect', **kwargs):
super().__init__()
dilation = kwargs.get('dilation', 1)
stride = kwargs.get('stride', 1)
self.pad_mode = pad_mode
self.causal_padding = dilation * (kernel_size - 1) + (1 - stride)
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, **kwargs)
def forward(self, x):
x = F.pad(x, (self.causal_padding, 0), mode = self.pad_mode)
return self.conv(x)
class CausalConvTranspose1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, stride, **kwargs):
super().__init__()
self.upsample_factor = stride
self.padding = kernel_size - 1
self.conv = nn.ConvTranspose1d(chan_in, chan_out, kernel_size, stride, **kwargs)
def forward(self, x):
n = x.shape[-1]
out = self.conv(x)
out = out[..., :(n * self.upsample_factor)]
return out
def ResidualUnit(chan_in, chan_out, dilation, kernel_size = 7, squeeze_excite = False, pad_mode = 'reflect'):
return Residual(Sequential(
CausalConv1d(chan_in, chan_out, kernel_size, dilation = dilation, pad_mode = pad_mode),
nn.ELU(),
CausalConv1d(chan_out, chan_out, 1, pad_mode = pad_mode),
nn.ELU(),
SqueezeExcite(chan_out) if squeeze_excite else None
))
def EncoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
it = cycle(cycle_dilations)
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
return nn.Sequential(
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
CausalConv1d(chan_in, chan_out, 2 * stride, stride = stride)
)
def DecoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
even_stride = (stride % 2 == 0)
padding = (stride + (0 if even_stride else 1)) // 2
output_padding = 0 if even_stride else 1
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
it = cycle(cycle_dilations)
return nn.Sequential(
CausalConvTranspose1d(chan_in, chan_out, 2 * stride, stride = stride),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
)
class LocalTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
window_size,
dynamic_pos_bias = False,
**kwargs
):
super().__init__()
self.window_size = window_size
self.layers = nn.ModuleList([])
self.pos_bias = None
if dynamic_pos_bias:
self.pos_bias = DynamicPositionBias(dim = dim // 2, heads = heads)
for _ in range(depth):
self.layers.append(nn.ModuleList([
LocalMHA(dim = dim, heads = heads, qk_rmsnorm = True, window_size = window_size, use_rotary_pos_emb = not dynamic_pos_bias, use_xpos = True, **kwargs),
FeedForward(dim = dim)
]))
def forward(self, x):
w = self.window_size
attn_bias = self.pos_bias(w, w * 2) if exists(self.pos_bias) else None
for attn, ff in self.layers:
x = attn(x, attn_bias = attn_bias) + x
x = ff(x) + x
return x
class FiLM(nn.Module):
def __init__(self, dim, dim_cond):
super().__init__()
self.to_cond = nn.Linear(dim_cond, dim * 2)
def forward(self, x, cond):
gamma, beta = self.to_cond(cond).chunk(2, dim = -1)
return x * gamma + beta
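# quick sketch of the FiLM block above: a small conditioning vector produces a per-feature scale and
# shift (here the 2-dim denoise / no-denoise indicator used by SoundStream further below)
#
#   film = FiLM(dim = 512, dim_cond = 2)
#   x = torch.randn(2, 100, 512)
#   cond = torch.tensor([1., 0.])       # broadcast over batch and sequence
#   out = film(x, cond)                 # (2, 100, 512)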
class SoundStream(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = (2, 4, 5, 8),
channel_mults = (2, 4, 8, 16),
codebook_dim = 512,
codebook_size = 1024,
rq_num_quantizers = 8,
rq_commitment_weight = 1.,
rq_ema_decay = 0.95,
rq_quantize_dropout_multiple_of = 1,
rq_groups = 1,
rq_stochastic_sample_codes = False,
rq_kwargs: dict = {},
input_channels = 1,
discr_multi_scales = (1, 0.5, 0.25),
stft_normalized = False,
enc_cycle_dilations = (1, 3, 9),
dec_cycle_dilations = (1, 3, 9),
multi_spectral_window_powers_of_two = tuple(range(6, 12)),
multi_spectral_n_ffts = 512,
multi_spectral_n_mels = 64,
recon_loss_weight = 1.,
multi_spectral_recon_loss_weight = 1e-5,
adversarial_loss_weight = 1.,
feature_loss_weight = 100,
quantize_dropout_cutoff_index = 1,
target_sample_hz = 16000,
use_local_attn = True,
attn_window_size = 128,
attn_dim_head = 64,
attn_heads = 8,
attn_depth = 1,
attn_xpos_scale_base = None,
attn_dynamic_pos_bias = False,
squeeze_excite = False,
complex_stft_discr_logits_abs = True,
pad_mode = 'reflect',
stft_discriminator: Optional[nn.Module] = None # can pass in own stft discriminator
):
super().__init__()
# for autosaving the config
_locals = locals()
_locals.pop('self', None)
_locals.pop('__class__', None)
self._configs = pickle.dumps(_locals)
# rest of the class
self.target_sample_hz = target_sample_hz # for resampling on the fly
self.single_channel = input_channels == 1
self.strides = strides
layer_channels = tuple(map(lambda t: t * channels, channel_mults))
layer_channels = (channels, *layer_channels)
chan_in_out_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
encoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(chan_in_out_pairs, strides):
encoder_blocks.append(EncoderBlock(chan_in, chan_out, layer_stride, enc_cycle_dilations, squeeze_excite, pad_mode))
self.encoder = nn.Sequential(
CausalConv1d(input_channels, channels, 7, pad_mode = pad_mode),
*encoder_blocks,
CausalConv1d(layer_channels[-1], codebook_dim, 3, pad_mode = pad_mode)
)
attn_kwargs = dict(
dim = codebook_dim,
dim_head = attn_dim_head,
heads = attn_heads,
depth = attn_depth,
window_size = attn_window_size,
xpos_scale_base = attn_xpos_scale_base,
dynamic_pos_bias = attn_dynamic_pos_bias,
prenorm = True,
causal = True
)
self.encoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
self.encoder_film = FiLM(codebook_dim, dim_cond = 2)
self.num_quantizers = rq_num_quantizers
self.codebook_dim = codebook_dim
self.codebook_size = codebook_size
self.rq_groups = rq_groups
self.rq = GroupedResidualVQ(
dim = codebook_dim,
num_quantizers = rq_num_quantizers,
codebook_size = codebook_size,
groups = rq_groups,
decay = rq_ema_decay,
commitment_weight = rq_commitment_weight,
quantize_dropout_multiple_of = rq_quantize_dropout_multiple_of,
kmeans_init = True,
threshold_ema_dead_code = 2,
quantize_dropout = True,
quantize_dropout_cutoff_index = quantize_dropout_cutoff_index,
stochastic_sample_codes = rq_stochastic_sample_codes,
**rq_kwargs
)
self.decoder_film = FiLM(codebook_dim, dim_cond = 2)
self.decoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
decoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(reversed(chan_in_out_pairs), reversed(strides)):
decoder_blocks.append(DecoderBlock(chan_out, chan_in, layer_stride, dec_cycle_dilations, squeeze_excite, pad_mode))
self.decoder = nn.Sequential(
CausalConv1d(codebook_dim, layer_channels[-1], 7, pad_mode = pad_mode),
*decoder_blocks,
CausalConv1d(channels, input_channels, 7, pad_mode = pad_mode)
)
# discriminators
self.discr_multi_scales = discr_multi_scales
self.discriminators = nn.ModuleList([MultiScaleDiscriminator() for _ in range(len(discr_multi_scales))])
discr_rel_factors = [int(s1 / s2) for s1, s2 in zip(discr_multi_scales[:-1], discr_multi_scales[1:])]
self.downsamples = nn.ModuleList([nn.Identity()] + [nn.AvgPool1d(2 * factor, stride = factor, padding = factor) for factor in discr_rel_factors])
self.stft_discriminator = stft_discriminator
if not exists(self.stft_discriminator):
self.stft_discriminator = ComplexSTFTDiscriminator(
stft_normalized = stft_normalized,
logits_abs = complex_stft_discr_logits_abs # whether to output as abs() or use view_as_real
)
# multi spectral reconstruction
self.mel_spec_transforms = nn.ModuleList([])
self.mel_spec_recon_alphas = []
num_transforms = len(multi_spectral_window_powers_of_two)
multi_spectral_n_ffts = cast_tuple(multi_spectral_n_ffts, num_transforms)
multi_spectral_n_mels = cast_tuple(multi_spectral_n_mels, num_transforms)
for powers, n_fft, n_mels in zip_longest(multi_spectral_window_powers_of_two, multi_spectral_n_ffts, multi_spectral_n_mels):
win_length = 2 ** powers
alpha = (win_length / 2) ** 0.5
calculated_n_fft = default(max(n_fft, win_length), win_length) # @AndreyBocharnikov said this is usually win length, but overridable
# if any audio experts have an opinion about these settings, please submit a PR
melspec_transform = T.MelSpectrogram(
sample_rate = target_sample_hz,
n_fft = calculated_n_fft,
win_length = win_length,
hop_length = win_length // 4,
n_mels = n_mels,
normalized = stft_normalized
)
self.mel_spec_transforms.append(melspec_transform)
self.mel_spec_recon_alphas.append(alpha)
# loss weights
self.recon_loss_weight = recon_loss_weight
self.multi_spectral_recon_loss_weight = multi_spectral_recon_loss_weight
self.adversarial_loss_weight = adversarial_loss_weight
self.feature_loss_weight = feature_loss_weight
self.register_buffer('zero', torch.tensor([0.]), persistent = False)
@property
def device(self):
return next(self.parameters()).device
@property
def configs(self):
return pickle.loads(self._configs)
def decode_from_codebook_indices(self, quantized_indices):
quantized_indices = rearrange(quantized_indices, 'b n (g q) -> g b n q', g = self.rq_groups)
codes = self.rq.get_codes_from_indices(quantized_indices)
x = reduce(codes, 'g q b n d -> b n (g d)', 'sum')
return self.decode(x)
def decode(self, x, quantize = False):
if quantize:
x, *_ = self.rq(x)
x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
return self.decoder(x)
def save(self, path):
path = Path(path)
pkg = dict(
model = self.state_dict(),
config = self._configs,
version = __version__
)
torch.save(pkg, str(path))
@classmethod
def init_and_load_from(cls, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
assert 'config' in pkg, 'model configs were not found in this saved checkpoint'
config = pickle.loads(pkg['config'])
soundstream = cls(**config)
soundstream.load(path, strict = strict)
return soundstream
def load(self, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# check version
if 'version' in pkg and version.parse(pkg['version']) < parsed_version:
print(f'soundstream model being loaded was trained on an older version of audiolm-pytorch ({pkg["version"]})')
has_ema = 'ema_model' in pkg
model_pkg = pkg['ema_model'] if has_ema else pkg['model']
if has_ema:
model_pkg = filter_by_keys(lambda k: k.startswith('ema_model.'), model_pkg)
model_pkg = map_keys(lambda k: k[len('ema_model.'):], model_pkg)
self.load_state_dict(model_pkg, strict = strict)
def load_from_trainer_saved_obj(self, path):
path = Path(path)
assert path.exists()
obj = torch.load(str(path))
self.load_state_dict(obj['model'])
def non_discr_parameters(self):
return [
*self.encoder.parameters(),
*self.decoder.parameters(),
*(self.encoder_attn.parameters() if exists(self.encoder_attn) else []),
*(self.decoder_attn.parameters() if exists(self.decoder_attn) else []),
*self.encoder_film.parameters(),
*self.decoder_film.parameters()
]
@property
def seq_len_multiple_of(self):
return functools.reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
def process_input(
self,
x,
input_sample_hz = None,
curtail_from_left = False
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
x = curtail_to_multiple(x, self.seq_len_multiple_of, from_left = curtail_from_left)
if x.ndim == 2:
x = rearrange(x, 'b n -> b 1 n')
return x, ps
def forward(
self,
x,
target = None,
is_denoising = None, # if you want to learn film conditioners that teach the soundstream to denoise - target would need to be passed in above
return_encoded = False,
return_discr_loss = False,
return_discr_losses_separately = False,
return_loss_breakdown = False,
return_recons_only = False,
input_sample_hz = None,
apply_grad_penalty = False,
curtail_from_left = False
):
assert not (exists(is_denoising) and not exists(target))
process_input = partial(self.process_input, input_sample_hz = input_sample_hz, curtail_from_left = curtail_from_left)
x, ps = process_input(x)
if exists(target):
target, _ = process_input(target)
orig_x = x.clone()
x = self.encoder(x)
x = rearrange(x, 'b c n -> b n c')
if exists(self.encoder_attn):
x = self.encoder_attn(x)
if exists(is_denoising):
denoise_input = torch.tensor([is_denoising, not is_denoising], dtype = x.dtype, device = self.device) # [1, 0] for denoise, [0, 1] for not denoising
x = self.encoder_film(x, denoise_input)
x, indices, commit_loss = self.rq(x)
if return_encoded:
indices = rearrange(indices, 'g b n q -> b n (g q)')
return x, indices, commit_loss
if exists(is_denoising):
x = self.decoder_film(x, denoise_input)
if exists(self.decoder_attn):
x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
recon_x = self.decoder(x)
if return_recons_only:
recon_x, = unpack(recon_x, ps, '* c n')
return recon_x
# multi-scale discriminator loss
if return_discr_loss:
real, fake = orig_x, recon_x.detach()
stft_discr_loss = None
stft_grad_penalty = None
discr_losses = []
discr_grad_penalties = []
if self.single_channel:
real, fake = orig_x.clone(), recon_x.detach()
stft_real_logits, stft_fake_logits = map(self.stft_discriminator, (real.requires_grad_(), fake))
stft_discr_loss = hinge_discr_loss(stft_fake_logits, stft_real_logits)
if apply_grad_penalty:
stft_grad_penalty = gradient_penalty(real, stft_discr_loss)
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
real_logits, fake_logits = map(discr, (scaled_real.requires_grad_(), scaled_fake))
one_discr_loss = hinge_discr_loss(fake_logits, real_logits)
discr_losses.append(one_discr_loss)
if apply_grad_penalty:
discr_grad_penalties.append(gradient_penalty(scaled_real, one_discr_loss))
if not return_discr_losses_separately:
all_discr_losses = torch.stack(discr_losses).mean()
if exists(stft_discr_loss):
all_discr_losses = all_discr_losses + stft_discr_loss
if exists(stft_grad_penalty):
all_discr_losses = all_discr_losses + stft_grad_penalty
return all_discr_losses
            # return the discriminator losses separately, as List[Tuple[str, Tensor]]
discr_losses_pkg = []
discr_losses_pkg.extend([(f'scale:{scale}', multi_scale_loss) for scale, multi_scale_loss in zip(self.discr_multi_scales, discr_losses)])
discr_losses_pkg.extend([(f'scale_grad_penalty:{scale}', discr_grad_penalty) for scale, discr_grad_penalty in zip(self.discr_multi_scales, discr_grad_penalties)])
if exists(stft_discr_loss):
discr_losses_pkg.append(('stft', stft_discr_loss))
if exists(stft_grad_penalty):
discr_losses_pkg.append(('stft_grad_penalty', stft_grad_penalty))
return discr_losses_pkg
# recon loss
target = default(target, orig_x) # target can also be passed in, in the case of denoising
recon_loss = F.mse_loss(target, recon_x)
# multispectral recon loss - eq (4) and (5) in https://arxiv.org/abs/2107.03312
multi_spectral_recon_loss = self.zero
if self.multi_spectral_recon_loss_weight > 0:
for mel_transform, alpha in zip(self.mel_spec_transforms, self.mel_spec_recon_alphas):
orig_mel, recon_mel = map(mel_transform, (orig_x, recon_x))
log_orig_mel, log_recon_mel = map(log, (orig_mel, recon_mel))
l1_mel_loss = (orig_mel - recon_mel).abs().sum(dim = -2).mean()
l2_log_mel_loss = alpha * vector_norm(log_orig_mel - log_recon_mel, dim = -2).mean()
multi_spectral_recon_loss = multi_spectral_recon_loss + l1_mel_loss + l2_log_mel_loss
# adversarial loss
adversarial_losses = []
discr_intermediates = []
# adversarial loss for multi-scale discriminators
real, fake = orig_x, recon_x
# features from stft
(stft_real_logits, stft_real_intermediates), (stft_fake_logits, stft_fake_intermediates) = map(partial(self.stft_discriminator, return_intermediates=True), (real, fake))
discr_intermediates.append((stft_real_intermediates, stft_fake_intermediates))
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
(real_logits, real_intermediates), (fake_logits, fake_intermediates) = map(partial(discr, return_intermediates = True), (scaled_real, scaled_fake))
discr_intermediates.append((real_intermediates, fake_intermediates))
one_adversarial_loss = hinge_gen_loss(fake_logits)
adversarial_losses.append(one_adversarial_loss)
feature_losses = []
for real_intermediates, fake_intermediates in discr_intermediates:
losses = [F.l1_loss(real_intermediate, fake_intermediate) for real_intermediate, fake_intermediate in zip(real_intermediates, fake_intermediates)]
feature_losses.extend(losses)
feature_loss = torch.stack(feature_losses).mean()
# adversarial loss for stft discriminator
adversarial_losses.append(hinge_gen_loss(stft_fake_logits))
adversarial_loss = torch.stack(adversarial_losses).mean()
# sum commitment loss
all_commitment_loss = commit_loss.sum()
total_loss = recon_loss * self.recon_loss_weight + multi_spectral_recon_loss * self.multi_spectral_recon_loss_weight + adversarial_loss * self.adversarial_loss_weight + feature_loss * self.feature_loss_weight + all_commitment_loss
if return_loss_breakdown:
return total_loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss)
return total_loss
# some default soundstreams
def AudioLMSoundStream(
strides = (2, 4, 5, 8),
target_sample_hz = 16000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
def MusicLMSoundStream(
strides = (3, 4, 5, 8),
target_sample_hz = 24000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
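
# usage sketch (hedged) - a training-style pass on random audio; hyperparameters are illustrative.
# the three calls show the generator loss, the discriminator loss, and reconstruction-only paths

def _soundstream_example():
    soundstream = SoundStream(
        codebook_size = 1024,
        rq_num_quantizers = 8,
        attn_window_size = 128,
        attn_depth = 2
    )
    audio = torch.randn(2, 10240)                               # raw waveform, batch of 2

    loss = soundstream(audio)                                   # recon + adversarial + commitment losses
    loss.backward()

    discr_loss = soundstream(audio, return_discr_loss = True)   # train the discriminators with this
    recons = soundstream(audio, return_recons_only = True)      # reconstructed waveform

# _soundstream_example()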
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('Config', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
causal = self.causal
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
if causal:
causal_mask = torch.ones((q_len, k_len), device = q.device, dtype = torch.bool).triu(k_len - q_len + 1)
mask = mask & ~causal_mask
causal = False
config = self.cuda_config if is_cuda else self.cpu_config
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None, attn_bias = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
assert not exists(attn_bias), 'attention bias not supported for flash attention'
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# attention bias
if exists(attn_bias):
sim = sim + attn_bias
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = sim.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
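
# usage sketch - note the asymmetric shapes this module expects: queries carry a head dimension,
# keys / values are shared across heads (b, n, d), matching the einsums / expand above

def _attend_example():
    attend = Attend(causal = True, flash = False)
    q = torch.randn(2, 8, 1024, 64)    # (batch, heads, seq, dim_head)
    k = torch.randn(2, 1024, 64)       # (batch, seq, dim_head) - shared across heads
    v = torch.randn(2, 1024, 64)
    out = attend(q, k, v)              # (2, 8, 1024, 64)

# _attend_example()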
|
from torch import nn
# functions
def round_down_nearest_multiple(num, divisor):
return num // divisor * divisor
def curtail_to_multiple(t, mult, from_left = False):
data_len = t.shape[-1]
rounded_seq_len = round_down_nearest_multiple(data_len, mult)
seq_slice = slice(None, rounded_seq_len) if not from_left else slice(-rounded_seq_len, None)
return t[..., seq_slice]
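# quick example of the helper above - samples that do not fill a complete multiple are dropped:
#   curtail_to_multiple(torch.arange(10), 4)                    -> tensor([0, 1, 2, 3, 4, 5, 6, 7])
#   curtail_to_multiple(torch.arange(10), 4, from_left = True)  -> tensor([2, 3, 4, 5, 6, 7, 8, 9])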
# base class
class AudioConditionerBase(nn.Module):
pass
|
from pathlib import Path
import torch
from torch import nn
from einops import rearrange
import fairseq
from torchaudio.functional import resample
from audiolm_pytorch.utils import curtail_to_multiple
import logging
logging.root.setLevel(logging.ERROR)
def exists(val):
return val is not None
class FairseqVQWav2Vec(nn.Module):
"""
checkpoint path can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/wav2vec/README.md#vq-wav2vec
specifically download the kmeans model for now
$ wget https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt
"""
def __init__(
self,
checkpoint_path,
target_sample_hz = 24000,
seq_len_multiple_of = None
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
path = Path(checkpoint_path)
assert path.exists(), f'path {checkpoint_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
assert hasattr(self.model, 'vector_quantizer') and hasattr(self.model.vector_quantizer, 'embedding'), 'the vq wav2vec model does not seem to be valid'
@property
def groups(self):
return self.model.vector_quantizer.groups
@property
def downsample_factor(self):
# todo: double check architecture
return 80
@property
def codebook_size(self):
return self.model.vector_quantizer.embedding.shape[0]
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model.feature_extractor(wav_input)
_, codebook_indices = self.model.vector_quantizer.forward_idx(embed)
if not flatten:
return codebook_indices
return rearrange(codebook_indices, 'b ... -> b (...)')
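
# usage sketch (hedged) - the checkpoint path below is a placeholder (see the class docstring for
# where to download vq-wav2vec_kmeans.pt); the waveform is random noise purely to illustrate shapes

def _vq_wav2vec_example():
    wav2vec = FairseqVQWav2Vec(checkpoint_path = './vq-wav2vec_kmeans.pt')  # placeholder path
    wav = torch.randn(1, 24000)        # one second at the default 24kHz target rate
    ids = wav2vec(wav)                 # flattened codebook indices, (1, groups * frames)
    print(ids.shape)

# _vq_wav2vec_example()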
|
from lion_pytorch import Lion
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = False,
**kwargs
):
has_wd = wd > 0
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and has_wd:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if not has_wd:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
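
# usage sketch - weight decay is applied only to parameters with ndim >= 2 (weights), never to biases
# or norm gains; pass use_lion = True to swap AdamW for Lion:
#
#   opt = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-2)
#   opt = get_optimizer(model.parameters(), lr = 1e-4, wd = 1e-2, use_lion = True)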
|
import math
from functools import partial, wraps
from beartype.typing import Optional, Union, List
from beartype import beartype
import torch
from torch import nn, einsum, Tensor
from torch.autograd import grad as torch_grad
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import torchaudio
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from audiolm_pytorch.vq_wav2vec import FairseqVQWav2Vec
from audiolm_pytorch.hubert_kmeans import HubertWithKmeans
from audiolm_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from torchaudio.functional import resample
from audiolm_pytorch.soundstream import SoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.utils import AudioConditionerBase
from audiolm_pytorch.attend import Attend
from tqdm import tqdm
from pathlib import Path
from audiolm_pytorch.version import __version__
from packaging import version
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def maybe(fn):
if not exists(fn):
return always(None)
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
def ceil_div(numer, denom):
return (numer + denom - 1) // denom
def remainder_needed_until_multiple(n, mult):
return (ceil_div(n, mult) * mult) - n
def round_down_nearest_multiple(val, mult):
return (val // mult) * mult
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# tensor helpers
def generate_mask_with_prob(shape, mask_prob, device):
seq = shape[-1]
rand = torch.randn(shape, device = device)
rand[:, 0] = -torch.finfo(rand.dtype).max
num_mask = min(int(seq * mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros(shape, device = device).scatter(1, indices, 1.).bool()
return mask
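# illustrative sketch (editorial addition): the mask always keeps position 0 and drops
# roughly `mask_prob` of the remaining positions; used below as a forgetful causal mask
def _example_generate_mask_with_prob():
    mask = generate_mask_with_prob((2, 10), mask_prob = 0.3, device = 'cpu')
    assert mask.shape == (2, 10) and mask[:, 0].all()    # first position is never masked out
    return mask
# _example_generate_mask_with_prob()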
# attention related utils
def grad_shrink(t, alpha = 0.1):
return t * alpha + t.detach() * (1 - alpha)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t + eps)
def l2norm(t):
return F.normalize(t, dim = -1)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
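# illustrative sketch (editorial addition): top-k filtering followed by gumbel sampling,
# the same combination used by the `generate` methods of the wrappers further below
def _example_topk_gumbel_sampling():
    logits = torch.randn(2, 100)
    filtered = top_k(logits, thres = 0.9)                 # keep roughly the top 10% of logits
    sampled = gumbel_sample(filtered, temperature = 1.)
    assert sampled.shape == (2,)
    return sampled
# _example_topk_gumbel_sampling()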
def mask_out_after_eos_id(t, eos_id, mask_value = -1, keep_eos = True):
eos_mask = (t == eos_id).float()
if keep_eos:
eos_mask = F.pad(eos_mask, (1, -1))
after_eos_mask = eos_mask.cumsum(dim = -1) > 0
return t.masked_fill(after_eos_mask, mask_value)
def all_rows_have_eos_id(t, eos_id):
eos_mask = (t == eos_id)
return torch.any(eos_mask, dim = -1).all()
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# appending an eos id and removing consecutive duplicates in the semantic token ids
# important detail noted by @eonglints
def append_eos_id(ids, eos_id):
b, device = ids.shape[0], ids.device
eos_ids = torch.ones(1, device = device).long() * eos_id
eos_ids = repeat(eos_ids, '1 -> b 1', b = b)
ids = torch.cat((ids, eos_ids), dim = -1)
return ids
def batch_unique_consecutive(t, pad_value = 0.):
unique_arr = [torch.unique_consecutive(el) for el in t.unbind(dim = 0)]
return pad_sequence(unique_arr, batch_first = True, padding_value = pad_value)
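# illustrative sketch (editorial addition): consecutive duplicate ids collapse per row,
# with right padding to the longest row, as done for semantic tokens when unique_consecutive = True
def _example_batch_unique_consecutive():
    t = torch.tensor([[1, 1, 2, 2, 3], [4, 4, 4, 5, 5]])
    out = batch_unique_consecutive(t, pad_value = -1)
    assert out.tolist() == [[1, 2, 3], [4, 5, -1]]
    return out
# _example_batch_unique_consecutive()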
# function for getting embeds from nn.Embedding but with padding as some designated value (-1) outside the range of the embed table
@beartype
def get_embeds(
embeddings: nn.Embedding,
codes: torch.Tensor,
pad_id = -1,
return_mask = False,
mask_pad_pos_to = 0
):
pad_mask = codes == pad_id
codes_without_pad = codes.masked_fill(pad_mask, 0) # just retrieve first code as dummy
embeds = embeddings(codes_without_pad)
if exists(mask_pad_pos_to):
embeds = embeds.masked_fill(rearrange(pad_mask, '... -> ... 1'), mask_pad_pos_to)
if return_mask:
return embeds, ~pad_mask
return embeds
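# illustrative sketch (editorial addition): pad positions (id == pad_id) are looked up at a
# dummy index and then zeroed, with an optional validity mask returned alongside the embeds
def _example_get_embeds():
    emb = nn.Embedding(10, 4)
    codes = torch.tensor([[1, 2, -1]])
    embeds, mask = get_embeds(emb, codes, return_mask = True)
    assert embeds.shape == (1, 3, 4) and mask.tolist() == [[True, True, False]]
    return embeds
# _example_get_embeds()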
# bias-less layernorm, used in more recent T5s and PaLM, and in @borisdayma's experiments shared with me
# offers greater stability
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# relative positional bias
class RelativePositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(
self,
*,
dim,
heads,
layers = 3
):
super().__init__()
self.net = nn.ModuleList([])
self.net.append(nn.Sequential(nn.Linear(1, dim), nn.SiLU()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), nn.SiLU()))
self.net.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, n):
device = self.device
pos = torch.arange(n, device = device)
rel_pos = (rearrange(pos, 'i -> i 1') - rearrange(pos, 'j -> 1 j'))
rel_pos += (n - 1)
x = torch.arange(-n + 1, n, device = device).float()
x = rearrange(x, '... -> ... 1')
for layer in self.net:
x = layer(x)
x = x[rel_pos]
return rearrange(x, 'i j h -> h i j')
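# illustrative sketch (editorial addition): the MLP-parameterized relative position bias
# returns one bias matrix per attention head, shaped (heads, n, n)
def _example_relative_position_bias():
    bias = RelativePositionBias(dim = 32, heads = 8)
    out = bias(5)
    assert out.shape == (8, 5, 5)
    return out
# _example_relative_position_bias()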
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.1):
inner_dim = int(dim * 2 * mult / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
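# illustrative sketch (editorial addition): the GEGLU feedforward preserves the model width,
# with an inner projection of 2x the (2/3-scaled) hidden dimension for the gating
def _example_feedforward():
    ff = FeedForward(dim = 64)
    x = torch.randn(2, 7, 64)
    assert ff(x).shape == x.shape
    return ff
# _example_feedforward()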
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
num_null_kv = 0,
dropout = 0.1,
scale = 8,
flash = False
):
super().__init__()
self.heads = heads
self.causal = causal
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(2, num_null_kv, dim_head)) if num_null_kv > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.attend = Attend(
flash = flash,
dropout = dropout,
causal = causal
)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
prefix_context = None,
prefix_context_mask = None
):
b, n, _, device = *x.shape, x.device
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
# take care of prefix-based self attention conditioning
        # make sure to either concat to the self attention mask or lengthen it accordingly
if exists(prefix_context):
kv_input = torch.cat((prefix_context, kv_input), dim = -2)
prefix_seq_len = prefix_context.shape[-2]
if not exists(mask):
mask = torch.ones((b, n), device = device, dtype = torch.bool)
if exists(prefix_context_mask):
mask = torch.cat((prefix_context_mask, mask), dim = -1)
else:
mask = F.pad(mask, (prefix_seq_len, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (prefix_seq_len, 0), value = 0.)
# prenorm
x = self.norm(x)
# project for queries, keys, values
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
# null key / values
if self.num_null_kv > 0:
null_k, null_v = repeat(self.null_kv, 'kv n d -> kv b n d', b = b).unbind(dim = 0)
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
# split for multi-headed attention
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# handle mask and null key / value
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
# attention
out = self.attend(q, k, v, attn_bias = attn_bias, mask = mask)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
dim_context = None,
cross_attend = False,
attn_dropout = 0.,
ff_dropout = 0.,
grad_shrink_alpha = 0.1,
cond_as_self_attn_prefix = False,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
assert not (cross_attend and cond_as_self_attn_prefix)
self.dim_context = default(dim_context, dim)
self.cond_as_self_attn_prefix = cond_as_self_attn_prefix
self.grad_shrink = partial(grad_shrink, alpha = grad_shrink_alpha)
self.layers = nn.ModuleList([])
self.rel_pos_bias = RelativePositionBias(dim = dim // 2, heads = heads) if rel_pos_bias else None
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dropout = attn_dropout, flash = flash_attn, causal = True, **kwargs),
Attention(dim = dim, heads = heads, dropout = attn_dropout, dim_context = dim_context, flash = flash_attn, num_null_kv = 1, norm_context = True, **kwargs) if cross_attend else None,
FeedForward(dim = dim, dropout = ff_dropout)
]))
self.norm = LayerNorm(dim)
def forward(
self,
x,
self_attn_mask = None,
context = None,
context_mask = None,
attn_bias = None
):
assert not (self.cond_as_self_attn_prefix and not exists(context))
assert not (exists(context) and context.shape[-1] != self.dim_context), f'you had specified a conditioning dimension of {self.dim_context}, yet what was received by the transformer has dimension of {context.shape[-1]}'
n, device = x.shape[1], x.device
x = self.grad_shrink(x) # from cogview paper, adopted by GLM 130B LLM, decreases likelihood of attention net instability
if exists(attn_bias):
rel_pos_bias = attn_bias
else:
rel_pos_bias = maybe(self.rel_pos_bias)(n)
self_attn_kwargs = dict()
if self.cond_as_self_attn_prefix:
self_attn_kwargs = dict(
prefix_context = context,
prefix_context_mask = context_mask
)
for attn, cross_attn, ff in self.layers:
x = attn(x, attn_bias = rel_pos_bias, mask = self_attn_mask, **self_attn_kwargs) + x
if exists(cross_attn):
assert exists(context)
x = cross_attn(x, context = context, mask = context_mask) + x
x = ff(x) + x
return self.norm(x)
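# illustrative sketch (editorial addition): a tiny causal decoder stack with cross attention;
# the dimensions are arbitrary and only meant to show the expected tensor shapes
def _example_transformer():
    transformer = Transformer(dim = 64, depth = 2, heads = 4, cross_attend = True)
    x = torch.randn(1, 10, 64)
    context = torch.randn(1, 6, 64)
    out = transformer(x, context = context)
    assert out.shape == (1, 10, 64)
    return out
# _example_transformer()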
# the three hierarchical transformers
class SemanticTransformer(nn.Module):
@beartype
def __init__(
self,
*,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
cond_dim = None,
has_condition = False,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.start_token = nn.Parameter(torch.randn(dim))
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.eos_id = num_semantic_tokens
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.to_logits = nn.Linear(dim, num_semantic_tokens + 1)
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
@beartype
def forward(
self,
*,
ids = None,
return_loss = False,
text: Optional[List[str]] = None,
text_embeds = None,
self_attn_mask = None,
cond_drop_prob = None,
unique_consecutive = None
):
device = self.device
b = ids.shape[0]
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
if return_loss:
labels, ids = ids.clone(), ids[:, :-1]
tokens = get_embeds(self.semantic_embedding, ids)
start_tokens = repeat(self.start_token, 'd -> b 1 d', b = ids.shape[0])
tokens = torch.cat((start_tokens, tokens), dim = 1)
if exists(self_attn_mask):
self_attn_mask = F.pad(self_attn_mask, (1, 0), value = True)
tokens = self.transformer(tokens, context = text_embeds, self_attn_mask = self_attn_mask, context_mask = text_mask)
return self.to_logits(tokens)
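# illustrative sketch (editorial addition): an unconditional semantic transformer over a toy
# vocabulary; the logit dimension is num_semantic_tokens + 1 to account for the eos class
def _example_semantic_transformer():
    model = SemanticTransformer(dim = 64, depth = 2, num_semantic_tokens = 500, heads = 4)
    ids = torch.randint(0, 500, (1, 16))
    logits = model(ids = ids)
    assert logits.shape == (1, 17, 501)    # start token prepended, eos class appended
    return logits
# _example_semantic_transformer()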
class CoarseTransformer(nn.Module):
@beartype
def __init__(
self,
*,
codebook_size,
num_coarse_quantizers,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_semantic_logits = True,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.semantic_start_token = nn.Parameter(torch.randn(dim))
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.semantic_eos_id = num_semantic_tokens
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.coarse_eos_id = codebook_size
codebook_size_with_eos = codebook_size + 1
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size_with_eos, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.cross_attn_bias = nn.Parameter(torch.zeros(heads, 1, 1)) if rel_pos_bias else None
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.to_semantic_logits = nn.Linear(dim, num_semantic_tokens + 1) if project_semantic_logits else None
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size_with_eos, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
semantic_logits, coarse_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return semantic_logits, coarse_logits
null_semantic_logits, null_coarse_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_semantic_logits = None
if exists(null_semantic_logits):
scaled_semantic_logits = null_semantic_logits + (semantic_logits - null_semantic_logits) * cond_scale
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
return scaled_semantic_logits, scaled_coarse_logits
@beartype
def forward(
self,
*,
semantic_token_ids,
coarse_token_ids,
self_attn_mask = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
return_only_coarse_logits = False
):
b, device = semantic_token_ids.shape[0], semantic_token_ids.device
arange = partial(torch.arange, device = device)
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = None
if exists(text_embeds):
text_mask = torch.any(text_embeds != 0, dim = -1)
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, semantic_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, semantic_token_ids))
offsets = self.codebook_size * arange(self.num_coarse_quantizers)
offsets = repeat(offsets, 'q -> 1 (n q)', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
offsets = offsets[:, :coarse_token_ids.shape[-1]]
coarse_token_ids = coarse_token_ids + offsets
semantic_tokens = get_embeds(self.semantic_embedding, semantic_token_ids)
coarse_tokens = self.coarse_embedding(coarse_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
semantic_seq_len = semantic_tokens.shape[1]
semantic_start_tokens = repeat(self.semantic_start_token, 'd -> b 1 d', b = b)
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
semantic_start_tokens,
semantic_tokens,
coarse_start_tokens,
coarse_tokens
), dim = 1)
# engineer the attention bias so that cross attention is not dominated by relative positions
seq_len = tokens.shape[-2]
attn_bias = None
if exists(self.transformer.rel_pos_bias):
attn_bias = self.transformer.rel_pos_bias(seq_len)
is_semantic = arange(seq_len) < (semantic_seq_len + 1) # semantic seq len + start token
is_cross_attn = rearrange(is_semantic, 'i -> i 1') ^ rearrange(is_semantic, 'j -> 1 j')
attn_bias = torch.where(
is_cross_attn,
self.cross_attn_bias,
attn_bias
)
# attend
tokens = self.transformer(
tokens,
context = text_embeds,
attn_bias = attn_bias,
self_attn_mask = self_attn_mask,
context_mask = text_mask
)
pred_semantic_tokens, pred_coarse_tokens = tokens[:, :semantic_seq_len], tokens[:, (semantic_seq_len + 1):]
# semantic logits
semantic_logits = self.to_semantic_logits(pred_semantic_tokens) if not return_only_coarse_logits and exists(self.to_semantic_logits) else None
# get coarse logits
n = pred_coarse_tokens.shape[1]
nq = round_down_nearest_multiple(n, self.num_coarse_quantizers)
pred_coarse_tokens_groupable, pred_coarse_tokens_remainder = pred_coarse_tokens[:, :nq], pred_coarse_tokens[:, nq:]
pred_coarse_tokens_groupable = rearrange(pred_coarse_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits_groupable = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens_groupable)
coarse_logits_groupable = rearrange(coarse_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_coarse_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
coarse_logits_remainder = einsum('q c d, b q d -> b q c', self.coarse_logit_weights[:remainder_num_quantizers], pred_coarse_tokens_remainder)
coarse_logits = torch.cat((coarse_logits_groupable, coarse_logits_remainder), dim = 1)
else:
coarse_logits = coarse_logits_groupable
return semantic_logits, coarse_logits
class FineTransformer(nn.Module):
def __init__(
self,
*,
num_coarse_quantizers,
num_fine_quantizers,
codebook_size,
dim,
depth,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_coarse_logits = True,
pad_id = -1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.num_coarse_quantizers = num_coarse_quantizers
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.fine_start_token = nn.Parameter(torch.randn(dim))
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size, dim)
self.fine_embedding = nn.Embedding(num_fine_quantizers * codebook_size, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
self.fine_quantize_embedding = nn.Embedding(num_fine_quantizers, dim)
self.pad_id = pad_id
self.eos_id = codebook_size
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
rel_pos_bias = False,
grad_shrink_alpha = grad_shrink_alpha,
flash_attn = flash_attn,
**kwargs
)
        # a specialized attention bias so that corresponding time steps in the fine and coarse sequences attend to each other better
self.null_pos_bias = nn.Parameter(torch.randn(heads, 1, 1)) if rel_pos_bias else None
pos_bias_mlp_dim = dim // 2
self.pos_bias_mlp = nn.Sequential(
nn.Linear(2, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, heads)
) if rel_pos_bias else None
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.num_fine_quantizers = num_fine_quantizers
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size, dim)) if project_coarse_logits else None
self.fine_logit_weights = nn.Parameter(torch.randn(num_fine_quantizers, codebook_size, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
coarse_logits, fine_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return coarse_logits, fine_logits
null_coarse_logits, null_fine_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_coarse_logits = None
if exists(null_coarse_logits):
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
scaled_fine_logits = null_fine_logits + (fine_logits - null_fine_logits) * cond_scale
return scaled_coarse_logits, scaled_fine_logits
def forward(
self,
coarse_token_ids,
fine_token_ids,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
self_attn_mask = None,
return_only_fine_logits = False
):
b, device = coarse_token_ids.shape[0], coarse_token_ids.device
# handle text conditioning
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, fine_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, fine_token_ids))
# do not attend to any of the coarse padding tokens or coarse end token either
coarse_self_attn_mask = (coarse_token_ids != self.pad_id) & (coarse_token_ids != self.eos_id)
coarse_token_ids = coarse_token_ids.masked_fill(~coarse_self_attn_mask, 0)
fine_token_seq_len = fine_token_ids.shape[-1]
coarse_self_attn_mask = F.pad(coarse_self_attn_mask, (1, fine_token_seq_len + 1), value = True)
if exists(self_attn_mask):
self_attn_mask &= coarse_self_attn_mask
else:
self_attn_mask = coarse_self_attn_mask
# prepare coarse and fine token embeddings
b, n = coarse_token_ids.shape
coarse_length = coarse_token_ids.shape[-1]
coarse_offsets = torch.arange(self.num_coarse_quantizers, device = device)
coarse_seq_length = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers)
coarse_offsets = repeat(coarse_offsets, 'q -> (n q)', n = coarse_seq_length)
coarse_offsets = coarse_offsets[:coarse_length]
coarse_token_ids = coarse_token_ids + rearrange(coarse_offsets, '... -> 1 ...') * self.codebook_size
fine_length = fine_token_ids.shape[-1]
fine_offsets = torch.arange(self.num_fine_quantizers, device = device)
fine_seq_length = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers)
fine_offsets = repeat(fine_offsets, 'q -> (n q)', n = fine_seq_length)
fine_offsets = fine_offsets[:fine_length]
fine_token_ids = fine_token_ids + rearrange(fine_offsets, '... -> 1 ...') * self.codebook_size
coarse_tokens = self.coarse_embedding(coarse_token_ids)
fine_tokens = self.fine_embedding(fine_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
fine_quantize_tokens = repeat(self.fine_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers))
fine_quantize_tokens = fine_quantize_tokens[:fine_token_ids.shape[-1], ...]
fine_tokens = fine_tokens + fine_quantize_tokens
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
fine_start_tokens = repeat(self.fine_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
coarse_start_tokens,
coarse_tokens,
fine_start_tokens,
fine_tokens
), dim = 1)
# an engineered attention bias so coarse and fine sequences attend to each other better
attn_bias = None
if exists(self.pos_bias_mlp):
max_seq_len = max(coarse_seq_length, fine_seq_length)
coarse_pos = torch.arange(coarse_seq_length, device = device)
fine_pos = torch.arange(fine_seq_length, device = device)
coarse_pos = repeat(coarse_pos, 'n -> (n q)', q = self.num_coarse_quantizers)[:coarse_length]
fine_pos = repeat(fine_pos, 'n -> (n q)', q = self.num_fine_quantizers)[:fine_length]
coarse_pos = F.pad(coarse_pos, (1, 0), value = -1)
fine_pos = F.pad(fine_pos, (1, 0), value = -1)
seq_positions = torch.cat((coarse_pos, fine_pos), dim = -1)
coarse_offsets = F.pad(coarse_offsets, (1, 0), value = 0)
fine_offsets = fine_offsets + self.num_coarse_quantizers
fine_offsets = F.pad(fine_offsets, (1, 0), value = 0)
seq_offsets = torch.cat((coarse_offsets, fine_offsets), dim = -1)
pos_mlp_input = torch.stack((seq_positions.clamp(min = 0), seq_offsets), dim = -1)
num_offsets = self.num_fine_quantizers + self.num_coarse_quantizers
# relative positions are always (2 * N - 1), where N is the length of the dimension
rel_seq_len, rel_offsets = map(lambda n: 2 * n - 1, (max_seq_len, num_offsets))
# get all relative distances
rel_dist = (rearrange(pos_mlp_input, 'i c -> i 1 c') - rearrange(pos_mlp_input, 'j c -> 1 j c'))
# get all possible relative distances for the attention bias to be computed from the mlp
            # which would be (2 * N - 1) * (2 * Q - 1), where N = sequence length and Q = total quantizers
rel_seq_len_range = repeat(torch.arange(rel_seq_len, device = device), 'n -> (n q)', q = rel_offsets)
rel_offset_range = repeat(torch.arange(rel_offsets, device = device), 'q -> (n q)', n = rel_seq_len)
mlp_inputs = torch.stack((rel_seq_len_range, rel_offset_range), dim = -1)
# implicitly parameterized relative distances, by sequence and quantizer positions
attn_bias = self.pos_bias_mlp(mlp_inputs.float())
# translate coordinates of (rel_seq_pos, rel_quantizer_offset) -> positive index to select from attn bias
rel_dist_seq_pos, rel_dist_seq_offset = rel_dist.unbind(dim = -1)
rel_dist_seq_pos += max_seq_len - 1
rel_dist_seq_offset += num_offsets - 1
rel_dist_indices = rel_dist_seq_pos * rel_offsets + rel_dist_seq_offset
# select the relative positional attention bias outputted by the MLP
# savings go from (N * Q) ^ 2 -> ~ (4 * N * Q)
attn_bias = attn_bias[rel_dist_indices]
attn_bias = rearrange(attn_bias, '... h -> h ...')
# need to make sure start token has a custom positional bias
is_start_token_seq = seq_positions == -1
start_token_mask = rearrange(is_start_token_seq, 'i -> i 1') | rearrange(is_start_token_seq, 'j -> 1 j')
attn_bias = torch.where(
start_token_mask,
self.null_pos_bias,
attn_bias,
)
# attention
tokens = self.transformer(
tokens,
context = text_embeds,
self_attn_mask = self_attn_mask,
context_mask = text_mask,
attn_bias = attn_bias
)
pred_coarse_tokens, pred_fine_tokens = tokens[:, :n], tokens[:, (n + 1):]
# get coarse logits
pred_coarse_seq_len = pred_coarse_tokens.shape[1]
padding = remainder_needed_until_multiple(pred_coarse_seq_len, self.num_coarse_quantizers)
if padding != 0:
pred_coarse_tokens = F.pad(pred_coarse_tokens, (0, 0, 0, padding), value = 0.)
pred_coarse_tokens = rearrange(pred_coarse_tokens, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits = None
if not return_only_fine_logits and exists(self.coarse_logit_weights):
coarse_logits = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens)
coarse_logits = rearrange(coarse_logits, 'b n q c -> b (n q) c')
coarse_logits = coarse_logits[:, :pred_coarse_seq_len]
# get fine logits
pred_fine_seq_len = pred_fine_tokens.shape[1]
nq = round_down_nearest_multiple(pred_fine_seq_len, self.num_fine_quantizers)
pred_fine_tokens_groupable, pred_fine_tokens_remainder = pred_fine_tokens[:, :nq], pred_fine_tokens[:, nq:]
pred_fine_tokens_groupable = rearrange(pred_fine_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_fine_quantizers)
fine_logits_groupable = einsum('q c d, b n q d -> b n q c', self.fine_logit_weights, pred_fine_tokens_groupable)
fine_logits_groupable = rearrange(fine_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_fine_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
fine_logits_remainder = einsum('q c d, b q d -> b q c', self.fine_logit_weights[:remainder_num_quantizers], pred_fine_tokens_remainder)
fine_logits = torch.cat((fine_logits_groupable, fine_logits_remainder), dim = 1)
else:
fine_logits = fine_logits_groupable
return coarse_logits, fine_logits
# training wrappers
class SemanticTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: SemanticTransformer,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
mask_prob = 0.15
):
super().__init__()
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
assert not exists(self.wav2vec) or self.wav2vec.codebook_size == transformer.num_semantic_tokens, f'num_semantic_tokens on SemanticTransformer must be set to {self.wav2vec.codebook_size}'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.eos_id = transformer.eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
def embed_text(self, text):
return self.transformer.embed_text(text, output_device = self.device)
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
max_length,
text: Optional[List[str]] = None,
text_embeds = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_ids = None,
batch_size = 1,
cond_scale = 3,
filter_thres = 0.9,
temperature = 1.,
include_eos_in_output = True, # if doing hierarchical sampling, eos must be kept for an easy time
**kwargs
):
device = self.device
# derive wav2vec ids from the input wave
if exists(prime_wave):
assert not exists(prime_ids)
assert exists(self.wav2vec)
ids = self.wav2vec(
prime_wave,
flatten = False,
input_sample_hz = prime_wave_input_sample_hz
)
elif exists(prime_ids):
ids = prime_ids
else:
ids = torch.empty((batch_size, 0), dtype = torch.long, device = device)
if self.unique_consecutive:
ids = batch_unique_consecutive(ids, pad_value = self.pad_id)
# derive joint audio-text embeddings if needed
if exists(self.audio_conditioner) and exists(prime_wave):
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = prime_wave, namespace = 'semantic')
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# start length and get running id output
batch = ids.shape[0]
start_length = ids.shape[-1]
sample_semantic_ids = ids.clone()
last_logit_indices = (ids != self.pad_id).sum(dim = -1).long()
# sample from transformer
for ind in tqdm(range(start_length, max_length), desc = 'generating semantic'):
logits = self.transformer.forward_with_cond_scale(
ids = sample_semantic_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
**kwargs
)
last_logit_indices_expanded = repeat(last_logit_indices, 'b -> b 1 c', b = batch, c = logits.shape[-1])
last_logits = logits.gather(1, last_logit_indices_expanded)
last_logits = rearrange(last_logits, 'b 1 c -> b c')
filtered_logits = top_k(last_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sample_semantic_ids = torch.cat((sample_semantic_ids, sampled), dim = -1)
if all_rows_have_eos_id(sample_semantic_ids, self.eos_id):
break
last_logit_indices += 1
sample_semantic_ids = mask_out_after_eos_id(sample_semantic_ids, self.eos_id, keep_eos = False)
return sample_semantic_ids
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
text = None,
text_embeds = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'semantic')
if not exists(semantic_token_ids):
            assert exists(self.wav2vec), 'a wav2vec module must be provided if raw waveform is given for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
input_ids = semantic_token_ids
if return_loss:
input_ids = semantic_token_ids[:, :-1]
self_attn_mask = None
if self.mask_prob > 0. and self.training:
self_attn_mask = generate_mask_with_prob(input_ids.shape, self.mask_prob, input_ids.device)
logits = self.transformer(
ids = input_ids,
text = text,
text_embeds = text_embeds,
self_attn_mask = self_attn_mask,
**kwargs
)
if not return_loss:
return logits
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
semantic_token_ids,
ignore_index = self.pad_id
)
return loss
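# illustrative sketch (editorial addition): training the semantic stage directly from
# precomputed semantic token ids, in which case no wav2vec module is required
def _example_semantic_wrapper_loss():
    transformer = SemanticTransformer(dim = 64, depth = 2, num_semantic_tokens = 500)
    wrapper = SemanticTransformerWrapper(transformer = transformer, wav2vec = None)
    ids = torch.randint(0, 500, (2, 32))
    loss = wrapper(semantic_token_ids = ids, return_loss = True)
    loss.backward()
    return loss
# _example_semantic_wrapper_loss()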
class CoarseTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: CoarseTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
semantic_cross_entropy_loss_weight = 1.,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.semantic_cross_entropy_loss_weight = semantic_cross_entropy_loss_weight
        self.num_coarse_quantizers = transformer.num_coarse_quantizers * (codec.rq_groups if exists(codec) else 1)
self.semantic_eos_id = transformer.semantic_eos_id
self.coarse_eos_id = transformer.coarse_eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
semantic_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_coarse_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
max_time_steps = 512,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
**kwargs
):
batch, device = semantic_token_ids.shape[0], self.device
semantic_token_ids = semantic_token_ids.to(device)
# initialize coarse token ids
# if a prime audio wave was supplied, then start off with appropriate acoustic tokens
assert not (exists(prime_wave) and exists(prime_coarse_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_coarse_token_ids):
coarse_token_ids = prime_coarse_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
else:
coarse_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value=self.pad_id)
# initialize
init_coarse_time_step = 0
sampled_coarse_token_ids = coarse_token_ids.clone()
for time_step in tqdm(range(init_coarse_time_step, max_time_steps), desc = 'generating coarse'):
for ind in range(self.num_coarse_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, coarse_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = sampled_coarse_token_ids,
semantic_token_ids = semantic_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_coarse_logits = True,
**kwargs
)
last_coarse_logits = coarse_logits[:, -1]
if not just_finished_quantizer_step:
                    last_coarse_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_coarse_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_coarse_token_ids = torch.cat((sampled_coarse_token_ids, sampled), dim = -1)
sampled_coarse_token_ids = mask_out_after_eos_id(sampled_coarse_token_ids, self.coarse_eos_id, keep_eos = False)
sampled_coarse_token_ids = rearrange(sampled_coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
if not reconstruct_wave:
return sampled_coarse_token_ids
assert exists(self.codec)
wav = self.codec.decode_from_codebook_indices(sampled_coarse_token_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
raw_wave_for_codec = None,
text = None,
text_embeds = None,
coarse_token_ids = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
raw_wave_for_codec = default(raw_wave_for_codec, raw_wave)
        assert exists(raw_wave_for_codec) or exists(coarse_token_ids), 'either raw waveform (raw_wave) is given or coarse token ids are given (coarse_token_ids)'
assert not all(map(exists, (raw_wave, raw_wave_for_codec, semantic_token_ids, coarse_token_ids)))
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'coarse') # technically audio embeds, but shared text-audio joint embedding space for mulan
if not exists(semantic_token_ids):
            assert exists(self.wav2vec), 'a wav2vec module must be provided if raw waveform is given for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
if not exists(coarse_token_ids):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(raw_wave_for_codec, return_encoded = True)
batch, num_timesteps = raw_wave_for_codec.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert indices.shape[0] == batch and indices.shape[1] == num_frames, \
f'Expected indices to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {indices.shape}'
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.semantic_eos_id)
coarse_token_ids = append_eos_id(coarse_token_ids, self.transformer.coarse_eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
if return_loss:
semantic_labels, coarse_labels = semantic_token_ids, coarse_token_ids.clone()
coarse_token_ids = coarse_token_ids[:, :-1]
# self attention mask would omit any padding and eos tokens in the semantic prime
self_attn_mask = (semantic_token_ids != self.pad_id) & (semantic_token_ids != self.semantic_eos_id)
semantic_token_ids = semantic_token_ids.masked_fill(~self_attn_mask, 0)
coarse_token_len = coarse_token_ids.shape[-1]
self_attn_mask = F.pad(self_attn_mask, (1, coarse_token_len + 1), value = True) # attend to semantic bos and all coarse tokens
# forgetful causal mask - structured dropout
if self.mask_prob > 0 and self.training:
self_attn_mask &= generate_mask_with_prob(self_attn_mask.shape, self.mask_prob, device = self_attn_mask.device)
semantic_logits, coarse_logits = self.transformer(
semantic_token_ids = semantic_token_ids,
coarse_token_ids = coarse_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# whether to early return the logits
if not return_loss:
return semantic_logits, coarse_logits
coarse_logits, semantic_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, semantic_logits))
if self.unique_consecutive:
num_coarse_logits, _num_semantic_logits = coarse_labels.numel(), (semantic_labels != self.pad_id).sum()
else:
num_coarse_logits, _num_semantic_logits = coarse_logits.shape[-1], semantic_logits.shape[-1]
semantic_loss = 0.
num_semantic_logits = 0
if self.semantic_cross_entropy_loss_weight > 0 and exists(semantic_logits):
num_semantic_logits = _num_semantic_logits
semantic_loss = F.cross_entropy(
semantic_logits,
semantic_labels,
ignore_index = self.pad_id
)
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
return (
semantic_loss * num_semantic_logits * self.semantic_cross_entropy_loss_weight +
coarse_loss * num_coarse_logits
) / (num_semantic_logits + num_coarse_logits)
class FineTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: FineTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
coarse_cross_entropy_loss_weight = 1.,
pad_id = -1,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
        self.num_fine_quantizers = transformer.num_fine_quantizers * (codec.rq_groups if exists(codec) else 1)
        self.num_coarse_quantizers = transformer.num_coarse_quantizers * (codec.rq_groups if exists(codec) else 1)
if exists(codec):
assert (self.num_fine_quantizers + self.num_coarse_quantizers) == (codec.num_quantizers * codec.rq_groups), 'number of fine and coarse quantizers on fine transformer must add up to total number of quantizers on codec'
self.eos_id = transformer.eos_id
assert self.num_coarse_quantizers > 0
self.pad_id = pad_id
self.coarse_cross_entropy_loss_weight = coarse_cross_entropy_loss_weight
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
coarse_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_fine_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
mask_out_generated_fine_tokens = False,
**kwargs
):
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
batch, device = coarse_token_ids.shape[0], self.device
coarse_token_ids = coarse_token_ids.to(device)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# initialize fine token ids
# if a prime wave was supplied, start off with fine acoustic tokens
assert not (exists(prime_wave) and exists(prime_fine_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_fine_token_ids):
fine_token_ids = prime_fine_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
fine_token_ids = token_ids[..., self.num_coarse_quantizers:]
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
else:
fine_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# calculate number of sampling steps
init_fine_time_step = fine_token_ids.shape[-1] // self.num_fine_quantizers
max_time_steps = coarse_token_ids.shape[1] // self.num_coarse_quantizers
sampled_fine_token_ids = fine_token_ids.clone()
for time_step in tqdm(range(init_fine_time_step, max_time_steps), desc = 'generating fine'):
for ind in range(self.num_fine_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, fine_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = coarse_token_ids,
fine_token_ids = sampled_fine_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_fine_logits = True,
**kwargs
)
last_fine_logits = fine_logits[:, -1]
if not just_finished_quantizer_step:
                    last_fine_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_fine_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_fine_token_ids = torch.cat((sampled_fine_token_ids, sampled), dim = -1)
sampled_fine_token_ids = mask_out_after_eos_id(sampled_fine_token_ids, self.eos_id, keep_eos = False)
# reshape coarse and fine tokens for quantization dimension
sampled_fine_token_ids = rearrange(sampled_fine_token_ids, 'b (n q) -> b n q', q = self.num_fine_quantizers)
coarse_token_ids = rearrange(coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
        # whether to mask out fine token positions where the coarse token ids are all padding (variable length training)
if mask_out_generated_fine_tokens:
pos_is_all_padding = (coarse_token_ids == self.pad_id).all(dim = -1, keepdim = True)
sampled_fine_token_ids = sampled_fine_token_ids.masked_fill(pos_is_all_padding, self.pad_id)
# if not reconstructing wave, return just the fine token ids
if not reconstruct_wave:
return sampled_fine_token_ids
        # reconstruct the wave using the codec, first concatenating the coarse and fine token ids across the quantization dimension
assert exists(self.codec)
coarse_and_fine_ids = torch.cat((coarse_token_ids, sampled_fine_token_ids), dim = -1)
wav = self.codec.decode_from_codebook_indices(coarse_and_fine_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
raw_wave = None,
text = None,
text_embeds = None,
token_ids = None,
coarse_token_ids = None,
fine_token_ids = None,
return_loss = False,
**kwargs
):
        assert exists(raw_wave) ^ (exists(token_ids) ^ (exists(coarse_token_ids) and exists(fine_token_ids))), 'either raw waveform (raw_wave) is given, or coarse and fine token ids (coarse_token_ids, fine_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'fine') # technically audio embeds, but shared text-audio joint embedding space for mulan
if exists(raw_wave):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(raw_wave, return_encoded = True)
batch, num_timesteps = raw_wave.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert token_ids.shape == torch.Size((batch, num_frames, self.num_coarse_quantizers + self.num_fine_quantizers)), \
f'Expected token ids to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {token_ids.shape}'
if exists(token_ids):
coarse_token_ids, fine_token_ids = token_ids[..., :self.num_coarse_quantizers], token_ids[..., self.num_coarse_quantizers:]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
        # if training, derive the labels and drop the last fine token id from the input so it becomes the prediction target
if return_loss:
coarse_labels = coarse_token_ids
fine_labels = fine_token_ids
fine_token_ids = fine_token_ids[:, :-1]
# forgetful causal mask - structured dropout
self_attn_mask = None
if self.mask_prob > 0 and self.training:
mask_shape = (
coarse_token_ids.shape[0],
coarse_token_ids.shape[-1] + fine_token_ids.shape[-1] + 2
)
self_attn_mask = generate_mask_with_prob(mask_shape, self.mask_prob, device = self.device)
coarse_logits, fine_logits = self.transformer(
coarse_token_ids = coarse_token_ids,
fine_token_ids = fine_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# early return the logits
if not return_loss:
return coarse_logits, fine_logits
coarse_logits, fine_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, fine_logits))
num_fine_logits = fine_logits.shape[-1]
num_coarse_logits = 0
coarse_loss = 0.
if self.coarse_cross_entropy_loss_weight > 0 and exists(coarse_logits):
num_coarse_logits = coarse_logits.shape[-1]
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
fine_loss = F.cross_entropy(
fine_logits,
fine_labels,
ignore_index = self.pad_id
)
return (
coarse_loss * num_coarse_logits * self.coarse_cross_entropy_loss_weight +
fine_loss * num_fine_logits
) / (num_coarse_logits + num_fine_logits)
# audio LM
class AudioLM(nn.Module):
@beartype
def __init__(
self,
*,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
codec: Union[SoundStream, EncodecWrapper],
semantic_transformer: SemanticTransformer,
coarse_transformer: CoarseTransformer,
fine_transformer: FineTransformer,
audio_conditioner: Optional[AudioConditionerBase] = None,
unique_consecutive = True
):
super().__init__()
self.audio_conditioner = audio_conditioner
assert semantic_transformer.num_semantic_tokens == coarse_transformer.num_semantic_tokens
assert coarse_transformer.codebook_size == fine_transformer.codebook_size
assert coarse_transformer.num_coarse_quantizers == fine_transformer.num_coarse_quantizers
assert (fine_transformer.num_coarse_quantizers + fine_transformer.num_fine_quantizers) == codec.num_quantizers
self.semantic_has_condition = semantic_transformer.has_condition
self.coarse_has_condition = coarse_transformer.has_condition
self.fine_has_condition = fine_transformer.has_condition
self.needs_text = any([self.semantic_has_condition, self.coarse_has_condition, self.fine_has_condition])
self.semantic = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = semantic_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.coarse = CoarseTransformerWrapper(
wav2vec = wav2vec,
codec = codec,
transformer = coarse_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.fine = FineTransformerWrapper(
codec= codec,
transformer = fine_transformer,
audio_conditioner = audio_conditioner
)
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
def forward(
self,
*,
batch_size = 1,
text: Optional[List[str]] = None,
text_embeds: Optional[Tensor] = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_wave_path = None,
max_length = 2048,
return_coarse_generated_wave = False,
mask_out_generated_fine_tokens = False
):
        assert not (self.needs_text and (not exists(text) and not exists(text_embeds))), 'text needs to be passed in if one of the transformers requires conditioning'
if self.needs_text:
if exists(text):
text_embeds = self.semantic.embed_text(text)
assert not (exists(prime_wave) and exists(prime_wave_path)), 'prompt audio must be given as either `prime_wave: Tensor` or `prime_wave_path: str`'
if exists(prime_wave):
assert exists(prime_wave_input_sample_hz), 'the input sample frequency for the prompt audio must be given as `prime_wave_input_sample_hz: int`'
prime_wave = prime_wave.to(self.device)
elif exists(prime_wave_path):
prime_wave_path = Path(prime_wave_path)
            assert prime_wave_path.exists(), f'file does not exist at {str(prime_wave_path)}'
prime_wave, prime_wave_input_sample_hz = torchaudio.load(str(prime_wave_path))
prime_wave = prime_wave.to(self.device)
semantic_token_ids = self.semantic.generate(
text_embeds = text_embeds if self.semantic_has_condition else None,
batch_size = batch_size,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
max_length = max_length
)
coarse_token_ids_or_recon_wave = self.coarse.generate(
text_embeds = text_embeds if self.coarse_has_condition else None,
semantic_token_ids = semantic_token_ids,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = return_coarse_generated_wave
)
if return_coarse_generated_wave:
return coarse_token_ids_or_recon_wave
generated_wave = self.fine.generate(
text_embeds = text_embeds if self.fine_has_condition else None,
coarse_token_ids = coarse_token_ids_or_recon_wave,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = True,
mask_out_generated_fine_tokens = mask_out_generated_fine_tokens
)
return generated_wave
|
import re
from math import sqrt
import copy
from random import choice
from pathlib import Path
from shutil import rmtree
from collections import Counter
from beartype.typing import Union, List, Optional, Tuple
from typing_extensions import Annotated
from beartype import beartype
from beartype.door import is_bearable
from beartype.vale import Is
import torch
import torchaudio
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from einops import rearrange
from audiolm_pytorch.optimizer import get_optimizer
from ema_pytorch import EMA
from audiolm_pytorch.soundstream import SoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.audiolm_pytorch import (
SemanticTransformer,
SemanticTransformerWrapper,
CoarseTransformer,
CoarseTransformerWrapper,
FineTransformer,
FineTransformerWrapper,
FairseqVQWav2Vec,
HubertWithKmeans
)
from audiolm_pytorch.data import SoundDataset, get_dataloader
from audiolm_pytorch.utils import AudioConditionerBase
from audiolm_pytorch.version import __version__
from packaging import version
from accelerate import (Accelerator, DistributedType)
from accelerate.utils import DistributedDataParallelKwargs
# constants
DEFAULT_SAMPLE_RATE = 16000
# make sure only one trainer is instantiated
ONE_TRAINER_INSTANTIATED = False
def check_one_trainer():
global ONE_TRAINER_INSTANTIATED
assert not ONE_TRAINER_INSTANTIATED, 'only one Trainer can be instantiated at a time for training'
ONE_TRAINER_INSTANTIATED = True
# for automatically routing data emitted from a dataset to keywords of the transformer wrappers
DATASET_FIELD_TYPE_CONFIG = dict(
raw_wave = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim in {2, 3}]
],
text = List[str],
text_embeds = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim == 3]
],
)
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
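# e.g. accum_log({'loss': 1.0}, {'loss': 0.5}) returns {'loss': 1.5}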
# auto data to module keyword argument routing functions
def has_duplicates(tup):
counts = dict(Counter(tup))
return any(filter(lambda count: count > 1, counts.values()))
def determine_types(data, config):
output = []
for el in data:
for name, data_type in config.items():
if is_bearable(el, data_type):
output.append(name)
break
else:
raise TypeError(f'unable to determine type of {data}')
return tuple(output)
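# e.g. for a dataloader batch of (float wave tensor, list of caption strings), determine_types
# returns ('raw_wave', 'text'), which is later zipped with the data tuple into keyword arguments
# for the transformer wrappers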
def checkpoint_num_steps(checkpoint_path):
"""Returns the number of steps trained from a checkpoint based on the filename.
Filename format assumed to be something like "/path/to/semantic.transformer.20000.pt" which is
for 20k train steps. Returns 20000 in that case.
"""
results = re.findall(r'\d+', str(checkpoint_path))
if len(results) == 0:
return 0
return int(results[-1])
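# e.g. checkpoint_num_steps('results/semantic.transformer.20000.pt') == 20000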
# main trainer class
class SoundStreamTrainer(nn.Module):
@beartype
def __init__(
self,
soundstream: SoundStream,
*,
num_train_steps: int,
batch_size: int,
data_max_length: Optional[int] = None,
data_max_length_seconds: Optional[Union[int, float]] = None,
folder: Optional[str] = None,
train_dataloader: Optional[DataLoader] = None,
val_dataloader: Optional[DataLoader] = None,
lr: float = 2e-4,
grad_accum_every: int = 4,
wd: float = 0.,
max_grad_norm: float = 0.5,
discr_max_grad_norm: Optional[float] = None,
save_results_every: int = 100,
save_model_every: int = 1000,
log_losses_every: int = 1,
results_folder: str = './results',
valid_frac: float = 0.05,
random_split_seed: int = 42,
use_ema: bool = True,
ema_beta: float = 0.995,
ema_update_after_step: int = 500,
ema_update_every: int = 10,
apply_grad_penalty_every: int = 4,
dl_num_workers: int = 0,
accelerator: Optional[Accelerator] = None,
accelerate_kwargs: dict = dict(),
dataloader_drop_last = True,
split_batches = False,
use_lion: bool = False,
force_clear_prev_results: Optional[bool] = None # set to True | False to skip the prompt
):
"""
Initialize with a SoundStream instance and either a folder containing audio data or
train/val DataLoader instances.
"""
super().__init__()
check_one_trainer()
if accelerator:
self.accelerator = accelerator
assert len(accelerate_kwargs) == 0
else:
kwargs = DistributedDataParallelKwargs(find_unused_parameters = True)
self.accelerator = Accelerator(
kwargs_handlers = [kwargs],
split_batches = split_batches,
**accelerate_kwargs
)
self.soundstream = soundstream
self.use_ema = use_ema
if self.use_ema:
self.ema_soundstream = EMA(soundstream, beta = ema_beta, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
hyperparameters = {
"num_train_steps": num_train_steps,
"batch_size": batch_size,
"gradient_accum_every": grad_accum_every,
"learning_rate": lr,
"target_sample_hz": soundstream.target_sample_hz,
}
# optimizers
self.optim = get_optimizer(soundstream.non_discr_parameters(), lr = lr, wd = wd)
for discr_optimizer_key, discr in self.multiscale_discriminator_iter():
one_multiscale_discr_optimizer = get_optimizer(discr.parameters(), lr = lr, wd = wd)
setattr(self, discr_optimizer_key, one_multiscale_discr_optimizer)
self.discr_optim = get_optimizer(soundstream.stft_discriminator.parameters(), lr = lr, wd = wd, use_lion = use_lion)
# max grad norm
self.max_grad_norm = max_grad_norm
self.discr_max_grad_norm = discr_max_grad_norm
if folder is None:
assert train_dataloader is not None
assert val_dataloader is not None
self.dl = train_dataloader
self.valid_dl = val_dataloader
else:
assert train_dataloader is None
assert val_dataloader is None
# create dataset
if exists(data_max_length_seconds):
assert not exists(data_max_length)
data_max_length = int(data_max_length_seconds * soundstream.target_sample_hz)
else:
assert exists(data_max_length)
hyperparameters['data_max_length'] = data_max_length
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = soundstream.target_sample_hz,
seq_len_multiple_of = soundstream.seq_len_multiple_of
)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
# prepare with accelerator
(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
) = self.accelerator.prepare(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
)
# prepare the multiscale discriminators with accelerator
for name, _ in self.multiscale_discriminator_iter():
optimizer = getattr(self, name)
optimizer = self.accelerator.prepare(optimizer)
setattr(self, name, optimizer)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.log_losses_every = log_losses_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
# Initialize experiment trackers if an external Accelerator is not passed in
if not accelerator:
self.accelerator.init_trackers("soundstream", config=hyperparameters)
assert self.accelerator.distributed_type != DistributedType.FSDP, 'FSDP not supported for soundstream trainer due to complex-valued stft discriminator'
def set_model_as_ema_model_(self):
""" this will force the main 'online' model to have same parameters as the exponentially moving averaged model """
assert self.use_ema
self.ema_soundstream.ema_model.load_state_dict(self.soundstream.state_dict())
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.soundstream),
optim = self.optim.state_dict(),
config = self.unwrapped_soundstream._configs,
discr_optim = self.discr_optim.state_dict(),
version = __version__
)
if self.use_ema:
pkg['ema_model'] = self.ema_soundstream.state_dict()
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
pkg[key] = discr_optim.state_dict()
torch.save(pkg, path)
@property
def unwrapped_soundstream(self):
return self.accelerator.unwrap_model(self.soundstream)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# if loading from old version, make a hacky guess
if len(pkg.keys()) > 20:
self.unwrapped_soundstream.load_state_dict(pkg)
if self.use_ema:
self.ema_soundstream.ema_model.load_state_dict(pkg)
return
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
# otherwise load things normally
self.unwrapped_soundstream.load_state_dict(pkg['model'])
if self.use_ema:
assert 'ema_model' in pkg
self.ema_soundstream.load_state_dict(pkg['ema_model'])
self.optim.load_state_dict(pkg['optim'])
self.discr_optim.load_state_dict(pkg['discr_optim'])
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
discr_optim.load_state_dict(pkg[key])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def multiscale_discriminator_iter(self):
for ind, discr in enumerate(self.unwrapped_soundstream.discriminators):
yield f'multiscale_discr_optimizer_{ind}', discr
def multiscale_discriminator_optim_iter(self):
for name, _ in self.multiscale_discriminator_iter():
yield name, getattr(self, name)
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
apply_grad_penalty = self.apply_grad_penalty_every > 0 and not (steps % self.apply_grad_penalty_every)
log_losses = self.log_losses_every > 0 and not (steps % self.log_losses_every)
self.soundstream.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss) = self.soundstream(wave, return_loss_breakdown = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, dict(
loss = loss.item() / self.grad_accum_every,
recon_loss = recon_loss.item() / self.grad_accum_every,
))
if log_losses:
accum_log(logs, dict(
multi_spectral_recon_loss = multi_spectral_recon_loss.item() / self.grad_accum_every,
adversarial_loss = adversarial_loss.item() / self.grad_accum_every,
feature_loss = feature_loss.item() / self.grad_accum_every,
all_commitment_loss = all_commitment_loss.item() / self.grad_accum_every,
))
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# update discriminator
self.discr_optim.zero_grad()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.zero_grad()
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
discr_losses = self.soundstream(
wave,
apply_grad_penalty = apply_grad_penalty,
return_discr_loss = True,
return_discr_losses_separately = True
)
for name, discr_loss in discr_losses:
self.accelerator.backward(discr_loss / self.grad_accum_every, retain_graph = True)
accum_log(logs, {name: discr_loss.item() / self.grad_accum_every})
if exists(self.discr_max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.stft_discriminator.parameters(), self.discr_max_grad_norm)
# gradient step for all discriminators
self.discr_optim.step()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.step()
# build pretty printed losses
losses_str = f"{steps}: soundstream total loss: {logs['loss']:.3f}, soundstream recon loss: {logs['recon_loss']:.3f}"
if log_losses:
self.accelerator.log({
"total_loss": logs['loss'],
"recon_loss": logs['recon_loss'],
"multi_spectral_recon_loss": logs['multi_spectral_recon_loss'],
"adversarial_loss": logs['adversarial_loss'],
"feature_loss": logs['feature_loss'],
"all_commitment_loss": logs['all_commitment_loss'],
"stft_discr_loss": logs['stft']
}, step=steps)
for key, loss in logs.items():
if not key.startswith('scale:'):
continue
_, scale_factor = key.split(':')
losses_str += f" | discr (scale {scale_factor}) loss: {loss:.3f}"
if log_losses:
self.accelerator.log({f"discr_loss (scale {scale_factor})": loss}, step=steps)
# log
self.print(losses_str)
# update exponential moving averaged generator
self.accelerator.wait_for_everyone()
if self.is_main and self.use_ema:
self.ema_soundstream.update()
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
models = [(self.unwrapped_soundstream, str(steps))]
if self.use_ema:
models.append((self.ema_soundstream.ema_model, f'{steps}.ema'))
wave, = next(self.valid_dl_iter)
wave = wave.to(device)
for model, label in models:
model.eval()
with torch.inference_mode():
recons = model(wave, return_recons_only = True)
for ind, recon in enumerate(recons.unbind(dim = 0)):
filename = str(self.results_folder / f'sample_{label}.flac')
torchaudio.save(filename, recon.cpu().detach(), self.unwrapped_soundstream.target_sample_hz)
self.print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'soundstream.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
# semantic transformer trainer
class SemanticTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
transformer: SemanticTransformer,
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.wav2vec = wav2vec
self.transformer = transformer
self.audio_conditioner = audio_conditioner
self.train_wrapper = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = int(data_max_length_seconds * wav2vec.target_sample_hz)
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = wav2vec.target_sample_hz,
seq_len_multiple_of = wav2vec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("semantic", config=hps)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for _ in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'semantic.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
# coarse transformer trainer
class CoarseTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: CoarseTransformer,
codec: Union[SoundStream, EncodecWrapper],
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
ds_fields: Tuple[str, ...] = ('raw_wave', 'raw_wave_for_codec', 'text'),
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.wav2vec = wav2vec
self.audio_conditioner = audio_conditioner
self.train_wrapper = CoarseTransformerWrapper(
codec = codec,
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = tuple(int(data_max_length_seconds * hz) for hz in (wav2vec.target_sample_hz, codec.target_sample_hz))
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = (
wav2vec.target_sample_hz,
codec.target_sample_hz
), # need 2 waves resampled differently here
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = ds_fields
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("coarse", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.dl_iter)))
loss = self.train_wrapper(
**data_kwargs,
return_loss = True
)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.valid_dl_iter)))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(
**data_kwargs,
return_loss = True
)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'coarse.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
# fine transformer trainer
class FineTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: FineTransformer,
codec: Union[SoundStream, EncodecWrapper],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
dataset_normalize = False,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.audio_conditioner = audio_conditioner
self.train_wrapper = FineTransformerWrapper(
codec = codec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = int(data_max_length_seconds * codec.target_sample_hz)
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = codec.target_sample_hz,
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("fine", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'fine.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
|
from functools import reduce
from einops import rearrange, pack, unpack
import torch
from torch import nn
from torchaudio.functional import resample
from vector_quantize_pytorch import ResidualVQ
from encodec import EncodecModel
from encodec.utils import _linear_overlap_add
# helper functions
def exists(val):
return val is not None
# hacky way to get num quantizers
def get_num_quantizers(model: EncodecModel, audio_length = 512):
out = model.encode(torch.randn(1, 1, audio_length))
return out[0][0].shape[1]
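# model.encode returns a list of (codes, scale) frames; codes has shape (batch, num_quantizers, frames),
# so indexing the first frame's codes and taking shape[1] recovers the number of quantizers implied by the bandwidth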
class EncodecWrapper(nn.Module):
"""
Support pretrained 24kHz Encodec by Meta AI, if you want to skip training SoundStream.
TODO:
- see if we need to keep the scaled version and somehow persist the scale factors for when we need to decode? Right
now I'm just setting self.model.normalize = False to sidestep all of that
- see if we can use the 48kHz model, which is specifically for music. Right now we're using the 24kHz model because
that's what was used in MusicLM and avoids any resampling issues.
"""
def __init__(
self,
target_sample_hz = 24000,
strides = (2, 4, 5, 8),
num_quantizers = 8,
bandwidth = 6.0
):
super().__init__()
# Instantiate a pretrained EnCodec model
self.model = EncodecModel.encodec_model_24khz()
self.model.normalize = False # this means we don't need to scale codes e.g. when running model.encode(wav)
# The number of codebooks used will be determined by the bandwidth selected.
# E.g. for a bandwidth of 6 kbps, `n_q = 8` codebooks are used.
# Supported bandwidths are 1.5 kbps (n_q = 2), 3 kbps (n_q = 4), 6 kbps (n_q = 8), 12 kbps (n_q = 16) and 24 kbps (n_q = 32).
# For the 48 kHz model, only 3, 6, 12, and 24 kbps are supported. The number
# of codebooks for each is half that of the 24 kHz model as the frame rate is twice as much.
# bandwidth affects num quantizers used: https://github.com/facebookresearch/encodec/pull/41
self.model.set_target_bandwidth(bandwidth)
num_quantizers = get_num_quantizers(self.model)
# Fields that SoundStream has that get used externally. We replicate them here.
self.target_sample_hz = target_sample_hz
assert self.target_sample_hz == 24000, "haven't done anything with non-24kHz yet"
self.codebook_dim = 128
self.rq_groups = 1
self.num_quantizers = num_quantizers
self.strides = strides # used in seq_len_multiple_of
# cross entropy loss to indices passed in on l2 distance logits introduced in vector-quantize-pytorch 1.2.2
self.rq = ResidualVQ(
dim = 128,
codebook_size = 1024,
num_quantizers = num_quantizers
)
# copy codebook over to ResidualVQ for cross entropy loss logic from naturalspeech2
# luckily, it seems Meta AI basically used my ResidualVQ code verbatim. makes porting it over easy
for encodec_rq_layer, rq_layer in zip(self.model.quantizer.vq.layers, self.rq.layers):
encodec_codebook = dict(encodec_rq_layer._codebook.named_buffers()).get('embed')
vq_codebook = dict(rq_layer._codebook.named_buffers()).get('embed')
encodec_codebook = rearrange(encodec_codebook, '... -> 1 ...')
vq_codebook.copy_(encodec_codebook)
@property
def seq_len_multiple_of(self):
return reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
def forward(
self,
x,
input_sample_hz = None,
return_encoded = False,
**kwargs
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
# kwargs for stuff like return_encoded=True, which SoundStream uses but Encodec doesn't
assert not self.model.training, "Encodec is pretrained and should never be called outside eval mode."
# Unlike in the Encodec sample code in its README, x has already been resampled so we don't need to call
# convert_audio and unsqueeze. The convert_audio function also doesn't play nicely with batches.
# b = batch, t = timesteps, 1 channel for the 24kHz model, 2 channels for the 48kHz model
wav = rearrange(x, f'b t -> b {self.model.channels} t')
# Extract discrete codes from EnCodec
with torch.inference_mode():
encoded_frames = self.model.encode(wav)
# encoded_frames is a list of (frame, scale) tuples. Scale is a scalar but we don't use it. Frame is a tensor
# of shape [batch, num_quantizers, num_samples_per_frame]. We want to concatenate the frames to get all the
# timesteps concatenated.
codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1) # [batch, num_quantizers, timesteps]
# transformer code that uses codec expects codes to be [batch, timesteps, num_quantizers]
codes = rearrange(codes, 'b q n -> b n q') # result: [batch, timesteps, num_quantizers]
# in original soundstream, is x, indices, commit_loss. But we only use indices in eval mode, so just keep that.
# allow for returning of sum of quantized embeddings
emb = None
if return_encoded:
emb = self.get_emb_from_indices(codes)
emb, = unpack(emb, ps, '* n c')
codes, = unpack(codes, ps, '* n q')
return emb, codes, None
def decode_from_codebook_indices(self, quantized_indices):
# Input: batch x num tokens x num quantizers
# Output: batch x 1 x num samples
assert self.model.sample_rate == 24000,\
"if changing to 48kHz, that model segments its audio into lengths of 1.0 second with 1% overlap, whereas " \
"the 24kHz doesn't segment at all. this means the frame decode logic might change; this is a reminder to " \
"double check that."
# Since 24kHz pretrained doesn't do any segmenting, we have all the frames already (1 frame = 1 token in quantized_indices)
# The following code is hacked in from self.model.decode() (Encodec version 0.1.1) where we skip the part about
# scaling.
# Shape: 1 x (num_frames * stride product). 1 because we have 1 frame (because no segmenting)
frames = self._decode_frame(quantized_indices)
result = _linear_overlap_add(frames, self.model.segment_stride or 1)
# TODO: I'm not overly pleased with this because when this function gets called, we just rearrange the result
# back to b n anyways, but we'll keep this as a temporary hack just to make things work for now
return rearrange(result, 'b n -> b 1 n')
def get_emb_from_indices(self, indices):
codes = rearrange(indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
return rearrange(emb, 'b c n -> b n c')
def decode(self, emb):
emb = rearrange(emb, 'b n c -> b c n')
return self.model.decoder(emb)
def _decode_frame(self, quantized_indices):
# The following code is hacked in from self.model._decode_frame() (Encodec version 0.1.1) where we assume we've
# already unwrapped the EncodedFrame
# Input: batch x num tokens x num quantizers
# Output: batch x new_num_samples, where new_num_samples is num_frames * stride product (may be slightly
# larger than original num samples as a result, because the last frame might not be "fully filled" with samples
# if num_samples doesn't divide perfectly).
# num_frames == the number of acoustic tokens you have, one token per frame
codes = rearrange(quantized_indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
# emb shape: batch x self.model.quantizer.dimension x T. Note self.model.quantizer.dimension is the embedding dimension
return self.model.decoder(emb)
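# rough usage sketch (assumes a mono 24kHz wave of shape (batch, samples)):
# codec = EncodecWrapper()
# emb, codes, _ = codec(torch.randn(1, 24000), return_encoded = True)   # codes: (batch, timesteps, num_quantizers)
# recon = codec.decode_from_codebook_indices(codes)                     # roughly (batch, 1, samples)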
|
from pathlib import Path
from functools import partial, wraps
from beartype import beartype
from beartype.typing import Tuple, Union, Optional
from beartype.door import is_bearable
import torchaudio
from torchaudio.functional import resample
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from audiolm_pytorch.utils import curtail_to_multiple
from einops import rearrange, reduce
# helper functions
def exists(val):
return val is not None
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def is_unique(arr):
return len(set(arr)) == len(arr)
# dataset functions
class SoundDataset(Dataset):
@beartype
def __init__(
self,
folder,
target_sample_hz: Union[int, Tuple[int, ...]], # target sample hz must be specified, or a tuple of them if one wants to return multiple resampled versions
exts = ['flac', 'wav', 'mp3', 'webm'],
max_length: Optional[int] = None, # max length would apply to the highest target_sample_hz, if there are multiple
seq_len_multiple_of: Optional[Union[int, Tuple[Optional[int], ...]]] = None
):
super().__init__()
path = Path(folder)
assert path.exists(), 'folder does not exist'
files = [file for ext in exts for file in path.glob(f'**/*.{ext}')]
assert len(files) > 0, 'no sound files found'
self.files = files
self.max_length = max_length
self.target_sample_hz = cast_tuple(target_sample_hz)
num_outputs = len(self.target_sample_hz)
# strategy, if there are multiple target sample hz, would be to resample to the highest one first
# apply the max lengths, and then resample to all the others
self.max_target_sample_hz = max(self.target_sample_hz)
self.seq_len_multiple_of = cast_tuple(seq_len_multiple_of, num_outputs)
assert len(self.target_sample_hz) == len(self.seq_len_multiple_of)
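# e.g. target_sample_hz = (16000, 24000): audio is first resampled to 24kHz, cropped or padded
# to max_length, then each copy is resampled to its own target rate (see __getitem__ below)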
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
file = self.files[idx]
data, sample_hz = torchaudio.load(file)
assert data.numel() > 0, f'one of your audio files ({file}) is empty. please remove it from your folder'
if data.shape[0] > 1:
# the audio has more than 1 channel, convert to mono
data = reduce(data, 'c ... -> 1 ...', 'mean')
# first resample data to the max target freq
data = resample(data, sample_hz, self.max_target_sample_hz)
sample_hz = self.max_target_sample_hz
# then curtail or pad the audio depending on the max length
max_length = self.max_length
audio_length = data.size(1)
if exists(max_length):
if audio_length > max_length:
max_start = audio_length - max_length
start = torch.randint(0, max_start, (1, ))
data = data[:, start:start + max_length]
else:
data = F.pad(data, (0, max_length - audio_length), 'constant')
data = rearrange(data, '1 ... -> ...')
# resample if target_sample_hz is not None in the tuple
num_outputs = len(self.target_sample_hz)
data = cast_tuple(data, num_outputs)
data_tuple = tuple(resample(d, sample_hz, target_sample_hz) for d, target_sample_hz in zip(data, self.target_sample_hz))
output = []
# process each of the data resample at different frequencies individually for curtailing to multiple
for data, seq_len_multiple_of in zip(data_tuple, self.seq_len_multiple_of):
if exists(seq_len_multiple_of):
data = curtail_to_multiple(data, seq_len_multiple_of)
output.append(data.float())
# cast from list to tuple
output = tuple(output)
# return only one audio, if only one target resample freq
if num_outputs == 1:
return output[0]
return output
# dataloader functions
def collate_one_or_multiple_tensors(fn):
@wraps(fn)
def inner(data):
is_one_data = not isinstance(data[0], tuple)
if is_one_data:
data = fn(data)
return (data,)
outputs = []
for datum in zip(*data):
if is_bearable(datum, Tuple[str, ...]):
output = list(datum)
else:
output = fn(datum)
outputs.append(output)
return tuple(outputs)
return inner
@collate_one_or_multiple_tensors
def curtail_to_shortest_collate(data):
min_len = min(datum.shape[0] for datum in data)
data = [datum[:min_len] for datum in data]
return torch.stack(data)
@collate_one_or_multiple_tensors
def pad_to_longest_fn(data):
return pad_sequence(data, batch_first = True)
def get_dataloader(ds, pad_to_longest = True, **kwargs):
collate_fn = pad_to_longest_fn if pad_to_longest else curtail_to_shortest_collate
return DataLoader(ds, collate_fn = collate_fn, **kwargs)
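# usage sketch: for a SoundDataset that yields a single wave per item,
# get_dataloader(ds, batch_size = 4) collates each batch into a 1-tuple (padded_waves,)
# where padded_waves is padded to the longest sample in the batch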
|
# standard imports
import os
import sys
import pickle
# non-standard imports
import numpy as np
from sklearn import svm
from sqlite3 import dbapi2 as sqlite3
# local imports
from utils import safe_pickle_dump, strip_version, Config
num_recommendations = 500 # papers to recommend per user
# -----------------------------------------------------------------------------
if not os.path.isfile(Config.database_path):
print("the database file as.db should exist. You can create an empty database with sqlite3 as.db < schema.sql")
sys.exit()
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = sqldb.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
# -----------------------------------------------------------------------------
# fetch all users
users = query_db('''select * from user''')
print('number of users: ', len(users))
# load the tfidf matrix and meta
meta = pickle.load(open(Config.meta_path, 'rb'))
out = pickle.load(open(Config.tfidf_path, 'rb'))
X = out['X']
X = X.todense()
xtoi = { strip_version(x):i for x,i in meta['ptoi'].items() }
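# for each user, fit a linear SVM on the tf-idf matrix: papers in their library are the positive
# class, everything else negative, then rank all papers by the decision function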
user_sim = {}
for ii,u in enumerate(users):
print("%d/%d building an SVM for %s" % (ii, len(users), u['username'].encode('utf-8')))
uid = u['user_id']
lib = query_db('''select * from library where user_id = ?''', [uid])
pids = [x['paper_id'] for x in lib] # raw pids without version
posix = [xtoi[p] for p in pids if p in xtoi]
if not posix:
continue # empty library for this user maybe?
print(pids)
y = np.zeros(X.shape[0])
for ix in posix: y[ix] = 1
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.1)
clf.fit(X,y)
s = clf.decision_function(X)
sortix = np.argsort(-s)
sortix = sortix[:min(num_recommendations, len(sortix))] # crop paper recommendations to save space
user_sim[uid] = [strip_version(meta['pids'][ix]) for ix in list(sortix)]
print('writing', Config.user_sim_path)
safe_pickle_dump(user_sim, Config.user_sim_path)
|
"""
Very simple script that simply iterates over all files data/pdf/f.pdf
and create a file data/txt/f.pdf.txt that contains the raw text, extracted
using the "pdftotext" command. If a pdf cannot be converted, this
script will not produce the output file.
"""
import os
import sys
import time
import shutil
import pickle
from utils import Config
# make sure pdftotext is installed
if not shutil.which('pdftotext'): # needs Python 3.3+
print('ERROR: you don\'t have pdftotext installed. Install it first before calling this script')
sys.exit()
if not os.path.exists(Config.txt_dir):
print('creating ', Config.txt_dir)
os.makedirs(Config.txt_dir)
have = set(os.listdir(Config.txt_dir))
files = os.listdir(Config.pdf_dir)
for i,f in enumerate(files): # there was a ,start=1 here that I removed, can't remember why it would be there. shouldn't be, i think.
txt_basename = f + '.txt'
if txt_basename in have:
print('%d/%d skipping %s, already exists.' % (i, len(files), txt_basename, ))
continue
pdf_path = os.path.join(Config.pdf_dir, f)
txt_path = os.path.join(Config.txt_dir, txt_basename)
cmd = "pdftotext %s %s" % (pdf_path, txt_path)
os.system(cmd)
print('%d/%d %s' % (i, len(files), cmd))
# check output was made
if not os.path.isfile(txt_path):
# there was an error with converting the pdf
print('there was a problem with parsing %s to text, creating an empty text file.' % (pdf_path, ))
os.system('touch ' + txt_path) # create empty file, but it's a record of having tried to convert
time.sleep(0.01) # silly way for allowing for ctrl+c termination
|
import os
import json
import time
import pickle
import dateutil.parser
import argparse
from random import shuffle
import numpy as np
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from flask_limiter import Limiter
from werkzeug.security import check_password_hash, generate_password_hash
from utils import safe_pickle_dump, strip_version, isvalidid, Config
# various globals
# -----------------------------------------------------------------------------
# database configuration
if os.path.isfile('secret_key.txt'):
SECRET_KEY = open('secret_key.txt', 'r').read()
else:
SECRET_KEY = 'devkey, should be in a file'
app = Flask(__name__)
app.config.from_object(__name__)
limiter = Limiter(app, global_limits=["100 per hour", "20 per minute"])
SEARCH_DICT = {}
# -----------------------------------------------------------------------------
# utilities for database interactions
# -----------------------------------------------------------------------------
# to initialize the database: sqlite3 as.db < schema.sql
def connect_db():
sqlite_db = sqlite3.connect(Config.database_path)
sqlite_db.row_factory = sqlite3.Row # to return dicts rather than tuples
return sqlite_db
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = g.db.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = query_db('select user_id from user where username = ?',
[username], one=True)
return rv[0] if rv else None
def get_username(user_id):
"""Convenience method to look up the username for a user."""
rv = query_db('select username from user where user_id = ?',
[user_id], one=True)
return rv[0] if rv else None
# -----------------------------------------------------------------------------
# connection handlers
# -----------------------------------------------------------------------------
@app.before_request
def before_request():
# this will always request a database connection, even if we don't end up using it ;\
g.db = connect_db()
# retrieve user object from the database if user_id is set
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
[session['user_id']], one=True)
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
# -----------------------------------------------------------------------------
# search/sort functionality
# -----------------------------------------------------------------------------
def date_sort():
scores = []
for pid,p in db.items():
timestruct = dateutil.parser.parse(p['updated'])
p['time_updated'] = int(timestruct.timestamp()) # store in struct for future convenience
timestruct = dateutil.parser.parse(p['published'])
p['time_published'] = int(timestruct.timestamp()) # store in struct for future convenience
scores.append((p['time_updated'], p))
scores.sort(reverse=True, key=lambda x: x[0])
out = [sp[1] for sp in scores]
return out
def papers_search(qraw):
qparts = qraw.lower().strip().split() # split by spaces
# use reverse index and accumulate scores
scores = []
for pid,p in db.items():
score = sum(SEARCH_DICT[pid].get(q,0) for q in qparts)
if score == 0:
continue # no match whatsoever, don't include
# give a small boost to more recent papers
score += 0.0001*p['tscore']
scores.append((score, p))
scores.sort(reverse=True, key=lambda x: x[0]) # descending
out = [x[1] for x in scores if x[0] > 0]
return out
def papers_similar(pid):
rawpid = strip_version(pid)
# check if we have this paper at all, otherwise return empty list
if not rawpid in db:
return []
# check if we have distances to this specific version of paper id (includes version)
if pid in sim_dict:
# good, simplest case: lets return the papers
return [db[strip_version(k)] for k in sim_dict[pid]]
else:
# ok we don't have this specific version. could be a stale URL that points to,
# e.g. v1 of a paper, but due to an updated version of it we only have v2 on file
# now. We want to use v2 in that case.
# lets try to retrieve the most recent version of this paper we do have
kok = [k for k in sim_dict if rawpid in k]
if kok:
# ok we have at least one different version of this paper, lets use it instead
id_use_instead = kok[0]
return [db[strip_version(k)] for k in sim_dict[id_use_instead]]
else:
# return just the paper. we dont have similarities for it for some reason
return [db[rawpid]]
def papers_from_library():
out = []
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = [strip_version(x['paper_id']) for x in user_library]
out = [db[x] for x in libids]
out = sorted(out, key=lambda k: k['updated'], reverse=True)
return out
def papers_from_svm(recent_days=None):
out = []
if g.user:
uid = session['user_id']
if not uid in user_sim:
return []
# we want to exclude papers that are already in user library from the result, so fetch them.
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
plist = user_sim[uid]
out = [db[x] for x in plist if not x in libids]
if recent_days is not None:
# filter as well to only most recent papers
curtime = int(time.time()) # in seconds
out = [x for x in out if curtime - x['time_published'] < recent_days*24*60*60]
return out
def papers_filter_version(papers, v):
if v != '1':
return papers # noop
intv = int(v)
filtered = [p for p in papers if p['_version'] == intv]
return filtered
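# e.g. papers_filter_version(papers, '1') keeps only papers whose latest stored version is 1;
# any other value (such as the default 'all') is a no-op and returns the list unchanged.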
def encode_json(ps, n=10, send_images=True, send_abstracts=True):
libids = set()
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
ret = []
for i in range(min(len(ps),n)):
p = ps[i]
idvv = '%sv%d' % (p['_rawid'], p['_version'])
struct = {}
struct['title'] = p['title']
struct['pid'] = idvv
struct['category'] = p['arxiv_primary_category']['term']
struct['authors'] = [a['name'] for a in p['authors']]
struct['link'] = p['link']
struct['in_library'] = 1 if p['_rawid'] in libids else 0
if send_abstracts:
struct['abstract'] = p['summary']
if send_images:
struct['img'] = '/static/thumbs/' + idvv + '.pdf.jpg'
struct['tags'] = [t['term'] for t in p['tags']]
timestruct = dateutil.parser.parse(p['updated'])
struct['published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
timestruct = dateutil.parser.parse(p['published'])
struct['originally_published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
cc = p.get('arxiv_comment', '')
if len(cc) > 100:
cc = cc[:100] + '...' # crop very long comments
struct['comment'] = cc
ret.append(struct)
return ret
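# For reference, each entry of the returned list is a plain dict shaped roughly like this
# (illustrative field values only):
# {'title': ..., 'pid': '1512.08756v2', 'category': 'cs.LG', 'authors': [...], 'link': ...,
#  'in_library': 0 or 1, 'abstract': ..., 'img': '/static/thumbs/1512.08756v2.pdf.jpg',
#  'tags': [...], 'published_time': 'm/d/y', 'originally_published_time': 'm/d/y', 'comment': ...}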
# -----------------------------------------------------------------------------
# flask request handling
# -----------------------------------------------------------------------------
def default_context(papers, **kws):
top_papers = encode_json(papers, args.num_results)
ans = dict(papers=top_papers, numresults=len(papers), totpapers=len(db), msg='')
ans.update(kws)
return ans
@app.route("/")
def intmain():
vstr = request.args.get('vfilter', 'all')
papers = DATE_SORTED_PAPERS # precomputed
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recent',
msg='Showing most recent Arxiv papers:')
return render_template('main.html', **ctx)
@app.route("/<request_pid>")
def rank(request_pid=None):
if not isvalidid(request_pid):
return '' # these are requests for icons, things like robots.txt, etc
papers = papers_similar(request_pid)
ctx = default_context(papers, render_format='paper')
return render_template('main.html', **ctx)
@app.route("/search", methods=['GET'])
def search():
q = request.args.get('q', '') # get the search request
papers = papers_search(q) # perform the query and get sorted documents
ctx = default_context(papers, render_format="search")
return render_template('main.html', **ctx)
@app.route('/recommend', methods=['GET'])
def recommend():
""" return user's svm sorted list """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365}
tt = legend.get(ttstr, None)
papers = papers_from_svm(recent_days=tt)
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recommend',
msg='Recommended papers: (based on SVM trained on tfidf of papers in your library, refreshed every day or so)' if g.user else 'You must be logged in and have some papers saved in your library.')
return render_template('main.html', **ctx)
@app.route('/top', methods=['GET'])
def top():
""" return top papers """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365, 'alltime':10000}
tt = legend.get(ttstr, 7)
curtime = int(time.time()) # in seconds
papers = [p for p in TOP_SORTED_PAPERS if curtime - p['time_published'] < tt*24*60*60]
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='top',
msg='Top papers based on people\'s libraries:')
return render_template('main.html', **ctx)
@app.route('/toptwtr', methods=['GET'])
def toptwtr():
""" return top papers """
papers = TWITTER_TOP
ctx = default_context(papers, render_format='toptwtr',
msg='Top papers mentioned on Twitter over last 5 days:')
return render_template('main.html', **ctx)
@app.route('/library')
def library():
""" render user's library """
papers = papers_from_library()
ret = encode_json(papers, 500) # cap at 500 papers in someone's library. that's a lot!
if g.user:
msg = '%d papers in your library:' % (len(ret), )
else:
msg = 'You must be logged in. Once you are, you can save papers to your library (with the save icon on the right of each paper) and they will show up here.'
ctx = default_context(papers, render_format='library', msg=msg)
return render_template('main.html', **ctx)
@app.route('/libtoggle', methods=['POST'])
def review():
""" user wants to toggle a paper in his library """
# make sure user is logged in
if not g.user:
return 'NO' # fail... (not logged in). JS should prevent us from getting here.
idvv = request.form['pid'] # includes version
if not isvalidid(idvv):
return 'NO' # fail, malformed id. weird.
pid = strip_version(idvv)
if not pid in db:
return 'NO' # we don't know this paper. wat
uid = session['user_id'] # id of logged in user
# check this user already has this paper in library
record = query_db('''select * from library where
user_id = ? and paper_id = ?''', [uid, pid], one=True)
print(record)
ret = 'NO'
if record:
# record exists, erase it.
g.db.execute('''delete from library where user_id = ? and paper_id = ?''', [uid, pid])
g.db.commit()
#print('removed %s for %s' % (pid, uid))
ret = 'OFF'
else:
# record does not exist, add it.
rawpid = strip_version(pid)
g.db.execute('''insert into library (paper_id, user_id, update_time) values (?, ?, ?)''',
[rawpid, uid, int(time.time())])
g.db.commit()
#print('added %s for %s' % (pid, uid))
ret = 'ON'
return ret
@app.route('/login', methods=['POST'])
def login():
""" logs in the user. if the username doesn't exist creates the account """
if not request.form['username']:
flash('You have to enter a username')
elif not request.form['password']:
flash('You have to enter a password')
elif get_user_id(request.form['username']) is not None:
# username already exists, fetch all of its attributes
user = query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if check_password_hash(user['pw_hash'], request.form['password']):
# password is correct, log in the user
session['user_id'] = get_user_id(request.form['username'])
flash('User ' + request.form['username'] + ' logged in.')
else:
# incorrect password
flash('User ' + request.form['username'] + ' already exists, wrong password.')
else:
# create account and log in
creation_time = int(time.time())
g.db.execute('''insert into user (username, pw_hash, creation_time) values (?, ?, ?)''',
[request.form['username'],
generate_password_hash(request.form['password']),
creation_time])
user_id = g.db.execute('select last_insert_rowid()').fetchall()[0][0]
g.db.commit()
session['user_id'] = user_id
flash('New account %s created' % (request.form['username'], ))
return redirect(url_for('intmain'))
@app.route('/logout')
def logout():
session.pop('user_id', None)
flash('You were logged out')
return redirect(url_for('intmain'))
# -----------------------------------------------------------------------------
# int main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--prod', dest='prod', action='store_true', help='run in prod?')
parser.add_argument('-r', '--num_results', dest='num_results', type=int, default=200, help='number of results to return per query')
parser.add_argument('--port', dest='port', type=int, default=5000, help='port to serve on')
args = parser.parse_args()
print(args)
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
print('loading tfidf_meta', Config.meta_path)
meta = pickle.load(open(Config.meta_path, "rb"))
vocab = meta['vocab']
idf = meta['idf']
print('loading paper similarities', Config.sim_path)
sim_dict = pickle.load(open(Config.sim_path, "rb"))
print('loading user recommendations', Config.user_sim_path)
if os.path.isfile(Config.user_sim_path):
user_sim = pickle.load(open(Config.user_sim_path, 'rb'))
else:
user_sim = {}
print('loading twitter top', Config.tweet_path)
if os.path.isfile(Config.tweet_path):
TWITTER_TOP = pickle.load(open(Config.tweet_path, 'rb'))
TWITTER_TOP = [db[pid] for count,pid in TWITTER_TOP]
else:
TWITTER_TOP = []
print('precomputing papers date sorted...')
DATE_SORTED_PAPERS = date_sort()
if not os.path.isfile(Config.database_path):
print('did not find as.db, trying to create an empty database from schema.sql...')
print('this needs sqlite3 to be installed!')
os.system('sqlite3 as.db < schema.sql')
# compute top papers in peoples' libraries
print('computing top papers...')
def get_popular():
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
libs = sqldb.execute('''select * from library''').fetchall()
counts = {}
for lib in libs:
pid = lib['paper_id']
counts[pid] = counts.get(pid, 0) + 1
return counts
top_counts = get_popular()
top_paper_counts = sorted([(v,k) for k,v in top_counts.items() if v > 0], reverse=True)
print(top_paper_counts[:min(30, len(top_paper_counts))])
TOP_SORTED_PAPERS = [db[q[1]] for q in top_paper_counts]
# compute min and max time for all papers
tts = [time.mktime(dateutil.parser.parse(p['updated']).timetuple()) for pid,p in db.items()]
ttmin = min(tts)*1.0
ttmax = max(tts)*1.0
for pid,p in db.items():
tt = time.mktime(dateutil.parser.parse(p['updated']).timetuple())
p['tscore'] = (tt-ttmin)/(ttmax-ttmin)
# some utilities for creating a search index for faster search
punc = "'!\"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'" # removed hyphen from string.punctuation
trans_table = {ord(c): None for c in punc}
def makedict(s, forceidf=None, scale=1.0):
words = set(s.lower().translate(trans_table).strip().split())
out = {}
for w in words: # todo: if we're using bigrams in vocab then this won't search over them
if forceidf is None:
if w in vocab:
# we have idf for this
idfval = idf[vocab[w]]*scale
else:
idfval = 1.0*scale # assume idf 1.0 (low)
else:
idfval = forceidf
out[w] = idfval
return out
def merge_dicts(dlist):
out = {}
for d in dlist:
for k,v in d.items():
out[k] = out.get(k,0) + v
return out
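# e.g. merge_dicts([{'deep': 5.0}, {'deep': 1.0, 'learning': 3.0}]) -> {'deep': 6.0, 'learning': 3.0}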
SEARCH_DICT = {} # pid -> {word: idf-weighted score}; filled below (rebuilt or loaded from cache)
# caching: check if db.p is younger than search_dict.p
recompute_index = True
if os.path.isfile(Config.search_dict_path):
db_modified_time = os.path.getmtime(Config.db_path)
search_modified_time = os.path.getmtime(Config.search_dict_path)
if search_modified_time > db_modified_time:
# search index exists and is more recent, no need
recompute_index = False
if recompute_index:
print('building an index for faster search...')
for pid in db:
p = db[pid]
dict_title = makedict(p['title'], forceidf=5, scale=3)
dict_authors = makedict(' '.join(x['name'] for x in p['authors']), forceidf=5)
dict_categories = {x['term'].lower():5 for x in p['tags']}
if 'and' in dict_authors:
# special case for "and" handling in authors list
del dict_authors['and']
dict_summary = makedict(p['summary'])
SEARCH_DICT[pid] = merge_dicts([dict_title, dict_authors, dict_categories, dict_summary])
# and cache it in file
print('writing ', Config.search_dict_path, ' as cache...')
safe_pickle_dump(SEARCH_DICT, Config.search_dict_path)
else:
print('loading cached index for faster search from', Config.search_dict_path)
SEARCH_DICT = pickle.load(open(Config.search_dict_path, 'rb'))
# start
if args.prod:
# run on Tornado instead, since running raw Flask in prod is not recommended
print('starting tornado!')
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
enable_pretty_logging()
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port)
IOLoop.instance().start()
else:
print('starting flask!')
app.debug = True
app.run(port=args.port)
|
"""
Queries arxiv API and downloads papers (the query is a parameter).
The script is intended to enrich an existing database pickle (by default db.p),
so this file will be loaded first, and then new results will be added to it.
"""
import os
import time
import pickle
import random
import argparse
import urllib.request
import feedparser
from utils import Config, safe_pickle_dump
def encode_feedparser_dict(d):
"""
helper function to get rid of feedparser bs with a deep copy.
I hate when libs wrap simple things in their own classes.
"""
if isinstance(d, feedparser.FeedParserDict) or isinstance(d, dict):
j = {}
for k in d.keys():
j[k] = encode_feedparser_dict(d[k])
return j
elif isinstance(d, list):
l = []
for k in d:
l.append(encode_feedparser_dict(k))
return l
else:
return d
def parse_arxiv_url(url):
"""
an example url is http://arxiv.org/abs/1512.08756v2
we want to extract the raw id and the version
"""
ix = url.rfind('/')
idversion = url[ix+1:] # extract just the id (and the version)
parts = idversion.split('v')
assert len(parts) == 2, 'error parsing url ' + url
return parts[0], int(parts[1])
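# Example, using the url from the docstring above:
# parse_arxiv_url('http://arxiv.org/abs/1512.08756v2') -> ('1512.08756', 2)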
if __name__ == "__main__":
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--search-query', type=str,
default='cat:cs.CV+OR+cat:cs.AI+OR+cat:cs.LG+OR+cat:cs.CL+OR+cat:cs.NE+OR+cat:stat.ML',
help='query used for arxiv API. See http://arxiv.org/help/api/user-manual#detailed_examples')
parser.add_argument('--start-index', type=int, default=0, help='0 = most recent API result')
parser.add_argument('--max-index', type=int, default=10000, help='upper bound on paper index we will fetch')
parser.add_argument('--results-per-iteration', type=int, default=100, help='passed to arxiv API')
parser.add_argument('--wait-time', type=float, default=5.0, help='lets be gentle to arxiv API (in number of seconds)')
parser.add_argument('--break-on-no-added', type=int, default=1, help='break out early if all returned query papers are already in db? 1=yes, 0=no')
args = parser.parse_args()
# misc hardcoded variables
base_url = 'http://export.arxiv.org/api/query?' # base api query url
print('Searching arXiv for %s' % (args.search_query, ))
# lets load the existing database to memory
try:
db = pickle.load(open(Config.db_path, 'rb'))
except Exception as e:
print('error loading existing database:')
print(e)
print('starting from an empty database')
db = {}
# -----------------------------------------------------------------------------
# main loop where we fetch the new results
print('database has %d entries at start' % (len(db), ))
num_added_total = 0
for i in range(args.start_index, args.max_index, args.results_per_iteration):
print("Results %i - %i" % (i,i+args.results_per_iteration))
query = 'search_query=%s&sortBy=lastUpdatedDate&start=%i&max_results=%i' % (args.search_query,
i, args.results_per_iteration)
with urllib.request.urlopen(base_url+query) as url:
response = url.read()
parse = feedparser.parse(response)
num_added = 0
num_skipped = 0
for e in parse.entries:
j = encode_feedparser_dict(e)
# extract just the raw arxiv id and version for this paper
rawid, version = parse_arxiv_url(j['id'])
j['_rawid'] = rawid
j['_version'] = version
# add to our database if we didn't have it before, or if this is a new version
if not rawid in db or j['_version'] > db[rawid]['_version']:
db[rawid] = j
print('Updated %s added %s' % (j['updated'].encode('utf-8'), j['title'].encode('utf-8')))
num_added += 1
num_added_total += 1
else:
num_skipped += 1
# print some information
print('Added %d papers, already had %d.' % (num_added, num_skipped))
if len(parse.entries) == 0:
print('Received no results from arxiv. Rate limiting? Exiting. Restart later maybe.')
print(response)
break
if num_added == 0 and args.break_on_no_added == 1:
print('No new papers were added. Assuming no new papers exist. Exiting.')
break
print('Sleeping for %i seconds' % (args.wait_time , ))
time.sleep(args.wait_time + random.uniform(0, 3))
# save the database before we quit, if we found anything new
if num_added_total > 0:
print('Saving database with %d papers to %s' % (len(db), Config.db_path))
safe_pickle_dump(db, Config.db_path)
|
"""
Use imagemagick to convert all pdfs to a sequence of thumbnail images
requires: sudo apt-get install imagemagick
"""
import os
import sys
import time
import shutil
from subprocess import Popen
from utils import Config
# make sure imagemagick is installed
if not shutil.which('convert'): # shutil.which needs Python 3.3+
print("ERROR: you don\'t have imagemagick installed. Install it first before calling this script")
sys.exit()
# create if necessary the directories we're using for processing and output
pdf_dir = os.path.join('data', 'pdf')
if not os.path.exists(Config.thumbs_dir): os.makedirs(Config.thumbs_dir)
if not os.path.exists(Config.tmp_dir): os.makedirs(Config.tmp_dir)
# fetch all pdf filenames in the pdf directory
files_in_pdf_dir = os.listdir(pdf_dir)
pdf_files = [x for x in files_in_pdf_dir if x.endswith('.pdf')] # filter to just pdfs, just in case
# iterate over all pdf files and create the thumbnails
for i,p in enumerate(pdf_files):
pdf_path = os.path.join(pdf_dir, p)
thumb_path = os.path.join(Config.thumbs_dir, p + '.jpg')
if os.path.isfile(thumb_path):
print("skipping %s, thumbnail already exists." % (pdf_path, ))
continue
print("%d/%d processing %s" % (i, len(pdf_files), p))
# take the first 8 pages of the pdf ([0-7]); from the 9th page on it's usually references
# tile them horizontally, use JPEG compression 80, trim the borders for each image
#cmd = "montage %s[0-7] -mode Concatenate -tile x1 -quality 80 -resize x230 -trim %s" % (pdf_path, "thumbs/" + f + ".jpg")
#print "EXEC: " + cmd
# nvm, below using a roundabout alternative that is worse and requires temporary files, yuck!
# but i found that it succeeds more often. I can't remember what happened anymore but I remember
# that the version above, while more elegant, had some problem with it on some pdfs. I think.
# erase previous intermediate files thumb-*.png in the tmp directory
if os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
for i in range(8):
f = os.path.join(Config.tmp_dir, 'thumb-%d.png' % (i,))
f2= os.path.join(Config.tmp_dir, 'thumbbuf-%d.png' % (i,))
if os.path.isfile(f):
cmd = 'mv %s %s' % (f, f2)
os.system(cmd)
# okay originally I was going to issue an rm call, but I am too terrified of
# running scripted rm queries, so what we will do is instead issue a "mv" call
# to rename the files. That's a bit safer, right? We have to do this because if
# some papers are shorter than 8 pages, then results from previous paper will
# "leek" over to this result, through the intermediate files.
# spawn async. convert can unfortunately enter an infinite loop, have to handle this.
# this command will generate 8 independent images thumb-0.png ... thumb-7.png of the thumbnails
pp = Popen(['convert', '%s[0-7]' % (pdf_path, ), '-thumbnail', 'x156', os.path.join(Config.tmp_dir, 'thumb.png')])
t0 = time.time()
while time.time() - t0 < 20: # give it a 20 second deadline
ret = pp.poll()
if not (ret is None):
# process terminated
break
time.sleep(0.1)
ret = pp.poll()
if ret is None:
print("convert command did not terminate in 20 seconds, terminating.")
pp.terminate() # give up
if not os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
# failed to render pdf, replace with missing image
missing_thumb_path = os.path.join('static', 'missing.jpg')
os.system('cp %s %s' % (missing_thumb_path, thumb_path))
print("could not render pdf, creating a missing image placeholder")
else:
cmd = "montage -mode concatenate -quality 80 -tile x1 %s %s" % (os.path.join(Config.tmp_dir, 'thumb-*.png'), thumb_path)
print(cmd)
os.system(cmd)
time.sleep(0.01) # silly way for allowing for ctrl+c termination
|
from contextlib import contextmanager
import os
import re
import pickle
import tempfile
# global settings
# -----------------------------------------------------------------------------
class Config(object):
# main paper information repo file
db_path = 'db.p'
# intermediate processing folders
pdf_dir = os.path.join('data', 'pdf')
txt_dir = os.path.join('data', 'txt')
thumbs_dir = os.path.join('static', 'thumbs')
# intermediate pickles
tfidf_path = 'tfidf.p'
meta_path = 'tfidf_meta.p'
sim_path = 'sim_dict.p'
user_sim_path = 'user_sim.p'
tweet_path = 'twitter.p' # written by twitter_daemon.py
# sql database file
database_path = 'as.db'
search_dict_path = 'search_dict.p'
tmp_dir = 'tmp'
# Context managers for atomic writes courtesy of
# http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
@contextmanager
def _tempfile(*args, **kws):
""" Context for temporary file.
Will find a free temporary filename upon entering
and will try to delete the file on leaving
Parameters
----------
suffix : string
optional file suffix
"""
fd, name = tempfile.mkstemp(*args, **kws)
os.close(fd)
try:
yield name
finally:
try:
os.remove(name)
except OSError as e:
if e.errno == 2:
pass
else:
raise e
@contextmanager
def open_atomic(filepath, *args, **kwargs):
""" Open temporary file object that atomically moves to destination upon
exiting.
Allows reading and writing to and from the same filename.
Parameters
----------
filepath : string
the file path to be opened
fsync : bool
whether to force write the file to disk
kwargs : mixed
Any valid keyword arguments for :code:`open`
"""
fsync = kwargs.pop('fsync', False)
with _tempfile(dir=os.path.dirname(filepath)) as tmppath:
with open(tmppath, *args, **kwargs) as f:
yield f
if fsync:
f.flush()
os.fsync(f.fileno())
os.rename(tmppath, filepath)
def safe_pickle_dump(obj, fname):
with open_atomic(fname, 'wb') as f:
pickle.dump(obj, f, -1)
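# Minimal usage sketch (hypothetical filename): safe_pickle_dump({'hello': 'world'}, 'example.p')
# writes the pickle into a temporary file in the destination directory and only then renames it
# into place, so a crash mid-write never leaves a truncated example.p behind.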
# arxiv utils
# -----------------------------------------------------------------------------
def strip_version(idstr):
""" identity function if arxiv id has no version, otherwise strips it. """
parts = idstr.split('v')
return parts[0]
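# e.g. strip_version('1511.08198v1') -> '1511.08198'; an id without a version is returned unchanged.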
# "1511.08198v1" is an example of a valid arxiv id that we accept
def isvalidid(pid):
return re.match(r'^\d+\.\d+(v\d+)?$', pid)
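# e.g. isvalidid('1511.08198v1') and isvalidid('1511.08198') match; isvalidid('robots.txt') returns None.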
|
import re
import pytz
import time
import pickle
import datetime
from dateutil import parser
import twitter # pip install python-twitter
from utils import Config, safe_pickle_dump
sleep_time = 60*10 # in seconds
max_days_keep = 5 # max number of days to keep a tweet in memory
def get_db_pids():
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
# I know this looks weird, but I don't trust dict_keys to be efficient with "in" operator.
# I also don't trust it to keep some reference to the whole dict, as I'm hoping db here deallocates.
# Can't find good docs here
pid_dict = {p:1 for p in db}
return pid_dict
def get_keys():
lines = open('twitter.txt', 'r').read().splitlines()
return lines
# authenticate
keys = get_keys()
api = twitter.Api(consumer_key=keys[0],
consumer_secret=keys[1],
access_token_key=keys[2],
access_token_secret=keys[3])
print(api.VerifyCredentials())
def extract_arxiv_pids(r):
pids = []
for u in r.urls:
m = re.search('arxiv.org/abs/(.+)', u.expanded_url)
if m:
rawid = m.group(1)
pids.append(rawid)
return pids
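# Illustrative example: a tweet whose expanded url is 'https://arxiv.org/abs/1512.08756v2'
# would contribute the pid '1512.08756v2' here.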
db_pids = get_db_pids()
seen = {}
epochd = datetime.datetime(1970,1,1,tzinfo=pytz.utc) # time of epoch
while True:
try:
results = api.GetSearch(raw_query="q=arxiv.org&result_type=recent&count=100")
ok = True
except Exception as e:
print('there was some problem:')
print(e)
time.sleep(sleep_time)
continue
tnow = time.time()
num_processed = 0
parsed = []
for r in results:
arxiv_pids = extract_arxiv_pids(r)
arxiv_pids = [p for p in arxiv_pids if p in db_pids] # filter to those that are in our paper db
if not arxiv_pids: continue # nothing relevant here, lets move on
if r.id in seen: continue # skip, already saw and recorded
seen[r.id] = {'seen':tnow} # mark as seen at this time
num_processed += 1
# collect all arxiv paper ids from valid urls
seen[r.id]['pids'] = arxiv_pids
# parse & records time of this tweet
d = parser.parse(r.created_at)
time_posted = (d - epochd).total_seconds()
seen[r.id]['time_posted'] = time_posted
print('processed %d/%d new tweets. Currently maintaining total %d' % (num_processed, len(results), len(seen)))
# maintain state: if something was seen > few days ago, forget it
maxdt = 60*60*24*max_days_keep
seen_new = { tweetid:d for tweetid,d in seen.items() if tnow - d['time_posted'] < maxdt }
print('previous seen dict had %d tweets, pruning to %d' % (len(seen), len(seen_new)))
seen = seen_new # swap
# compile all votes and write output for serving
votes = {}
for tweetid,d in seen.items():
for pid in d['pids']:
votes[pid] = votes.get(pid, 0) + 1
votes = [(v,k) for k,v in votes.items()]
votes.sort(reverse=True, key=lambda x: x[0]) # descending
print('top votes', votes[:min(len(votes), 10)])
print('writing', Config.tweet_path)
safe_pickle_dump(votes, Config.tweet_path)
# and sleep for a while
print('sleeping', sleep_time)
time.sleep(sleep_time)
|
import os
import time
import pickle
import shutil
import random
from urllib.request import urlopen
from utils import Config
timeout_secs = 10 # after this many seconds we give up on a paper
if not os.path.exists(Config.pdf_dir): os.makedirs(Config.pdf_dir)
have = set(os.listdir(Config.pdf_dir)) # get list of all pdfs we already have
numok = 0
numtot = 0
db = pickle.load(open(Config.db_path, 'rb'))
for pid,j in db.items():
pdfs = [x['href'] for x in j['links'] if x['type'] == 'application/pdf']
assert len(pdfs) == 1
pdf_url = pdfs[0] + '.pdf'
basename = pdf_url.split('/')[-1]
fname = os.path.join(Config.pdf_dir, basename)
# try retrieve the pdf
numtot += 1
try:
if not basename in have:
print('fetching %s into %s' % (pdf_url, fname))
req = urlopen(pdf_url, None, timeout_secs)
with open(fname, 'wb') as fp:
shutil.copyfileobj(req, fp)
time.sleep(0.05 + random.uniform(0,0.1))
else:
print('%s exists, skipping' % (fname, ))
numok+=1
except Exception as e:
print('error downloading: ', pdf_url)
print(e)
print('%d/%d of %d downloaded ok.' % (numok, numtot, len(db)))
print('final number of papers downloaded okay: %d/%d' % (numok, len(db)))
|
"""
Reads txt files of all papers and computes tfidf vectors for all papers.
Dumps results to file tfidf.p
"""
import os
import pickle
from random import shuffle, seed
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from utils import Config, safe_pickle_dump
seed(1337)
max_train = 10000 # max number of tfidf training documents (chosen randomly), for memory efficiency
# read database
db = pickle.load(open(Config.db_path, 'rb'))
# read all text files for all papers into memory
txt_paths, pids = [], []
n = 0
for pid,j in db.items():
n += 1
idvv = '%sv%d' % (j['_rawid'], j['_version'])
txt_path = os.path.join('data', 'txt', idvv) + '.pdf.txt'
if os.path.isfile(txt_path): # some pdfs dont translate to txt
with open(txt_path, 'r') as f:
txt = f.read()
if len(txt) > 1000 and len(txt) < 500000: # 500K is VERY conservative upper bound
txt_paths.append(txt_path) # todo later: maybe filter or something some of them
pids.append(idvv)
print("read %d/%d (%s) with %d chars" % (n, len(db), idvv, len(txt)))
else:
print("skipped %d/%d (%s) with %d chars: suspicious!" % (n, len(db), idvv, len(txt)))
else:
print("could not find %s in txt folder." % (txt_path, ))
print("in total read in %d text files out of %d db entries." % (len(txt_paths), len(db)))
# compute tfidf vectors with scikits
v = TfidfVectorizer(input='content',
encoding='utf-8', decode_error='replace', strip_accents='unicode',
lowercase=True, analyzer='word', stop_words='english',
token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
ngram_range=(1, 2), max_features = 10000,
norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
max_df=1.0, min_df=1)
# create an iterator object to conserve memory
def make_corpus(paths):
for p in paths:
with open(p, 'r') as f:
txt = f.read()
yield txt
# train
train_txt_paths = list(txt_paths) # duplicate
shuffle(train_txt_paths) # shuffle
train_txt_paths = train_txt_paths[:min(len(train_txt_paths), max_train)] # crop
print("training on %d documents..." % (len(train_txt_paths), ))
train_corpus = make_corpus(train_txt_paths)
v.fit(train_corpus)
# transform
print("transforming %d documents..." % (len(txt_paths), ))
corpus = make_corpus(txt_paths)
X = v.transform(corpus)
print(v.vocabulary_)
print(X.shape)
# write full matrix out
out = {}
out['X'] = X # this one is heavy!
print("writing", Config.tfidf_path)
safe_pickle_dump(out, Config.tfidf_path)
# writing lighter metadata information into a separate (smaller) file
out = {}
out['vocab'] = v.vocabulary_
out['idf'] = v._tfidf.idf_
out['pids'] = pids # a full idvv string (id and version number)
out['ptoi'] = { x:i for i,x in enumerate(pids) } # pid to ix in X mapping
print("writing", Config.meta_path)
safe_pickle_dump(out, Config.meta_path)
print("precomputing nearest neighbor queries in batches...")
X = X.todense() # originally it's a sparse matrix
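# Note on the math below: the TfidfVectorizer above used norm='l2', so every row of X has unit
# length and np.dot(X, xquery.T) is a matrix of cosine similarities. Negating it and argsorting
# along axis 0 lists the most similar papers first; we keep the top 50 ids per paper (each paper
# will typically show up first in its own list, with similarity 1).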
sim_dict = {}
batch_size = 200
for i in range(0,len(pids),batch_size):
i1 = min(len(pids), i+batch_size)
xquery = X[i:i1] # BxD
ds = -np.asarray(np.dot(X, xquery.T)) #NxD * DxB => NxB
IX = np.argsort(ds, axis=0) # NxB
for j in range(i1-i):
sim_dict[pids[i+j]] = [pids[q] for q in list(IX[:50,j])]
print('%d/%d...' % (i, len(pids)))
print("writing", Config.sim_path)
safe_pickle_dump(sim_dict, Config.sim_path)
|
from setuptools import setup, find_packages
from io import open
import versioneer
DESCRIPTION = (
"ANANSE: Prediction of key transcription factors in cell fate "
"determination using enhancer networks"
)
with open("README.md", encoding="utf-8") as f:
long_description = f.read().strip("\n")
setup(
name="ananse",
version=versioneer.get_version(),
long_description=long_description,
long_description_content_type="text/markdown",
description=DESCRIPTION,
author="Quan Xu",
author_email="[email protected]",
url="https://github.com/vanheeringen-lab/ananse/",
download_url="https://github.com/vanheeringen-lab/ananse/"
+ versioneer.get_version(),
license="MIT",
packages=find_packages(),
scripts=["scripts/ananse"],
include_package_data=True,
zip_safe=False, # This is necessary, otherwise files won't be installed
classifiers=[
"Development Status :: 4 Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
install_requires=[
"setuptools >= 0.7",
"adjusttext",
"dask",
"gimmemotifs >=0.15.1",
"loguru",
"networkx",
"numpy",
"openpyxl",
"pandas",
"scipy",
"scikit-learn",
"tables",
"genomepy >= 0.9.3",
"pyranges",
],
)
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
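As a purely illustrative sketch (values drawn from the examples above), a call to
`get_versions()` in a dirty checkout two commits past a "0.11" tag might return something like:
{'version': '0.11+2.g1076c97.dirty',
 'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
 'dirty': True,
 'error': None,
 'date': None}  # or an ISO 8601 commit date string, when available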
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
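# Hedged usage sketch (not part of the original Versioneer code; the prefix and
# path below are hypothetical): with parentdir_prefix "myproject-", an unpacked
# tarball directory such as ".../myproject-1.2.3" yields the version "1.2.3".
def _example_versions_from_parentdir():
    info = versions_from_parentdir("myproject-", "/tmp/myproject-1.2.3", verbose=False)
    return info["version"]  # "1.2.3"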
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
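# Hedged example (not part of the original Versioneer code; the pieces dict is
# hypothetical, shaped like the output of git_pieces_from_vcs for a checkout
# described as "v1.2.0-3-gabcdef0-dirty" with tag_prefix "v"):
def _example_render():
    pieces = {
        "long": "abcdef0123456789abcdef0123456789abcdef01",
        "short": "abcdef0",
        "error": None,
        "dirty": True,
        "closest-tag": "1.2.0",
        "distance": 3,
        "date": "2021-09-28T10:06:03+0200",
    }
    assert render(pieces, "pep440")["version"] == "1.2.0+3.gabcdef0.dirty"
    assert render(pieces, "git-describe")["version"] == "1.2.0-3-gabcdef0-dirty"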
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
import urllib.request
import pandas as pd
import numpy as np
import re
import sys
import os
from loguru import logger
import ananse
logger.remove()
logger.add(
sys.stderr, format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | {level} | {message}"
)
TFP_URL = "https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=text&libraryName=TF_Perturbations_Followed_by_Expression"
TRRUST_URL = "https://www.grnpedia.org/trrust/data/trrust_rawdata.human.tsv"
MSIGDB_URL = "https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.4/c3.all.v7.4.symbols.gmt"
def download_trrust_reference(outfile):
edges = []
with urllib.request.urlopen(
TRRUST_URL,
) as f:
for line in f.readlines():
tf, target, regtype, pmid = line.decode().strip().split("\t")
# Just skip repression for now
if regtype in ["Activation", "Unknown"]:
edges.append([tf, target, 1])
edges = pd.DataFrame(edges, columns=["tf", "target", "interaction"])
edges.to_csv(outfile, sep="\t", index=False)
def download_msigdb_reference(outfile):
with urllib.request.urlopen(MSIGDB_URL) as gmt, open(outfile, "w") as fl1:
for line in gmt:
a = line.decode("utf-8").split()
tf = a[0].split("_")[0]
targets = a[2:]
for target in targets:
fl1.write(f"{tf}\t{target}\n")
def fix_columns(df):
"""Make sure network has a tf and a target column."""
df.columns = df.columns.str.lower()
df = df.rename(
columns={
"source": "tf",
"source_target": "tf_target",
"target_gene": "target",
}
)
if "tf_target" in df.columns:
df[["tf", "target"]] = df["tf_target"].str.split("_", expand=True).iloc[:, :2]
df = df.drop(columns=["tf_target"])
if "tf" not in df.columns:
raise ValueError("Expect a column named 'source' or 'tf'")
if "target" not in df.columns:
raise ValueError("Expect a column named 'target' or 'target_gene'")
return df
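# Hedged usage sketch (hypothetical column names and values): a network with
# "source" and "target_gene" columns is normalized to "tf" and "target".
def _example_fix_columns():
    df = pd.DataFrame({"source": ["TF1"], "target_gene": ["GENE1"], "prob": [0.9]})
    return fix_columns(df)  # columns: tf, target, prob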
def prepare_reference_network(network, filter_tfs=True):
"""Generate reference network.
This network contains all possible edges, based on the TFs
and the target genes in the input. TFs are optionally filtered
to contain only validated TFs.
Returns
-------
DataFrame with column `"interaction"` having 1 for a validated
edge and 0 otherwise.
"""
if isinstance(network, pd.DataFrame):
df = network.reset_index()
elif isinstance(network, str):
if network.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
else:
raise ValueError("Unknown network type, need DataFrame or filename.")
df = fix_columns(df)
interaction_column = None
for col in df.columns:
if col in ["tf", "target"]:
continue
vals = df[col].unique()
if len(vals) in [1, 2] and 1 in vals:
interaction_column = col
break
tfs = set(df["tf"].unique())
if filter_tfs:
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = df["target"].unique()
# logger.info(
# f"{os.path.split(network)[-1]} reference - {len(tfs)} TFs, {len(targets)} targets, {df.shape[0]} edges."
# )
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
if interaction_column is not None:
logger.info(f"Using '{interaction_column}' as interaction column.")
df = df.set_index(["tf", "target"])[[interaction_column]].rename(
columns={interaction_column: "interaction"}
)
else:
logger.info("No column with 1 found, assuming all lines are positive edges.")
df = df.set_index(["tf", "target"])
df["interaction"] = 1
return total.join(df[["interaction"]]).fillna(0)
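# Hedged usage sketch (hypothetical edges; filter_tfs=False so the example does
# not need the curated TF list that get_tfs() downloads):
def _example_prepare_reference_network():
    net = pd.DataFrame(
        {"tf": ["TF1", "TF1"], "target": ["GENE1", "GENE2"], "interaction": [1, 1]}
    ).set_index(["tf", "target"])
    # Returns the full TF x target grid, indexed on (tf, target), with
    # "interaction" set to 1 for edges present in the input and 0 otherwise.
    return prepare_reference_network(net, filter_tfs=False)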
def _read_dorothea_reference(fname):
dorothea = pd.read_table(fname)
cols = [
"is_evidence_chip_seq",
"is_evidence_curated",
"is_evidence_inferred",
"is_evidence_tfbs",
]
dorothea = dorothea.set_index(["tf", "target"])[cols]
for col in cols:
dorothea[col] = dorothea[col].astype(int)
dorothea["dorothea"] = np.any(dorothea[cols] == 1, 1).astype(int)
dorothea = dorothea.reset_index()
tfs = set(dorothea["tf"].unique())
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = dorothea["target"].unique()
logger.info(
f"Dorothea reference - {len(tfs)} TFs, {len(targets)} targets, {dorothea.shape[0]} edges."
)
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
dorothea = dorothea.set_index(["tf", "target"])
dorothea = total.join(dorothea)
dorothea = dorothea.fillna(0)
return dorothea
def _read_enrichr_perturbation_reference(fname=None):
"""Uses the TF perturbations from Enrichr[1,2] to create reference edges.
    Targets are defined by up- or down-regulated genes from the following sets:
    Up: INDUCTION, ACTIVATION, OE.
    Down: KD, KO, INACTIVATION, DEPLETION, SIRNA, SHRNA, KNOCKOUT, DELETION, INHIBITION.
The TF and targets in the DataFrame consists of the Cartesian product of
all TFs and target genes that occur in the set.
Returns
-------
DataFrame with tf-target edges.
References
----------
    .. [1] Chen EY, Tan CM, Kou Y, Duan Q, Wang Z, Meirelles GV, Clark NR, Ma'ayan A.
"Enrichr: interactive and collaborative HTML5 gene list enrichment analysis
tool." BMC Bioinformatics. 2013;128(14)
.. [2] Kuleshov MV, Jones MR, Rouillard AD, Fernandez NF, Duan Q, Wang Z,
Koplev S, Jenkins SL, Jagodnik KM, Lachmann A, McDermott MG, Monteiro CD,
Gundersen GW, Ma'ayan A. "Enrichr: a comprehensive gene set enrichment
analysis web server 2016 update." Nucleic Acids Research. 2016; gkw377.
"""
use_online = False
if fname:
fopen = open(fname)
else:
logger.info(
"No filename provided for TF perturbations, downloading from Enrichr"
)
fopen = urllib.request.urlopen(TFP_URL)
use_online = True
p = re.compile(r"(\w+)\s+(\w+)\s+(.+)\s+(\w+)")
all_info = []
edges = []
with fopen as f:
for line in f:
if use_online:
line = line.decode("utf-8")
vals = line.strip().split("\t")
m = re.search(p, vals[0])
all_info.append(m.groups(0))
if (
m.group(2) in ["INDUCTION", "ACTIVATION", "OE"] and m.group(4) == "UP"
) or (
m.group(2)
in [
"KD",
"KO",
"INACTIVATION",
"DEPLETION",
"SIRNA",
"SHRNA",
"KNOCKOUT",
"DELETION",
"INHIBITION",
]
and m.group(4) == "DOWN"
):
tf = m.group(1)
for target in vals[2:]:
edges.append([tf, target])
all_info = pd.DataFrame(all_info, columns=["tf", "exp", "info", "up_down"])
perturb_df = pd.DataFrame(edges, columns=["tf", "target"])
tfs = set(perturb_df["tf"].unique())
targets = perturb_df["target"].unique()
logger.info(
f"TF perturbation reference - {len(tfs)} TFs, {len(targets)} targets, {perturb_df.shape[0]} edges."
)
perturb_df["experiments"] = 1
perturb_df = perturb_df.groupby(["tf", "target"]).count()
perturb_df["interaction"] = 1
perturb_df.columns = ["perturb_experiments", "perturb_interaction"]
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
perturb_df = total.join(perturb_df).fillna(0)
return perturb_df
def get_tfs():
valid_factors = pd.read_excel(
"https://www.biorxiv.org/content/biorxiv/early/2020/12/07/2020.10.28.359232/DC1/embed/media-1.xlsx",
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = [f for f in valid_factors if f != "EP300"]
return valid_factors
def read_network(fname, name=None):
network = fname
if fname.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
df = fix_columns(df)
df = df.set_index(["tf", "target"])
# Assuming last column is the edge weight
df = df.iloc[:, [-1]]
if name is not None:
df.columns = [name]
return df
def _read_correlation_reference(network, corCutoff=0.6):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
edb = pd.read_csv(network, sep="\t")
edb["iscorrelation"] = [1 if i > corCutoff else 0 for i in edb["correlationRank"]]
edb[["tf", "target"]] = edb["source_target"].str.split("_", expand=True).iloc[:, :2]
edb = edb.drop(
columns=["source_target", "ocorrelation", "correlation", "correlationRank"]
)
edb = edb[edb.tf.isin(tfs)]
edb = edb.set_index(["tf", "target"])
return edb
def _read_goterm_reference(network, goCutoff=0):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
gdb = pd.read_csv(network, sep="\t", header=None)
gdb["isgo"] = [1 if i > goCutoff else 0 for i in gdb[2]]
gdb = gdb.rename(columns={3: "tf", 1: "target"})
gdb = gdb[gdb.tf.isin(tfs)]
gdb = gdb.drop(columns=[0, 2])
gdb = gdb.set_index(["tf", "target"])
return gdb
def _read_msigdb_reference(network):
msidb = pd.read_csv(network, sep="\t", header=None)
msidb = msidb.rename(columns={0: "tf", 1: "target"})
msidb = msidb.set_index(["tf", "target"])
msidb["interaction"] = 1
return msidb
def _read_regnet_reference(network):
regnet = pd.read_csv(network)
regnet = regnet.rename(
columns={"regulator_symbol": "tf", "target_symbol": "target"}
)
regnet = regnet.set_index(["tf", "target"])
regnet["interaction"] = 1
return regnet[["interaction"]]
def read_reference(name, fname=None):
"""
Valid reference networks (name):
- dorothea
- perturbation
- correlation
- goterm
- msigdb
- regnet
- trrust
"""
if name.lower() == "dorothea":
return _read_dorothea_reference(fname)
if name.lower() == "perturbation":
return prepare_reference_network(_read_enrichr_perturbation_reference(fname))
if name.lower() == "correlation":
return prepare_reference_network(_read_correlation_reference(fname, 0.6))
if name.lower() == "goterm":
return prepare_reference_network(_read_goterm_reference(fname, 0))
if name.lower() == "msigdb":
return prepare_reference_network(_read_msigdb_reference(fname))
if name.lower() == "regnet":
return prepare_reference_network(_read_regnet_reference(fname))
if name.lower() == "trrust":
return prepare_reference_network(fname)
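# Hedged usage sketch (hypothetical file path): most readers expect the path of
# a previously downloaded file, and prepare_reference_network() will fetch the
# curated TF list over the network unless filter_tfs is disabled.
def _example_read_reference():
    download_trrust_reference("trrust.tsv")  # writes a tf/target/interaction table
    return read_reference("trrust", "trrust.tsv")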
def validate_files(fnames, ignore_missing=False):
file_error = False
for fname in fnames:
if not os.path.exists(fname):
logger.error(f"file {fname} does not exist")
file_error = True
if not ignore_missing and file_error:
raise ValueError("One or more files not found!")
def read_networks(network_dict, ignore_missing=False):
"""Read predicted networks.
Input is a dictionary with name as key and filename as value.
"""
# Validate files first
validate_files(network_dict.values(), ignore_missing=ignore_missing)
df = pd.DataFrame({"tf": [], "target": []}).set_index(["tf", "target"])
for name, fname in network_dict.items():
if os.path.exists(fname):
logger.info(f"Reading {name}")
tmp = read_network(fname, name=name)
logger.info(f"Merging {name}")
df = df.join(tmp, how="outer")
return df
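# Hedged usage sketch (hypothetical file names): merge several predicted
# networks into one DataFrame indexed on (tf, target), one score column per
# network.
def _example_read_networks():
    networks = {
        "ananse": "ananse_network.tsv",
        "other": "other_network.tsv",
    }
    return read_networks(networks, ignore_missing=True)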
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (HEAD -> master)"
git_full = "18995f01657db5e92d4558eff4c1e81d30ff088e"
git_date = "2021-09-28 10:06:03 +0200"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "ananse-"
cfg.versionfile_source = "ananse/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
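# For reference, a successful call returns a dict shaped like the render() result
# above, e.g. (illustrative values only):
#   {"version": "1.2.3+4.gabc1234", "full-revisionid": "<full 40-char sha>",
#    "dirty": False, "error": None, "date": "2021-05-01T12:00:00+0200"}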
|
from glob import glob
import inspect
import os
import re
import sys
from tempfile import NamedTemporaryFile
from fluff.fluffio import load_heatmap_data
from genomepy import Genome
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import scan_regionfile_to_table
from gimmemotifs.moap import moap
import joblib
from loguru import logger
import networkx as nx
import numpy as np
import pandas as pd
from pandas import HDFStore
from sklearn.preprocessing import scale, minmax_scale
from scipy.stats import rankdata
import qnorm
import ananse
from ananse.enhancer_binding import CombineBedFiles
from ananse.utils import get_motif_factors, check_input_factors
# This motif file is not created by default
# * f"{self.data_dir}/reference.factor.feather"
class PeakPredictor:
def __init__(
self,
reference=None,
atac_bams=None,
histone_bams=None,
regions=None,
genome="hg38",
pfmfile=None,
factors=None,
pfmscorefile=None,
ncpus=4,
):
self.data_dir = reference
if atac_bams is None and histone_bams is None:
raise ValueError("Need either ATAC-seq or H3K27ac BAM file(s).")
if genome is None:
logger.warning("Assuming genome is hg38")
genome = "hg38"
self.genome = genome
self.set_species(genome)
if pfmfile is None and self.species not in ["human", "mouse"]:
logger.warning(
f"The genome '{genome}' is not recognized as human or mouse."
)
logger.warning(
"If you do have another species, the motif file likely needs to be adapted."
)
logger.warning(
"Currently mouse and human gene names are used to link motif to TFs."
)
logger.warning(
"If your gene symbols are different, then you will need to create a new mapping"
)
logger.warning(
"and use the `-p` argument. For a possible method to do this, see here:"
)
logger.warning(
"https://gimmemotifs.readthedocs.io/en/stable/reference.html#command-gimme-motif2factors"
)
# Set basic information
self.ncpus = ncpus
self._atac_data = None
self._histone_data = None
self.factor_models = {}
self.pfmfile = pfmfile
self._load_motifs(factors=factors)
# if the reference regions are used, we can use existing data such
# as motif scores.
if regions is None:
self.region_type = "reference"
self._load_reference_data()
# If we have custom regions we have to scan for motifs.
else:
self.region_type = "custom"
self.regions = regions
if pfmscorefile is None:
self._scan_motifs(regions)
else:
self._load_prescanned_motifs(pfmscorefile)
# Load ATAC data
if atac_bams is not None:
self.load_atac(atac_bams, update_models=False)
# Load histone ChIP-seq data
if histone_bams is not None:
self.load_histone(histone_bams, update_models=False)
self._set_model_type()
def _scan_motifs(self, regions):
"""[summary]
Parameters
----------
regions : [type]
[description]
"""
logger.info("Scanning regions for motifs.")
with NamedTemporaryFile(mode="w") as f:
print("region", file=f)
for region in regions:
print(region, file=f)
f.flush()
# TODO: we're still scanning for *all* motifs, even if we only have
# a few factors
motif_df = scan_regionfile_to_table(
f.name, self.genome, "score", ncpus=self.ncpus
)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_prescanned_motifs(self, pfmscorefile):
"""
Use pre-scanned gimmemotifs motif scores.
Parameters
----------
pfmscorefile : str/file
pre-scanned gimmemotifs scores file
"""
logger.info("loading pre-scanned motif scores.")
motif_df = pd.read_table(pfmscorefile, comment="#", index_col=0)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_reference_data(self):
"""Load data for reference regions.
Will load three types of data:
* Motif scores.
* The average peak coverage (self._avg)
* The distance from the peak to nearest TSS. (self._dist)
All of these data are only used with the reference set of regions.
"""
# Read motifs
logger.info("loading motifs for reference")
self._motifs = pd.read_feather(f"{self.data_dir}/reference.factor.feather")
self._motifs.set_index(self._motifs.columns[0], inplace=True)
# Read average coverage
logger.info("loading average peak coverage for reference")
self._avg = pd.read_table(
f"{self.data_dir}/reference.coverage.txt",
sep="\t",
comment="#",
index_col=0,
)
self._avg.columns = ["average"]
self._avg["average"] = self._avg["average"] / self._avg["average"].max()
# Read distance to TSS
logger.info("loading distance for reference")
self._dist = pd.read_table(
f"{self.data_dir}/reference.dist_to_tss.txt",
sep="\t",
comment="#",
index_col=0,
)
# Set regions
self.regions = self._avg.index
def _load_human_factors(self):
package_dir = os.path.dirname(ananse.__file__)
tf_xlsx = os.path.join(package_dir, "db", "lovering.tfs.xlsx")
valid_factors = pd.read_excel(
tf_xlsx,
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = list(set(valid_factors) - set(["EP300"]))
return valid_factors
def set_species(self, genome):
try:
# Try to get taxonomy id for genomepy managed genome.
# If there is a taxonomy id, we can be really sure about the species.
# If genome doesn't have a tax_id, then it will be 'na' and
# fail to convert to int.
genome = Genome(genome)
tax_id = int(genome.tax_id)
if tax_id == 9606:
self.species = "human"
elif tax_id == 10090:
self.species = "mouse"
else:
# tax_id converts to int so it is valid, must be not human or mouse
self.species = None
return
except Exception:
pass
mapping = {
"hg38": "human",
"hg19": "human",
"GRCh3": "human",
"mm10": "mouse",
"mm9": "mouse",
"GRCm3": "mouse",
}
base_genome = os.path.basename(self.genome.strip("/"))
for name, species in mapping.items():
if name in base_genome:
self.species = species
return
self.species = None
def factors(self):
if self.species == "human":
valid_factors = self._load_human_factors()
return [f for f in self.f2m if f in valid_factors]
if self.species == "mouse":
# Mouse mappings are included in the default motif db.
# Using the fact here that mouse names are not all upper-case.
# TODO: replace with a curated set of factors.
return [f for f in self.f2m if f[1:].islower()]
return list(self.f2m.keys())
def _load_factor2motifs(self, pfmfile=None, indirect=True, factors=None):
motifs = read_motifs(pfmfile, as_dict=True)
f2m = {}
if self.species == "human":
valid_factors = self._load_human_factors()
for name, motif in motifs.items():
for factor in get_motif_factors(motif, indirect=indirect):
if factors is not None and factor not in factors:
continue
                # TODO: this is temporary, while the motif database we use is
                # not very clean...
if self.species == "human":
factor = factor.upper()
if self.species == "human" and factor not in valid_factors:
continue
f2m.setdefault(factor, []).append(name)
return f2m
def _load_motifs(self, indirect=True, factors=None):
"""Load motif-associated data.
For now, only default motifs are supported.
Will read factors associated to motifs, and generates a graph of
related factors based on different factors binding to the same motif.
This information is used to select the most appropriate TF model.
Parameters
----------
indirect : bool, optional
Include TF-motif associations that are not curated, for instance
based on ChIP-seq motif prediction, or binding inference. This will
greatly increase TF coverage. By default True.
"""
if self.pfmfile is None:
logger.info("using default motif file")
else:
logger.debug(f"Motifs: {self.pfmfile}")
self.motifs = read_motifs(self.pfmfile, as_dict=True)
self.f2m = self._load_factor2motifs(
pfmfile=self.pfmfile, indirect=indirect, factors=factors
)
if len(self.f2m) == 1:
logger.info("using motifs for 1 factor")
else:
logger.info(f"using motifs for {len(self.f2m)} factors")
        # Create a graph of TFs where edges are determined by the Jaccard index
        # of the motifs that they bind to. For instance, when TF 1 binds motifs
        # A and B and TF 2 binds motifs B and C, the Jaccard index is 0.33 and
        # the edge is stored with weight 1 - 0.33 = 0.67.
tmp_f2m = {}
if self.pfmfile is not None:
logger.debug("reading default file")
tmp_f2m = self._load_factor2motifs(indirect=True)
for k, v in self.f2m.items():
if k in tmp_f2m:
tmp_f2m[k] += v
else:
tmp_f2m[k] = v
self.motif_graph = nx.Graph()
d = []
for f1 in tmp_f2m:
for f2 in tmp_f2m:
jaccard = len(set(tmp_f2m[f1]).intersection(set(tmp_f2m[f2]))) / len(
set(tmp_f2m[f1]).union(set(tmp_f2m[f2]))
)
d.append([f1, f2, jaccard])
if jaccard > 0:
self.motif_graph.add_edge(f1, f2, weight=1 - jaccard)
def _load_bams(self, bams, title, window=200):
tmp = pd.DataFrame(index=self.regions)
with NamedTemporaryFile(mode="w") as f_out:
for region in self.regions:
print("{}\t{}\t{}".format(*re.split("[:-]", region)), file=f_out)
f_out.flush()
for bam in bams:
result = load_heatmap_data(
f_out.name,
bam,
bins=1,
up=window // 2,
down=window // 2,
rmdup=True,
rmrepeats=True,
)
tmp[result[0]] = result[2].T[0]
fname = f"{self.data_dir}/{title}.qnorm.ref.txt.gz"
if os.path.exists(fname):
logger.debug(f"quantile normalization for {title}")
qnorm_ref = pd.read_table(fname, index_col=0)["qnorm_ref"].values
if len(self.regions) != len(qnorm_ref):
qnorm_ref = np.random.choice(
qnorm_ref, size=len(self.regions), replace=True
)
tmp = qnorm.quantile_normalize(tmp, target=qnorm_ref)
else:
tmp = np.log1p(tmp)
# Limit memory usage by using float16
tmp = tmp.mean(1).astype("float16").to_frame(title)
fname = f"{self.data_dir}/{title}.mean.ref.txt.gz"
if self.region_type == "reference" and os.path.exists(fname):
mean_ref = pd.read_table(fname, index_col=0)
if mean_ref.shape[0] == tmp.shape[0]:
mean_ref.index = tmp.index
tmp[f"{title}.relative"] = (
tmp[title] - mean_ref.loc[tmp.index]["mean_ref"].values
)
tmp[f"{title}.relative"] = scale(tmp[f"{title}.relative"])
else:
logger.debug(f"Regions of {fname} are not the same as input regions.")
logger.debug("Skipping calculation of relative values.")
tmp[title] = tmp[title] / tmp[title].max()
return tmp
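    # The frame returned above (illustrative description, not original docs): the index
    # is self.regions with one column named after `title`, scaled to at most 1, plus an
    # optional "<title>.relative" column when a matching "<title>.mean.ref.txt.gz"
    # reference file is found for reference regions.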
def load_atac(self, bams, update_models=True):
"""Load ATAC-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading ATAC data")
self._atac_data = self._load_bams(bams, title="ATAC", window=200)
if update_models:
self._set_model_type()
def load_histone(self, bams, update_models=True):
"""Load H3K27ac ChIP-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading H3K27ac data")
self._histone_data = self._load_bams(bams, title="H3K27ac", window=2000)
if update_models:
self._set_model_type()
def _set_model_type(self):
"""Select the mode to use for binding prediction.
Basically, this will select the columns that are available,
based on the different types of data that are loaded.
Reference regions will have the most information.
"""
cols = ["motif"]
if self._atac_data is not None:
cols += ["ATAC"]
if self.region_type == "reference":
cols += ["ATAC.relative"]
if self._histone_data is not None:
cols += ["H3K27ac"]
if self.region_type == "reference":
cols += ["average", "dist"]
cols = sorted(cols)
self._X_columns = cols
self._model_type = "_".join(cols)
# Load models
logger.info("Loading models")
# print(os.path.join(self.data_dir, self._model_type))
for fname in glob(os.path.join(self.data_dir, self._model_type, "*.pkl")):
factor = fname.split("/")[-1].replace(".pkl", "")
self.factor_models[factor] = joblib.load(fname)
logger.info(f"{len(self.factor_models)} models found")
def predict_proba(self, factor=None, motifs=None):
"""Predict binding probability.
Predict binding probability for either a TF (factor) or a set of
        motifs. Prediction will be based on the data that has been loaded,
either ATAC-seq or H3K27ac data or both.
Parameters
----------
factor : str, optional
Transcription factor name.
motifs : [type], optional
Motifs. Currently not implemented.
Returns
-------
pandas.DataFrame
DataFrame with binding probabilities
"""
if factor is None and motifs is None:
raise ValueError("Need either a TF name or one or more motifs.")
if motifs is not None:
raise NotImplementedError("Custom motifs not yet implemented!")
if factor not in self.f2m:
raise ValueError(f"Motif not known for {factor}")
model, factor = self._load_model(factor)
X = self._load_data(factor)
proba = model.predict_proba(X)[:, 1]
return pd.DataFrame(proba, index=self.regions)
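    # Hypothetical usage sketch (names and paths are examples, not from the original code):
    #   p = PeakPredictor(reference="path/to/reference", atac_bams=["atac.bam"], genome="hg38")
    #   proba = p.predict_proba("TP53")  # one-column DataFrame of probabilities, indexed by region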
def _load_data(self, factor):
# if self.region_type == "reference":
# logger.debug("Reading motif data")
tmp = pd.DataFrame(
{factor: self._motifs[factor]}, index=self.regions
) # pd.read_table(os.path.join(self.data_dir, f"{factor}.motif.txt.gz"), index_col=0)
# else:
tmp.columns = ["motif"]
if self._atac_data is not None:
tmp = tmp.join(self._atac_data)
if self._histone_data is not None:
tmp = tmp.join(self._histone_data)
if self.region_type == "reference":
tmp = tmp.join(self._avg)
tmp = tmp.join(self._dist)
tmp = tmp.dropna()
# logger.debug(str(self._X_columns))
return tmp[self._X_columns]
def _load_model(self, factor):
model = None
if factor in self.factor_models:
logger.info(f"Using {factor} model")
model = self.factor_models[factor]
elif factor in self.motif_graph:
paths = {
p: v
for p, v in nx.single_source_dijkstra_path_length(
self.motif_graph, factor
).items()
if p in self.factor_models
}
try:
sub_factor = list(paths.keys())[0]
logger.info(f"Using {factor} motif with {sub_factor} model weights")
model = self.factor_models[sub_factor]
# factor = sub_factor
except Exception:
logger.info(f"No match for {factor} based on motifs")
if model is None:
logger.info(f"No related TF found for {factor}, using general model")
model = self.factor_models["general"]
return model, factor
def predict_factor_activity(self, nregions=20_000):
"""Predict TF activity.
Predicted based on motif activity using ridge regression.
        Parameters
        ----------
        nregions : int, optional
            Number of regions to sample for the regression, by default 20_000.
        """
# Run ridge regression using motif score to predict (relative) ATAC/H3K27ac signal
try:
nregions = int(nregions)
except ValueError:
logger.warning("nregions is not an integer, using default number of 20_000")
nregions = 20_000
activity = pd.DataFrame()
for df in (self._atac_data, self._histone_data):
if df is None:
continue
for col in df.columns:
with NamedTemporaryFile() as f:
# float16 will give NaN's
signal = df[col].astype("float32")
signal = pd.DataFrame({col: scale(signal)}, index=df.index)
if df.shape[0] < nregions:
signal.to_csv(f.name, sep="\t")
else:
signal.sample(nregions).to_csv(f.name, sep="\t")
try:
activity = activity.join(
moap(
f.name,
genome=self.genome,
method="bayesianridge",
pfmfile=self.pfmfile,
),
how="outer",
)
except Exception as e:
print(e)
# Rank aggregation
for col in activity:
activity[col] = rankdata(activity[col])
activity = activity.mean(1)
activity[:] = minmax_scale(activity)
# Take the maximum activity from the motifs of each factor
factor_activity = []
for factor, motifs in self.f2m.items():
act = activity.loc[motifs].max()
factor_activity.append([factor, act])
factor_activity = pd.DataFrame(factor_activity, columns=["factor", "activity"])
return factor_activity
def _check_input_regions(regionfiles, genome, outdir=".", verbose=True, force=False):
# Load regions from BED or region text file
if regionfiles is None:
# Keep regions to None, use reference regions.
return
infile = regionfiles[0]
if len(regionfiles) > 1:
# merge files, assumed to be all BED
peak_width = 200
cbed = CombineBedFiles(genome=genome, peakfiles=regionfiles, verbose=verbose)
combined_bed = os.path.join(outdir, "regions_combined.bed")
cbed.run(outfile=combined_bed, width=peak_width, force=force)
infile = combined_bed
df = pd.read_table(infile, header=None, sep="\t", comment="#", dtype=str)
    assert df.shape[0] > 2, "regions file must have more than 2 regions."
test = str(df.at[1, 0])
if bool(re.match(r"^.*:\d+-\d+$", test)):
# it's a regions list
# or it's a Seq2science counts table
regions = df.iloc[:, 0].tolist()
elif df.shape[1] >= 3:
# it's a BED file
regions = (
# For Ensembl genome names, make sure it's a string
df.iloc[:, 0].astype(str)
+ ":"
+ df.iloc[:, 1].astype(str)
+ "-"
+ df.iloc[:, 2].astype(str)
).tolist()
else:
raise TypeError("Cannot identify regions file(s) type.")
# remove the header, if any.
header = str(regions[0])
if not bool(re.match(r"^.*:\d+-\d+$", header)):
regions = regions[1:]
return regions
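# Illustrative behaviour of _check_input_regions (example values, not from the original
# docs): a BED line "chr1<tab>100<tab>300" is converted to the region string
# "chr1:100-300", while a file that already lists "chr1:100-300" per line (or a
# Seq2science counts table) is used as-is; a non-matching first entry is treated as a
# header and dropped.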
def _check_input_files(*args):
files = []
for arg in args:
if arg is None:
continue
if isinstance(arg, list):
files.extend(arg)
else:
files.append(arg)
all_files_found = True
for fname in files:
if not os.path.exists(fname):
logger.exception(f"Could not find {fname}!")
all_files_found = False
if not all_files_found:
exit(1)
def predict_peaks(
outdir,
atac_bams=None,
histone_bams=None,
regionfiles=None,
reference=None,
factors=None,
genome=None,
pfmfile=None,
pfmscorefile=None,
ncpus=4,
):
"""Predict binding in a set of genomic regions.
Binding is predicted based on ATAC-seq and/or H3K27ac ChIP-seq data in
combination with motif scores. The model that is used is flexible, based
on the input data. The most accurate model will be the one that uses the
    reference regions in combination with both ATAC-seq and H3K27ac ChIP-seq.
    The result will be saved to an output file called `binding.h5` in the
    output directory, specified by the `outdir` argument. This file contains
    the predicted binding probability for each factor in each enhancer region.
To predict binding, `predict_peaks()` needs a set of input regions. For
human, you have two options. You can either use the reference set of
putative enhancer regions, as described in the ANANSE manuscript [1]. This
is specified by the `reference` argument.
Alternatively, you can specify one or more region files with the
`regionfiles` argument. These are files in BED or narrowPeak format, that
describe potential enhancers. For instance, a reference enhancer set, peaks
from your ATAC-seq experiments or any other collection of regions. For
accurate motif analysis, these should be as precise as possible. BroadPeaks
from histone ChIP-seq are not really suitable. NarrowPeaks from ATAC-seq,
DNase-seq or TF ChIP-seq will be fine.
Parameters
----------
outdir : str
Name of output directory.
atac_bams : list, optional
List of BAM files, by default None
histone_bams : list, optional
List of H3K27ac ChIP-seq BAM files, by default None
regionfiles : list, optional
BED file or text file with regions, or a list of BED, narrowPeak or
        broadPeak files. If None, then the reference regions are used.
reference : str, optional
Directory name to a reference.
factors : list, optional
List of TF names or file with TFs, one per line. If None (default),
then all TFs are used.
genome : str, optional
Genome name. The default is hg38.
pfmfile : str, optional
Motifs in PFM format, with associated motif2factors.txt file.
pfmscorefile : str, optional
Path to file with pre-scanned motif scores.
ncpus : int, optional
Number of threads to use. Default is 4.
"""
if reference is None and regionfiles is None:
logger.error("Need either input regions or location of a reference set!")
logger.error(
"For human, you can download the REMAP reference here: https://doi.org/10.5281/zenodo.4768075 "
"(please see the docs on how to install this)."
)
logger.error(
"Otherwise you need to specify one or more BED or narrowPeak files"
)
logger.error(
"with potential enhancer regions, for instance, all ATAC-seq peaks"
)
logger.error("from your combined experiments.")
sys.exit(1)
if reference is not None and regionfiles is not None:
logger.error("Need either a reference location *or* or a set of input regions")
sys.exit(1)
# Check if all specified BAM files exist
_check_input_files(atac_bams, histone_bams)
# Read the factors, from a file if needed
factors = check_input_factors(factors)
# Check genome, will fail if it is not a correct genome name or file
Genome(genome)
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
# If regions are specified, read them in, combining multiple files if
# necessary.
regions = _check_input_regions(regionfiles, genome, outdir=outdir)
if reference is None:
install_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
reference = os.path.join(install_dir, "db", "default_reference")
if reference is not None:
if not os.path.exists(reference):
logger.error(f"Reference directory {reference} does not exist!")
sys.exit(1)
p = PeakPredictor(
reference=reference,
atac_bams=atac_bams,
histone_bams=histone_bams,
regions=regions,
genome=genome,
pfmfile=pfmfile,
factors=factors,
pfmscorefile=pfmscorefile,
ncpus=ncpus,
)
outfile = os.path.join(outdir, "binding.h5")
# Make sure we create a new file
with open(outfile, "w"):
pass
with HDFStore(outfile, complib="lzo", complevel=9) as hdf:
if p._atac_data is not None:
hdf.put(key="_atac", value=p._atac_data, format="table")
if p._histone_data is not None:
hdf.put(key="_h3k27ac", value=p._histone_data, format="table")
logger.info("Predicting TF activity")
factor_activity = p.predict_factor_activity()
hdf.put(key="_factor_activity", value=factor_activity, format="table")
for factor in p.factors():
try:
proba = p.predict_proba(factor)
hdf.put(
key=f"{factor}",
value=proba.iloc[:, -1].reset_index(drop=True).astype(np.float16),
format="table",
)
except ValueError as e:
logger.debug(str(e))
hdf.put(key="_index", value=proba.index.to_series(), format="table")
|
from ._version import get_versions
import os
import sys
from loguru import logger
# Remove default logger
logger.remove()
# Add logger
logger.add(sys.stderr, format="{time} | {level} | {message}", level="INFO")
# This is here to prevent very high memory usage on numpy import.
# On a machine with many cores, just importing numpy can result in up to
# 8GB of (virtual) memory. This wreaks havoc on management of the dask
# workers.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
__version__ = get_versions()["version"]
del get_versions
|
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Predict TF influence score"""
# Python imports
from __future__ import print_function
import sys
import warnings
from collections import namedtuple
from loguru import logger
from tqdm import tqdm
import numpy as np
import pandas as pd
import networkx as nx
import multiprocessing as mp
from sklearn.preprocessing import minmax_scale
from scipy.stats import rankdata, mannwhitneyu
from adjustText import adjust_text
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings("ignore")
# Here because of multiprocessing and pickling
Expression = namedtuple("Expression", ["score", "absfc", "realfc"])
def read_network(fname, edges=100000):
"""Read network file and return networkx DiGraph."""
G = nx.DiGraph()
rnet = pd.read_csv(fname, sep="\t")
nrnet = rnet.sort_values("prob", ascending=False)
if len(nrnet) < edges:
usenet = nrnet
else:
usenet = nrnet[:edges]
for vals in usenet.iterrows():
source, target = vals[1][0].split("_", 1)
try:
if len(vals[1]) > 1:
# weight = 1 - float(vals[1])
weight = float(vals[1][1])
# if weight < 0 or weight > 1:
# sys.stderr.write("expect weight between 0 and 1")
# sys.exit(1)
else:
weight = 0
G.add_edge(source, target, weight=weight, n=1)
except Exception:
sys.stderr.write("could not parse edge weight\n")
raise
return G
def difference(S, R):
"""Calculate the network different between two cell types."""
DIF = nx.create_empty_copy(R)
for (u, v, d) in S.edges(data=True):
if (u, v) not in R.edges:
DIF.add_edge(u, v, weight=d["weight"], n=1)
else:
diff_weight = S.edges[u, v]["weight"] - R.edges[u, v]["weight"]
if diff_weight > 0:
DIF.add_edge(
u, v, weight=diff_weight, n=1, neglogweight=-np.log(diff_weight)
)
return DIF
def read_expression(fname):
"""Read differential gene expression analysis output, return dictionary with namedtuples of scores, absolute fold
change and "real" (directional) fold change.
input:
a tab-separated file containing 3 columns (HGNC gene symbols, (adjusted) p-values and log2foldchange)
header is omitted if starting with "resid"
"""
expression_change = dict()
df = pd.read_table(
fname,
index_col=0,
header=0,
dtype={"resid": str, "log2FoldChange": float, "padj": float},
)
# absolute fold change
df["fc"] = df["log2FoldChange"].abs()
    # get the gscore (absolute fold change if significantly differential)
df["score"] = df["fc"] * (df["padj"] < 0.05)
for k, row in df.iterrows():
expression_change[row.name] = Expression(
score=row.score, absfc=row.fc, realfc=row.log2FoldChange
)
return expression_change
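# Example input (illustrative), tab-separated with a header row:
#   resid   log2FoldChange  padj
#   TP53    2.5             0.001
#   MYC     -1.2            0.200
# TP53 is significant (padj < 0.05) and gets score 2.5; MYC gets score 0.0 but keeps
# absfc 1.2 and realfc -1.2.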
def targetScore(node, G, expression_change, max_degree=3):
"""Calculate the influence score."""
# debug only.
# todo
# if expression_change is None:
# expression_change = {"score": {}, "fc": {}}
total_score = 0
# Get the targets that are within a certain number of steps from TF
lengths, paths = nx.single_source_dijkstra(G, node, cutoff=max_degree - 1)
targets = [t for t in lengths if 0 < lengths[t] <= max_degree]
for target in paths:
all_paths = {}
        # Calculate all paths from TF to target and select the path with the highest length-corrected weight
for path in nx.all_simple_paths(G, node, target, cutoff=max_degree - 1):
if len(path) <= max_degree:
weight = np.cumprod(
[G[s][t]["weight"] for s, t in zip(path, path[1:])]
)[-1]
# Add weight, corrected for the length of the path
all_paths[tuple(path)] = weight / (len(path) - 1)
if len(all_paths) > 0:
path, weight = sorted(all_paths.items(), key=lambda p: p[1])[-1]
# print(target, path, weight)
# outdegree of parent node of the target
# d = np.log(G.out_degree(path[-2]) + 1)
# d = G.out_degree(path[-2])
# the level (or the number of steps) that gene is away from transcription factor
pathlen = len(path)
# expression score of the target
g = expression_change[target].score if target in expression_change else 0
# weight is cumulative product of probabilities
# weight = [G[s][t]["weight"] for s, t in zip(path[:-1], path[1:])]
# cumulative sum of weight
# weight = np.cumprod(weight)[-1]
# score = g / len(path) / d * weight
score = g / pathlen * weight
total_score += score
# Get Mann-Whitney U p-value of direct targets vs. non-direct targets
direct_targets = [n for n in G[node] if n in expression_change]
non_direct_targets = [
n for n in list(G.nodes) if n in expression_change and n not in direct_targets
]
target_fc = [expression_change[t].absfc for t in direct_targets]
non_target_fc = [expression_change[t].absfc for t in non_direct_targets]
pval = mannwhitneyu(target_fc, non_target_fc)[1]
target_fc_diff = np.mean(target_fc) - np.mean(non_target_fc)
# factor, targetScore, directTargets, totalTargets, Gscore, pval, target_fc
return (
node,
total_score,
G.out_degree(node),
len(targets),
expression_change[node].absfc if node in expression_change else 0,
pval,
target_fc_diff,
)
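# Worked example of the path scoring above (made-up numbers): for a path TF -> A -> B
# with edge weights 0.8 and 0.5, the cumulative weight is 0.8 * 0.5 = 0.4, corrected
# for path length to 0.4 / (3 - 1) = 0.2; if target B has expression score g = 1.5,
# its contribution is g / pathlen * weight = 1.5 / 3 * 0.2 = 0.1.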
def filter_TF(scores_df, network=None, tpmfile=None, tpm=20, overlap=0.98):
"""Filter TFs:
1) it have high expression in origin cell type;
2) 98% of its target genes are also regulated by previous TFs.
"""
tpmscore = {}
with open(tpmfile) as tpf:
next(tpf)
for line in tpf:
tpmscore[line.split()[0]] = float(line.split()[1])
tftarget = {}
for tf in scores_df.index:
tftarget[tf] = set(network[tf]) if tf in network else set()
ltf = list(scores_df.index)
keeptf = []
for i in ltf:
passtf = []
if len(tftarget[i]) > 0:
for j in ltf[: ltf.index(i)]:
if len(tftarget[i] & tftarget[j]) / len(tftarget[i]) > overlap:
break
else:
passtf.append(j)
if passtf == ltf[: ltf.index(i)] and i in tpmscore and tpmscore[i] < tpm:
keeptf.append(i)
scores_df = scores_df.loc[keeptf]
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
return scores_df
def plot_influscore(infile, outfile):
"""Plot TF influence score to expression."""
mogrify = pd.read_table(infile, index_col="factor")
mogrify = mogrify.dropna()
factors = list(mogrify.sort_values("sumScaled").tail(20).index)
# factors = list(mogrify.sort_values("sumScaled").tail(20).index)
xcol = "factor_fc"
plt.figure(figsize=(8, 6))
sns.regplot(
data=mogrify,
x=xcol,
y="sumScaled",
fit_reg=False,
scatter_kws={"s": mogrify["directTargets"] / 10, "alpha": 0.5},
)
x = mogrify.loc[factors, xcol]
y = mogrify.loc[factors, "sumScaled"]
texts = []
for s, xt, yt in zip(factors, x, y):
texts.append(plt.text(xt, yt, s))
adjust_text(texts, arrowprops=dict(arrowstyle="-", color="black"))
plt.xlabel("Log2 fold change of TF")
plt.ylabel("Influence score")
plt.savefig(outfile, dpi=300)
class Influence(object):
def __init__(
self, outfile, degenes, Gbf=None, Gaf=None, filter=False, edges=100000, ncore=1
):
self.ncore = ncore
logger.info(f"Reading network(s), using top {edges} edges.")
# Load GRNs
if Gbf is None and Gaf is not None:
self.G = read_network(Gaf, edges=edges)
logger.warning("You only provide the target network!")
elif Gaf is None and Gbf is not None:
self.G = read_network(Gbf, edges=edges)
logger.warning("You only provided the source network!")
        elif Gaf is None and Gbf is None:
            logger.error("You should provide at least one ANANSE network file!")
            sys.exit(1)
else:
G1 = read_network(Gbf, edges=edges)
G2 = read_network(Gaf, edges=edges)
self.G = difference(G2, G1)
logger.info(f"Differential network has {len(self.G.edges)} edges.")
# Load expression file
self.expression_change = read_expression(degenes)
self.outfile = outfile
# Filter TFs
self.filter = filter
def save_reg_network(self, filename):
"""Save the network difference between two cell types to a file."""
with open(filename, "w") as nw:
for (u, v, d) in self.G.edges(data=True):
nw.write(u + "\t" + v + "\t" + str(d["weight"]) + "\n")
def run_target_score(self, max_degree=3):
"""Run target score for all TFs."""
pool = mp.Pool(self.ncore)
jobs = []
tfs = [node for node in self.G.nodes() if self.G.out_degree(node) > 0]
logger.info(f"Differential network contains {len(tfs)} transcription factors.")
# differentially expressed TFs
detfs = [tf for tf in tfs if tf in self.expression_change]
if len(detfs) == 0:
sys.stderr.write(
"no overlapping transcription factors found between the network file(s) "
"(-s/--source, -t/--target) and the differential expression data (-d/--degenes)\n"
)
sys.exit(1)
detfs = [tf for tf in detfs if self.expression_change[tf].realfc > 0]
if len(detfs) == 0:
sys.stderr.write(
"no differentially expressed TFs found with a log2 fold change above 0\n"
)
sys.exit(1)
for tf in detfs:
jobs.append(
pool.apply_async(
targetScore, (tf, self.G, self.expression_change, max_degree)
)
)
# Get results and write to file
influence_file = open(self.outfile, "w")
influence_file.write(
"factor\tdirectTargets\ttotalTargets\ttargetsore\tGscore\tfactor_fc\tpval\ttarget_fc\n"
)
with tqdm(total=len(jobs)) as pbar:
for j in jobs:
(
factor,
score,
direct_targets,
total_targets,
factor_fc,
pval,
target_fc,
) = j.get()
print(
factor,
direct_targets,
total_targets,
score,
self.expression_change[factor].score,
factor_fc,
pval,
target_fc,
file=influence_file,
sep="\t",
)
pbar.update(1)
print("\n", file=influence_file)
pool.close()
influence_file.close()
scores_df = pd.read_table(self.outfile, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df.sort_values("targetScaled", inplace=True, ascending=False)
return self.outfile
def run_influence_score(self, influence_file, fin_expression=None):
"""Calculate influence score from target score and gscore"""
scores_df = pd.read_table(influence_file, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df["GscoreScaled"] = minmax_scale(
rankdata(scores_df["Gscore"], method="dense")
)
scores_df["sumScaled"] = minmax_scale(
rankdata(scores_df.targetScaled + scores_df.GscoreScaled, method="dense")
)
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
scores_df = scores_df[
[
"targetScaled",
"GscoreScaled",
"sumScaled",
"directTargets",
"targetsore",
"factor_fc",
]
]
scores_df.to_csv(self.outfile, sep="\t")
if self.filter:
scores_df2 = filter_TF(
network=self.G, scores_df=scores_df, tpmfile=fin_expression
)
scores_df2.to_csv(
".".join(self.outfile.split(".")[:-1]) + "_filtered.txt", sep="\t"
)
def run_influence(self, plot=True, fin_expression=None):
logger.info("Save differential network")
self.save_reg_network(
".".join(self.outfile.split(".")[:-1]) + "_diffnetwork.txt"
)
logger.info("Run target score")
influence_file = self.run_target_score()
logger.info("Run influence score")
self.run_influence_score(influence_file, fin_expression=fin_expression)
if plot is True:
logger.info("Plot results")
plot_influscore(
self.outfile, ".".join(self.outfile.split(".")[:-1]) + ".pdf"
)
|
import os.path
import numpy as np
import pandas as pd
from scipy import stats
from ananse.utils import cleanpath
class Distributions:
def __init__(self):
# dist_functions = [f for f in dir(ananse.distributions) if f.endswith("_dist")]
dist_functions = [
scale_dist,
log_scale_dist,
scipy_dist,
peak_rank_dist,
peak_rank_file_dist,
]
self.functions = {func.__name__: func for func in dist_functions}
def get(self):
"""list distribution methods"""
return list(self.functions.keys())
def set(self, dist_func):
"""return a distribution method by name"""
dist_functions = self.get()
if dist_func not in dist_functions:
raise ValueError(
f"Distribution function '{dist_func}' not recognised. Options: {', '.join(dist_functions)}"
)
return self.functions[dist_func]
def scale_dist(scores, **kwargs): # noqa
"""
Scale the scores between 0 and 1
"""
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
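# Example: scale_dist(np.array([2, 4, 6])) returns array([0. , 0.5, 1. ]).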
def log_scale_dist(scores, **kwargs): # noqa
"""
Scale the log of the scores between 0 and 1
"""
scores = np.log(scores + 1)
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
def replace_infs(dist):
"""
    Replace positive and negative infinity with the closest real values in the array
"""
# https://stackoverflow.com/questions/12937824/lognormal-random-numbers-centered-around-a-high-value
if not isinstance(dist, np.ndarray):
dist = np.array(dist)
min_real_val = np.nanmin(dist[dist != -np.inf])
dist[dist == -np.inf] = min_real_val
max_real_val = np.nanmax(dist[dist != np.inf])
dist[dist == np.inf] = max_real_val
return dist
def scipy_dist(scores, **kwargs):
"""
    Fit scores to a scipy.stats distribution.
    The distribution name is specified via kwargs['dist'] (default: "lognorm").
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
scores = scores + 1 # add pseudocount
x = range(len(scores))
dist_name = kwargs.get("dist", "lognorm")
if dist_name not in dir(stats):
raise ValueError(f"'{dist_name}' is not a recognized scipy.stats model.")
distribution = getattr(stats, dist_name) # eval(f"stats.{dist_name}")
# fit dist to data
params = distribution.fit(scores)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF
dist = distribution.pdf(x, loc=loc, scale=scale, *arg)
dist = replace_infs(dist)
return dist
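# Example (illustrative): scipy_dist(scores, dist="lognorm") adds a pseudocount, fits a
# log-normal to the scores and returns its PDF evaluated at 0..len(scores)-1, with any
# +/- inf values clamped by replace_infs().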
# def lognorm_dist(scores, **kwargs):
# """
# fit scores to a log normal distribution
# """
# scores = scores + 1 # add pseudocount
# x = range(len(scores))
#
# # mu = np.log(scores).mean()
# # sigma = np.log(scores).std()
# # dist = stats.lognorm([sigma], loc=mu).pdf(x)
#
# s, loc, scale = stats.lognorm.fit(scores) # floc=0
# dist = stats.lognorm.pdf(x=x, s=s, loc=loc, scale=scale)
# return dist
def peak_rank_dist(scores, **kwargs): # noqa
"""
Fit scores to a distribution similar to what the p300 model was trained on
"""
# use a lognormal distribution:
# https://github.com/jsh58/Genrich#p-value-calculation
# # peak_rank_file = "ananse/db/peak_rank.txt"
# # scores = pd.read_csv(peak_rank_file, header=None)[0]
# # mu = np.log(scores+1).mean()
# # sigma = np.log(scores+1).std()
# mu = 1.0500836750482117
# sigma = 0.8000981267240566
#
# x = len(scores)
# rng = np.random.default_rng(seed=None)
# dist = rng.lognormal(mean=mu, sigma=sigma, size=x)
#
# print("proximity to the initial distribtion")
# print("delta mu:", np.abs(mu - np.log(dist).mean()))
# print("delta std:", np.abs(sigma - np.log(dist).std()))
# best fitting distribution turns out to be this loglaplace
x = range(len(scores))
c = 0.92
loc = 1.00
scale = 1.14
dist = stats.loglaplace.pdf(x=x, c=c, loc=loc, scale=scale)
dist = replace_infs(dist)
return dist
def peak_rank_file_dist(scores, **kwargs):
"""
fit scores to the distribution in kwargs['file'].
builtin files: "peak_rank.txt" and "peak_rank_hg38_h3k27ac.txt"
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
dist_filename = kwargs.get("file", "peak_rank.txt")
# internal data or user data
if dist_filename in ["peak_rank.txt", "peak_rank_hg38_h3k27ac.txt"]:
package_dir = os.path.dirname(__file__)
dist_filepath = os.path.join(package_dir, "db", dist_filename)
else:
dist_filepath = cleanpath(dist_filename)
if not os.path.exists(dist_filepath):
raise FileNotFoundError(f"Could not find file {dist_filepath}")
dist = pd.read_csv(dist_filepath, header=None)
n = scores.shape[0]
max_n = dist.shape[0]
if max_n < n:
raise ValueError(
f"Too many regions ({n}) to fit to '{dist_filename}' ({max_n})"
)
dist = dist.sample(n=n, random_state=1)[0].tolist()
return dist
|