python_code | repo_name | file_path
---|---|---|
from typing import Callable
import dotenv
import hydra
from omegaconf import OmegaConf, DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
def dictconfig_filter_key(d: DictConfig, fn: Callable) -> DictConfig:
"""Only keep keys where fn(key) is True. Support nested DictConfig.
"""
return DictConfig({k: dictconfig_filter_key(v, fn) if isinstance(v, DictConfig) else v
for k, v in d.items() if fn(k)})
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Remove config keys that start with '__'. These are meant to be used only in computing
# other entries in the config.
config = dictconfig_filter_key(config, lambda k: not k.startswith('__'))
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from src.train import train
from src.eval import evaluate
from src.utils import utils
# A couple of optional utilities:
# - disabling python warnings
# - forcing debug-friendly configuration
# - verifying experiment name is set when running in experiment mode
# You can safely get rid of this line if you don't want those
utils.extras(config)
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Run training or evaluation depending on the 'mode' key
mode = config.get('mode', 'train')
if mode not in ['train', 'eval']:
raise NotImplementedError(f'mode {mode} not supported')
if mode == 'train':
return train(config)
elif mode == 'eval':
return evaluate(config)
if __name__ == "__main__":
main()
| fly-master | run.py |
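A minimal standalone sketch of how the `__`-prefixed keys are stripped by `dictconfig_filter_key` above, including nested ones; the toy config keys here are made up for illustration and only `omegaconf` is assumed to be installed.
from typing import Callable
from omegaconf import OmegaConf, DictConfig

def dictconfig_filter_key(d: DictConfig, fn: Callable) -> DictConfig:
    # Same logic as in run.py above: keep a key only if fn(key) is True,
    # recursing into nested DictConfigs.
    return DictConfig({k: dictconfig_filter_key(v, fn) if isinstance(v, DictConfig) else v
                       for k, v in d.items() if fn(k)})

cfg = OmegaConf.create({'__base_dim': 64, 'model': {'d_model': 64, '__helper': 2}})
print(dictconfig_filter_key(cfg, lambda k: not k.startswith('__')))
# -> {'model': {'d_model': 64}}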
import math
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
max_bound = 1.0
nsteps = 1000
dots = torch.linspace(-max_bound, max_bound, nsteps)
d = 16
m = int(d * math.log(d)) # 44
seqlen = 1024
n_hashes = 4 # This is L in the LSH notation
nbuckets = 23 # This is k in the LSH notation
# We set these so that bucket_size = 1024 / (2 * 23) ≈ 22, so Reformer has 22 * 4 = 88 parameters,
# which is the same as Performer's (2 * 44 = 88)
sm = torch.exp(dots)
performer_mse = 1 / m * torch.exp(2 * max_bound**2 + 2 * dots) * sm**2 * (1 - torch.exp(-2 * max_bound**2 - 2 * dots))
performer_mse_half = 1 / (m / 2) * torch.exp(2 * max_bound**2 + 2 * dots) * sm**2 * (1 - torch.exp(-2 * max_bound**2 - 2 * dots))
performer_hyp_mse = 1 / 2 * (1 - math.exp(-max_bound**2)) * performer_mse_half
tau = torch.sqrt(2 * max_bound**2 - 2 * dots)
lsh_prob = torch.exp(-tau**2 / (4 - tau**2) * math.log(d))
lsh_prob_power = 1 - (1 - lsh_prob**nbuckets)**n_hashes
reformer_mse = sm**2 * (1 - lsh_prob_power)
lsh_prob_power_scatterbrain = 1 - (1 - lsh_prob**(nbuckets))**(n_hashes)
scatterbrain_mse = (1 - lsh_prob_power_scatterbrain) * performer_mse
scatterbrain_hyp_mse = (1 - lsh_prob_power_scatterbrain) * performer_hyp_mse
import matplotlib
matplotlib.style.use(['seaborn-colorblind'])
# import seaborn as sns
# sns.set_theme()
# sns.set_style('whitegrid')
plt.figure(figsize=(6, 4))
plt.plot(dots, reformer_mse, alpha=0.7, color='blue', label='Reformer')
plt.plot(dots, performer_mse, alpha=0.7, color='green', label='Performer')
plt.plot(dots, scatterbrain_mse, alpha=0.7, color='red', label='Scatterbrain')
plt.plot(dots, performer_hyp_mse, alpha=0.7, color='black', label='Performer hyp')
plt.plot(dots, scatterbrain_hyp_mse, alpha=0.7, color='brown', label='Scatterbrain hyp')
plt.xlabel(r'$q^\top k$', fontsize=14)
plt.ylabel('MSE', fontsize=14)
plt.xticks([-1.0, -0.5, 0.0, 0.5, 1.0])
plt.legend(fontsize=14)
plt.savefig('theory_mse_new.pdf', bbox_inches='tight')
plt.close()
| fly-master | analysis/mse_plot.py |
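For reference, the quantities plotted above correspond to the following expressions (a direct transcription of the code, not a derivation), with $t = q^\top k$, $r$ = max_bound, $m$ the number of random features, $d$ the head dimension, $k$ = nbuckets, and $L$ = n_hashes; the "hyp" variants simply use $m/2$ features and an extra factor of $(1 - e^{-r^2})/2$.
\[
\begin{aligned}
\mathrm{MSE}_{\mathrm{Performer}}(t) &= \frac{1}{m}\, e^{2r^2 + 2t}\, e^{2t}\left(1 - e^{-2r^2 - 2t}\right), \\
\tau^2 &= 2r^2 - 2t, \qquad p(t) = \exp\!\left(-\frac{\tau^2}{4 - \tau^2}\,\ln d\right), \\
P_{\mathrm{collide}}(t) &= 1 - \bigl(1 - p(t)^{k}\bigr)^{L}, \\
\mathrm{MSE}_{\mathrm{Reformer}}(t) &= e^{2t}\bigl(1 - P_{\mathrm{collide}}(t)\bigr), \qquad
\mathrm{MSE}_{\mathrm{Scatterbrain}}(t) = \bigl(1 - P_{\mathrm{collide}}(t)\bigr)\,\mathrm{MSE}_{\mathrm{Performer}}(t),
\end{aligned}
\]
where $e^{2t}$ is the squared softmax numerator (sm**2 in the code).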
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.aan import AAN
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestAAN:
@pytest.mark.parametrize('append_eos', [False, True])
@pytest.mark.parametrize('append_bos', [False, True])
def test_dims(self, append_bos, append_eos):
batch_size = 57
max_length = 4000
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'aan' / 'tsv_data'
cache_dir = data_dir.parent / 'cache'
datamodule = AAN(data_dir, cache_dir, max_length=max_length, append_bos=append_bos,
append_eos=append_eos, batch_size=batch_size, shuffle=True,
num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 147086
val_len = 18090
test_len = 17437
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
assert datamodule.vocab_size <= 258 # Might need 2 extra for bos and eos
for loader in [train_loader, val_loader, test_loader]:
x1, x2, y, lengths1, lengths2 = next(iter(loader))
assert x1.dim() == 2
assert x1.shape[0] == batch_size and x1.shape[1] <= max_length
assert x1.dtype == torch.long
assert x2.dim() == 2
assert x2.shape[0] == batch_size and x2.shape[1] <= max_length
assert x2.dtype == torch.long
assert y.shape == (batch_size,)
assert y.dtype == torch.long
assert lengths1.shape == (batch_size,)
assert lengths1.dtype == torch.long
assert torch.all(lengths1 <= max_length) and torch.all(lengths1 <= x1.shape[1])
assert lengths2.shape == (batch_size,)
assert lengths2.dtype == torch.long
assert torch.all(lengths2 <= max_length) and torch.all(lengths2 <= x2.shape[1])
if append_bos:
assert torch.all(x1[:, 0] == datamodule.vocab['<bos>'])
assert torch.all(x2[:, 0] == datamodule.vocab['<bos>'])
if append_eos:
assert torch.all(x1[torch.arange(batch_size), lengths1 - 1]
== datamodule.vocab['<eos>'])
assert torch.all(x2[torch.arange(batch_size), lengths2 - 1]
== datamodule.vocab['<eos>'])
if not append_bos and not append_eos:
for loader in [train_loader, val_loader, test_loader]:
l1, l2 = zip(*[(lengths1, lengths2) for _, _, _, lengths1, lengths2 in loader])
l1, l2 = torch.cat(l1), torch.cat(l2)
print(f"""Sequence1 length distribution: min {l1.min().item()}, max {l1.max().item()},
mean {l1.float().mean().item()}, stddev {l1.float().std().item()}""")
print(f"""Sequence2 length distribution: min {l2.min().item()}, max {l2.max().item()},
mean {l2.float().mean().item()}, stddev {l2.float().std().item()}""")
| fly-master | tests/datamodules/test_aan.py |
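The len(loader) == div_up(dataset_len, batch_size) assertions used in this and the following tests rely on PyTorch's DataLoader yielding ceil(N / batch_size) batches when drop_last is left at its default of False (which these datamodules appear to use). A self-contained sanity check of that arithmetic:
import torch
from torch.utils.data import DataLoader, TensorDataset

def div_up(x: int, y: int) -> int:
    # Ceiling division: (x + y - 1) // y == ceil(x / y) for positive integers.
    return (x + y - 1) // y

dataset = TensorDataset(torch.arange(100))
loader = DataLoader(dataset, batch_size=57, drop_last=False)
assert len(loader) == div_up(100, 57) == 2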
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.cifar import CIFAR10, CIFAR100
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestCIFAR:
@pytest.mark.parametrize('normalize', [False, True])
@pytest.mark.parametrize('val_split', [0.2, 0.0])
@pytest.mark.parametrize('to_int', [False, True])
@pytest.mark.parametrize('data_augmentation', [None, 'standard', 'autoaugment'])
@pytest.mark.parametrize('grayscale', [False, True])
@pytest.mark.parametrize('sequential', [False, True])
@pytest.mark.parametrize('cls', [CIFAR10, CIFAR100])
def test_dims(self, cls, sequential, grayscale, data_augmentation, to_int, val_split,
normalize):
if to_int and normalize: # Not compatible
return
batch_size = 57
seed = 2357
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'cifar'
datamodule = cls(data_dir, sequential=sequential, grayscale=grayscale,
data_augmentation=data_augmentation, to_int=to_int, val_split=val_split,
normalize=normalize, batch_size=batch_size, seed=seed, shuffle=True)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = int(50000 * (1 - val_split))
val_len = int(50000 * val_split)
test_len = 10000
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
for loader in [train_loader] + ([] if val_split == 0.0 else [val_loader]) + [test_loader]:
x, y = next(iter(loader))
assert x.shape == (batch_size,) + datamodule.dims
assert x.dtype == torch.float if not to_int else torch.long
assert y.shape == (batch_size,)
assert y.dtype == torch.long
# Check that it's actually normalized
if normalize and data_augmentation is None and val_split == 0.0:
xs_ys = [(x, y) for x, y in train_loader]
xs, ys = zip(*xs_ys)
xs, ys = torch.cat(xs), torch.cat(ys)
dims_to_reduce = (0, 2, 3) if not sequential else (0, 1)
x_mean, x_std = xs.mean(dim=dims_to_reduce), xs.std(dim=dims_to_reduce)
assert torch.allclose(x_mean, torch.zeros_like(x_mean), atol=1e-3)
assert torch.allclose(x_std, torch.ones_like(x_mean), atol=1e-3)
| fly-master | tests/datamodules/test_cifar.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.listops import ListOps
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestListOps:
@pytest.mark.parametrize('append_eos', [False, True])
@pytest.mark.parametrize('append_bos', [False, True])
def test_dims(self, append_bos, append_eos):
batch_size = 57
max_length = 2000
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'listops' / 'listops-1000'
cache_dir = data_dir.parent / 'cache'
datamodule = ListOps(data_dir, cache_dir, max_length=max_length, append_bos=append_bos,
append_eos=append_eos, batch_size=batch_size, shuffle=True,
num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 96000
val_len = 2000
test_len = 2000
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
assert datamodule.vocab_size <= 258 # Might need 2 extra for bos and eos
for loader in [train_loader, val_loader, test_loader]:
x, y, lengths = next(iter(loader))
assert x.dim() == 2
assert x.shape[0] == batch_size and x.shape[1] <= max_length
assert x.dtype == torch.long
assert y.shape == (batch_size,)
assert y.dtype == torch.long
assert lengths.shape == (batch_size,)
assert lengths.dtype == torch.long
assert torch.all(lengths <= max_length) and torch.all(lengths <= x.shape[1])
if append_bos:
assert torch.all(x[:, 0] == datamodule.vocab['<bos>'])
if append_eos:
assert torch.all(x[torch.arange(batch_size), lengths - 1]
== datamodule.vocab['<eos>'])
if not append_bos and not append_eos:
for loader in [train_loader, val_loader, test_loader]:
l = torch.cat([lengths for _, _, lengths in loader])
print(f"""Sequence length distribution: min {l.min().item()}, max {l.max().item()},
mean {l.float().mean().item()}, stddev {l.float().std().item()}""")
| fly-master | tests/datamodules/test_listops.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.imdb import IMDB
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestIMDB:
@pytest.mark.parametrize('val_split', [0.2, 0.0])
@pytest.mark.parametrize('append_eos', [False, True])
@pytest.mark.parametrize('append_bos', [False, True])
@pytest.mark.parametrize('tokenizer_type', ['word', 'char'])
def test_dims(self, tokenizer_type, append_bos, append_eos, val_split):
batch_size = 57
max_length = 1000
seed = 2357
vocab_min_freq = 5 if tokenizer_type == 'word' else 1
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'imdb' / 'cache'
datamodule = IMDB(data_dir, cache_dir, max_length=max_length, tokenizer_type=tokenizer_type,
vocab_min_freq=vocab_min_freq, append_bos=append_bos,
append_eos=append_eos, val_split=val_split, batch_size=batch_size,
num_workers=4, seed=seed, shuffle=True)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = int(25000 * (1 - val_split))
val_len = int(25000 * val_split) if val_split != 0.0 else 25000
test_len = 25000
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
if tokenizer_type == 'char':
assert datamodule.vocab_size <= 258 # Might need 2 extra for bos and eos
for loader in [train_loader, val_loader, test_loader]:
x, y, lengths = next(iter(loader))
assert x.dim() == 2
assert x.shape[0] == batch_size and x.shape[1] <= max_length
assert x.dtype == torch.long
assert y.shape == (batch_size,)
assert y.dtype == torch.long
assert lengths.shape == (batch_size,)
assert lengths.dtype == torch.long
assert torch.all(lengths <= max_length) and torch.all(lengths <= x.shape[1])
if append_bos:
assert torch.all(x[:, 0] == datamodule.vocab['<bos>'])
if append_eos:
assert torch.all(x[torch.arange(batch_size), lengths - 1]
== datamodule.vocab['<eos>'])
if val_split == 0.0 and not append_bos and not append_eos:
l = torch.cat([lengths for _, _, lengths in train_loader])
print(f"""Sequence length distribution: min {l.min().item()}, max {l.max().item()},
mean {l.float().mean().item()}, stddev {l.float().std().item()}""")
| fly-master | tests/datamodules/test_imdb.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.pathfinder import PathFinder
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestPathFinder:
@pytest.mark.parametrize('test_split', [0.1, 0.0])
@pytest.mark.parametrize('val_split', [0.1, 0.0])
@pytest.mark.parametrize('to_int', [False, True])
@pytest.mark.parametrize('sequential', [False, True])
@pytest.mark.parametrize('level', ['easy', 'intermediate', 'hard'])
@pytest.mark.parametrize('resolution', [32, 64, 128, 256])
@pytest.mark.parametrize('use_tar_dataset', [False, True])
def test_dims(self, use_tar_dataset, resolution, level, sequential, to_int, val_split,
test_split):
batch_size = 57
seed = 2357
data_dir = (Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
/ 'pathfinder')
if use_tar_dataset:
data_dir = data_dir / f'pathfinder{resolution}.tar'
datamodule = PathFinder(data_dir, resolution, level, sequential=sequential, to_int=to_int,
val_split=val_split, test_split=test_split, batch_size=batch_size,
seed=seed, shuffle=True, num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
# There's an empty file in the pathfinder32 easy dataset
dataset_len = 199999 if resolution == 32 and level == 'easy' else 200000
assert (len(datamodule.dataset_train) + len(datamodule.dataset_val)
+ len(datamodule.dataset_test)) == dataset_len
val_len = int(dataset_len * val_split)
test_len = int(dataset_len * test_split)
train_len = dataset_len - val_len - test_len
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
for loader in [train_loader] + (([] if val_split == 0.0 else [val_loader])
+ ([] if test_split == 0.0 else [test_loader])):
x, y = next(iter(loader))
assert x.shape == (batch_size,) + datamodule.dims
assert x.dtype == torch.float if not to_int else torch.long
assert y.shape == (batch_size,)
assert y.dtype == torch.long
if val_split == 0.0 and test_split == 0.0 and sequential and not to_int:
xs_ys = [(x, y) for x, y in train_loader]
xs, ys = zip(*xs_ys)
xs, ys = torch.cat(xs), torch.cat(ys)
xs = xs * 255
x_mean, x_std = xs.float().mean(), xs.float().std()
print(f"Pixel distribution: mean {x_mean.item()}, stddev {x_std.item()}")
| fly-master | tests/datamodules/test_pathfinder.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
from munch import Munch
import torch
from src.datamodules.language_modeling import WikiText2, WikiText103
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestWikiText2:
@pytest.mark.parametrize('vocab_type', ['word', 'bpe'])
def test_dims(self, vocab_type):
batch_size = 32
max_length = 192
seed = 2357
num_shards = 8
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'wikitext-2'
datamodule = WikiText2(data_dir, vocab_type=vocab_type,
batch_size=batch_size, max_length=max_length,
roll_seed=seed, batch_first=True)
# Fake a trainer
datamodule.trainer = Munch(global_rank=2, world_size=num_shards)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
if vocab_type == 'word':
train_len = 2088628
val_len = 217646
test_len = 245569
# Subtract 1 because the target is 1 off from the dataset
assert len(train_loader) == div_up(train_len - 1, batch_size * num_shards * max_length)
assert len(val_loader) == div_up(val_len - 1, batch_size * num_shards * max_length)
assert len(test_loader) == div_up(test_len - 1, batch_size * num_shards * max_length)
for loader in [train_loader, val_loader, test_loader]:
x, y, length, _ = next(iter(loader))
assert x.dim() == 2
assert x.shape[0] == batch_size and x.shape[1] <= max_length
assert x.dtype == torch.long
assert y.dim() == 2
assert y.shape[0] == batch_size and y.shape[1] <= max_length
assert y.dtype == torch.long
assert isinstance(length, int)
assert length <= max_length and length <= x.shape[1]
class TestWikiText103:
@pytest.mark.parametrize('vocab_type', ['word', 'bpe'])
def test_dims(self, vocab_type):
batch_size = 32
max_length = 192
seed = 2357
num_shards = 8
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'wikitext-103'
datamodule = WikiText103(data_dir, vocab_type=vocab_type,
batch_size=batch_size, max_length=max_length,
roll_seed=seed, batch_first=True)
# Fake a trainer
datamodule.trainer = Munch(global_rank=2, world_size=num_shards)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
if vocab_type == 'word':
train_len = 103227021
val_len = 217646
test_len = 245569
# Subtract 1 because the target is 1 off from the dataset
assert len(train_loader) == div_up(train_len - 1, batch_size * num_shards * max_length)
assert len(val_loader) == div_up(val_len - 1, batch_size * num_shards * max_length)
assert len(test_loader) == div_up(test_len - 1, batch_size * num_shards * max_length)
for loader in [train_loader, val_loader, test_loader]:
x, y, length, _ = next(iter(loader))
assert x.dim() == 2
assert x.shape[0] == batch_size and x.shape[1] <= max_length
assert x.dtype == torch.long
assert y.dim() == 2
assert y.shape[0] == batch_size and y.shape[1] <= max_length
assert y.dtype == torch.long
assert isinstance(length, int)
assert length <= max_length and length <= x.shape[1]
| fly-master | tests/datamodules/test_language_modeling.py |
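A worked version of the loader-length arithmetic asserted above, using the word-level WikiText-2 numbers from the test: each step consumes batch_size * max_length tokens on each of num_shards shards, and one token is dropped because the targets are the inputs shifted by one.
def div_up(x: int, y: int) -> int:
    return (x + y - 1) // y

train_len, batch_size, num_shards, max_length = 2088628, 32, 8, 192
tokens_per_step = batch_size * num_shards * max_length  # 49152 tokens across all shards
print(div_up(train_len - 1, tokens_per_step))  # 43 batches per epoch on each shard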
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.masked_language_modeling import MLMDataModule
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestMLMDataModule:
def test_wikitext2(self):
batch_size = 7
dataset_name = 'wikitext'
dataset_config_name = 'wikitext-2-raw-v1'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikitext-2' / 'cache'
max_length = 512
dupe_factor = 2
datamodule = MLMDataModule(dataset_name, tokenizer_name='bert-base-uncased',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
dupe_factor=dupe_factor, batch_size=batch_size,
num_workers_preprocess=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 69293
val_len = 7222
test_len = 8353
assert len(train_loader) == div_up(train_len, batch_size)
assert len(val_loader) == div_up(val_len, batch_size)
assert len(test_loader) == div_up(test_len, batch_size)
for loader in [train_loader, val_loader, test_loader]:
batch = next(iter(loader))
assert batch.keys() == {'input_ids', 'labels', 'attention_mask', 'token_type_ids', 'next_sentence_label'}
seqlen = batch['input_ids'].shape[-1]
assert batch['input_ids'].shape == (batch_size, seqlen)
assert batch['input_ids'].dtype == torch.long
assert batch['labels'].shape == (batch_size, seqlen)
assert batch['labels'].dtype == torch.long
assert batch['attention_mask'].shape == (batch_size, seqlen)
assert batch['attention_mask'].dtype == torch.long
assert batch['token_type_ids'].shape == (batch_size, seqlen)
assert batch['token_type_ids'].dtype == torch.long
assert batch['next_sentence_label'].shape == (batch_size,)
assert batch['next_sentence_label'].dtype == torch.bool
def test_wikipedia(self):
batch_size = 8
dataset_name = 'wikipedia'
dataset_config_name = '20200501.en'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikipedia' / 'cache'
max_length = 512
dupe_factor = 2
datamodule = MLMDataModule(dataset_name, tokenizer_name='bert-base-uncased',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
dupe_factor=dupe_factor, batch_size=batch_size,
num_workers_preprocess=32)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
for loader in [train_loader, val_loader, test_loader]:
batch = next(iter(loader))
assert batch.keys() == {'input_ids', 'labels', 'attention_mask', 'token_type_ids', 'next_sentence_label'}
seqlen = batch['input_ids'].shape[-1]
assert batch['input_ids'].shape == (batch_size, seqlen)
assert batch['input_ids'].dtype == torch.long
assert batch['labels'].shape == (batch_size, seqlen)
assert batch['labels'].dtype == torch.long
assert batch['attention_mask'].shape == (batch_size, seqlen)
assert batch['attention_mask'].dtype == torch.bool
assert batch['token_type_ids'].shape == (batch_size, seqlen)
assert batch['token_type_ids'].dtype == torch.bool
assert batch['next_sentence_label'].shape == (batch_size,)
assert batch['next_sentence_label'].dtype == torch.bool
@pytest.mark.parametrize('max_length', [128, 512])
def test_wikipedia_from_text(self, max_length):
batch_size = 8
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'bert' / 'wikicorpus_text'
path = str(data_dir)
cache_dir = data_dir.parent / 'wikicorpus' / 'cache'
dupe_factor = 5
datamodule = MLMDataModule(path, tokenizer_name='bert-base-uncased',
max_length=max_length, cache_dir=cache_dir,
dupe_factor=dupe_factor, batch_size=batch_size,
num_workers_preprocess=64)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
for loader in [train_loader, val_loader, test_loader]:
batch = next(iter(loader))
assert batch.keys() == {'input_ids', 'labels', 'attention_mask', 'token_type_ids', 'next_sentence_label'}
seqlen = batch['input_ids'].shape[-1]
assert batch['input_ids'].shape == (batch_size, seqlen)
assert batch['input_ids'].dtype == torch.long
assert batch['labels'].shape == (batch_size, seqlen)
assert batch['labels'].dtype == torch.long
assert batch['attention_mask'].shape == (batch_size, seqlen)
# Could be bool or long, depending on whether the sequences were padded by the tokenizer
assert batch['attention_mask'].dtype in [torch.bool, torch.long]
assert batch['token_type_ids'].shape == (batch_size, seqlen)
assert batch['token_type_ids'].dtype in [torch.bool, torch.long]
assert batch['next_sentence_label'].shape == (batch_size,)
assert batch['next_sentence_label'].dtype == torch.bool
@pytest.mark.parametrize('max_length', [128, 512])
def test_bookcorpus(self, max_length):
batch_size = 8
# "bookcorpus" has already processed the books into sentences, which is not what we want
dataset_name = 'bookcorpusopen'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'bert' / 'bookcorpus'
# cache_dir = data_dir / 'cache'
cache_dir = None
dupe_factor = 5
datamodule = MLMDataModule(dataset_name, tokenizer_name='bert-base-uncased',
max_length=max_length, cache_dir=cache_dir,
dupe_factor=dupe_factor, batch_size=batch_size,
num_workers_preprocess=64)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
for loader in [train_loader, val_loader, test_loader]:
batch = next(iter(loader))
assert batch.keys() == {'input_ids', 'labels', 'attention_mask', 'token_type_ids', 'next_sentence_label'}
seqlen = batch['input_ids'].shape[-1]
assert batch['input_ids'].shape == (batch_size, seqlen)
assert batch['input_ids'].dtype == torch.long
assert batch['labels'].shape == (batch_size, seqlen)
assert batch['labels'].dtype == torch.long
assert batch['attention_mask'].shape == (batch_size, seqlen)
assert batch['attention_mask'].dtype in [torch.bool, torch.long]
assert batch['token_type_ids'].shape == (batch_size, seqlen)
assert batch['token_type_ids'].dtype in [torch.bool, torch.long]
assert batch['next_sentence_label'].shape == (batch_size,)
assert batch['next_sentence_label'].dtype == torch.bool
@pytest.mark.parametrize('max_length', [128, 512])
def test_wikibook_from_text(self, max_length):
batch_size = 8
data_dir_common = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data')) / 'bert'
path = [str(data_dir_common / 'wikicorpus_text'), 'bookcorpusopen']
cache_dir = [data_dir_common / 'wikicorpus' / 'cache', data_dir_common / 'bookcorpus' / 'cache']
dupe_factor = 5
datamodule = MLMDataModule(path, tokenizer_name='bert-base-uncased',
max_length=max_length, cache_dir=cache_dir,
dupe_factor=dupe_factor, batch_size=batch_size,
num_workers_preprocess=64)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
for loader in [train_loader, val_loader, test_loader]:
batch = next(iter(loader))
assert batch.keys() == {'input_ids', 'labels', 'attention_mask', 'token_type_ids', 'next_sentence_label'}
seqlen = batch['input_ids'].shape[-1]
assert batch['input_ids'].shape == (batch_size, seqlen)
assert batch['input_ids'].dtype == torch.long
assert batch['labels'].shape == (batch_size, seqlen)
assert batch['labels'].dtype == torch.long
assert batch['attention_mask'].shape == (batch_size, seqlen)
assert batch['attention_mask'].dtype in [torch.bool, torch.long]
assert batch['token_type_ids'].shape == (batch_size, seqlen)
assert batch['token_type_ids'].dtype in [torch.bool, torch.long]
assert batch['next_sentence_label'].shape == (batch_size,)
assert batch['next_sentence_label'].dtype == torch.bool
| fly-master | tests/datamodules/test_masked_language_modeling.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.language_modeling_hf import LMDataModule
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestLMDataModule:
def test_wikitext2(self):
batch_size = 7
dataset_name = 'wikitext'
dataset_config_name = 'wikitext-2-raw-v1'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikitext-2' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=False, batch_size=batch_size, num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 2391884
val_len = 247289
test_len = 283287
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_wikitext103(self):
batch_size = 7
dataset_name = 'wikitext'
dataset_config_name = 'wikitext-103-raw-v1'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikitext-103' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=False, batch_size=batch_size, num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 117920140
val_len = 247289
test_len = 283287
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_openwebtext(self):
batch_size = 8
dataset_name = 'openwebtext'
dataset_config_name = None
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'openwebtext' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=True, batch_size=batch_size,
num_workers=64)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 9035582198
val_len = 4434897
test_len = 4434897
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
| fly-master | tests/datamodules/test_language_modeling_hf.py |
import pytest
import torch
from timm.scheduler import CosineLRScheduler
from src.optim.timm_lr_scheduler import TimmCosineLRScheduler
def test_lr():
n_epochs = 310
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.03)
kwargs = dict(t_initial=300, lr_min=1e-5, decay_rate=0.1, warmup_lr_init=1e-6, warmup_t=10,
cycle_limit=1)
scheduler_timm = CosineLRScheduler(optimizer, **kwargs)
scheduler_timm.step(epoch=0)
lrs_timm = []
for epoch in range(n_epochs):
lrs_timm.append(optimizer.param_groups[0]['lr'])
scheduler_timm.step(epoch = epoch + 1)
lrs_timm = torch.tensor(lrs_timm)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.03)
scheduler_mine = TimmCosineLRScheduler(optimizer, **kwargs)
lrs_mine = []
for epoch in range(n_epochs):
lrs_mine.append(optimizer.param_groups[0]['lr'])
scheduler_mine.step()
lrs_mine = torch.tensor(lrs_mine)
assert torch.allclose(lrs_timm, lrs_mine, atol=1e-7, rtol=1e-5)
| fly-master | tests/optim/test_timm_lr_schedulers.py |
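The test above checks that TimmCosineLRScheduler, stepped once per epoch with no arguments, reproduces timm's CosineLRScheduler stepped with explicit epoch indices. Below is a minimal sketch of such a wrapper; this is an assumption about the idea being tested, not the repo's actual implementation, and the class name is made up.
from timm.scheduler import CosineLRScheduler

class SteplessCosineLRScheduler(CosineLRScheduler):
    """Sketch: drive timm's epoch-indexed scheduler with plain .step() calls."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._epoch_count = 0
        super().step(epoch=0)  # apply warmup_lr_init right away, as the test does

    def step(self, epoch=None):
        # Ignore any externally supplied epoch and keep an internal counter instead.
        self._epoch_count += 1
        super().step(epoch=self._epoch_count)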
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
# from triton.ops.blocksparse import matmul, softmax
from triton.ops.blocksparse import softmax
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.blocksparse_attention import BlockSparseAttention
from src.models.attention.blocksparse_utils import sparsify_tensor, densify_tensor
from src.models.attention.blocksparse_utils import mask_tensor
from src.utils.padding import pad_to_multiple
from src.models.attention.blocksparse_matmul import matmul
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestBlockSparseAttention:
# Adapted from https://github.com/openai/triton/blob/8bedcce9befbbe95d8fe0a082718edc4050e2831/python/test/operators/test_blocksparse.py#L13
def test_blocksparse_matmul(self):
seed = 2357
block_size = 16
batch_size = 2
n_head = 3
seqlen_q = 64
seqlen_k = 48
dim = 32
softmax_temp = 0.23
# create inputs
seed_cpu_cuda(seed)
q = torch.randn((batch_size, n_head, seqlen_q, dim), device="cuda", requires_grad=True)
k = torch.randn((batch_size, n_head, seqlen_k, dim), device="cuda", requires_grad=True)
v = torch.randn((batch_size, n_head, seqlen_k, dim), device="cuda", requires_grad=True)
shape = (seqlen_q, seqlen_k)
p, r = shape[0] // block_size, shape[1] // block_size
layout = torch.randint(2, (n_head, p, r)) # On cpu
nnz = layout.bool().sum()
# triton result
matmul_sdd_op = matmul(layout, block_size, 'sdd', trans_a=False, trans_b=True)
qk_sparse = matmul_sdd_op(q, k)
qk_dense = densify_tensor(qk_sparse, layout)
assert qk_sparse.shape == (batch_size, nnz, block_size, block_size)
assert qk_dense.shape == (batch_size, n_head, seqlen_q, seqlen_k)
softmax_op = softmax(layout, block_size)
attn = softmax_op(qk_sparse.clone(), scale=softmax_temp)
matmul_dsd_op = matmul(layout, block_size, 'dsd', trans_a=False, trans_b=False)
out = matmul_dsd_op(attn, v)
# torch result
qk_dense_pt = q @ k.transpose(-1, -2)
qk_dense_masked_pt = mask_tensor(qk_dense_pt, layout)
qk_sparse_pt = sparsify_tensor(qk_dense_pt, layout)
assert qk_sparse_pt.shape == (batch_size, nnz, block_size, block_size)
assert qk_dense_masked_pt.shape == (batch_size, n_head, seqlen_q, seqlen_k)
qk_dense_inf_filled_pt = mask_tensor(qk_dense_pt, layout, value=float('-inf'))
attn_pt = torch.softmax(qk_dense_inf_filled_pt * softmax_temp, dim=-1)
# Some rows could be all zero, and torch.softmax will fill those with NaNs
zero_rows = (~(layout.to(attn_pt.device).bool())).all(dim=-1, keepdims=True)
zero_rows = repeat(zero_rows, 'h p 1 -> h (p blk_sz) 1', blk_sz=block_size)
attn_pt.masked_fill_(zero_rows, 0.0)
out_pt = attn_pt @ v
# Compare results
assert torch.allclose(qk_dense, qk_dense_masked_pt)
assert torch.allclose(qk_sparse, qk_sparse_pt)
assert torch.allclose(densify_tensor(attn, layout), attn_pt)
assert torch.allclose(out, out_pt, rtol=1e-5, atol=1e-7)
@pytest.mark.parametrize('device', ['cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 0.1, 0.235])
@pytest.mark.parametrize('dim', [15, 33, 51])
@pytest.mark.parametrize('num_local_blocks', [1, 3, 4])
@pytest.mark.parametrize('block_size', [16, 32])
def test_output(self, block_size, num_local_blocks, dim, softmax_temp, device):
seed = 2357
embed_dim = dim
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 235
k_seqlen = 179
q_seqlen_padded = int(math.ceil(q_seqlen / block_size) * block_size)
k_seqlen_padded = int(math.ceil(k_seqlen / block_size) * block_size)
seed_cpu_cuda(seed)
attn_mask = FullMask(torch.randint(low=0, high=2, size=(q_seqlen, k_seqlen),
dtype=torch.bool, device=device))
# attn_mask = None
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
# key_padding_mask = None
sparsity_config = dict(
_target_='deepspeed.ops.sparse_attention.FixedSparsityConfig',
num_heads=num_heads,
block=block_size,
num_local_blocks=num_local_blocks
)
# sparsity_config = FixedSparsityConfig(num_heads = num_heads, block=block_size,
# num_local_blocks=num_local_blocks)
blocksparse_attn = BlockSparseAttention(
sparsity_config, softmax_temp=softmax_temp, attention_dropout=0.0,
max_seq_length=max(q_seqlen_padded, k_seqlen_padded)
).to(device)
full_attn = FullAttention(softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
layout = blocksparse_attn.get_layout(q_seqlen_padded, k_seqlen_padded)
out_bs, A_bs = blocksparse_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
assert out_bs.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_bs.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_bs >= 0)
# Sum of each row should be either 0.0 or 1.0
A_bs_sum = A_bs.sum(dim=-1)
assert torch.all(torch.isclose(A_bs_sum, torch.ones_like(A_bs_sum))
| torch.isclose(A_bs_sum, torch.zeros_like(A_bs_sum)))
assert torch.allclose(out_bs, torch.einsum('bhts,bshd->bthd', A_bs, v),
rtol=1e-6, atol=1e-7)
_, A_full = full_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
A_full.nan_to_num_()
# Test that A_bs is equivalent to zeroing out some blocks of A_full and then
# re-normalizing so that each row sums to 1
A_full_padded = pad_to_multiple(A_full, block_size, dims=(-1, -2))
A_full_padded_masked = mask_tensor(A_full_padded, layout)
A_full_bs = F.normalize(A_full_padded_masked, p=1, dim=-1, eps=1e-8)[:, :, :q_seqlen, :k_seqlen]
assert torch.allclose(A_bs, A_full_bs, rtol=1e-4, atol=1e-6)
| fly-master | tests/models/attention/test_blocksparse_attention.py |
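The sparsify_tensor/densify_tensor helpers imported from src.models.attention.blocksparse_utils are exercised above but not shown. Here is a standalone sketch of the assumed semantics of sparsify_tensor (gather the block_size x block_size blocks wherever layout is nonzero, mirroring triton.testing.sparsify_tensor); the function name and exact block ordering are assumptions, not the repo's actual code.
import torch
from einops import rearrange

def sparsify_dense(x, layout, block_size):
    # x: (batch, heads, M, N); layout: (heads, M // block_size, N // block_size) with 0/1 entries.
    blocks = rearrange(x, 'b h (p i) (r j) -> b (h p r) i j', i=block_size, j=block_size)
    return blocks[:, layout.flatten().bool()]  # (batch, nnz, block_size, block_size)

x = torch.randn(2, 3, 64, 48)
layout = torch.randint(2, (3, 4, 3))
assert sparsify_dense(x, layout, 16).shape == (2, int(layout.sum()), 16, 16)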
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sbsmyrf_attention import SBSmyrfAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestSBSmyrfAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('nb_features', [73, 26, 30000])
@pytest.mark.parametrize('n_clusters', [4, 6, 8])
@pytest.mark.parametrize('n_hashes', [1, 2, 3])
# @pytest.mark.parametrize('causal', [False, True])
@pytest.mark.parametrize('causal', [False])
def test_output(self, causal, n_hashes, n_clusters, nb_features, softmax_temp, device):
if nb_features > 10000 and device == 'cpu': # Would be too slow on CPU
return
# TODO: test causal masks
seed = 2357
embed_dim = 21
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 47
k_seqlen = 39 if not causal else q_seqlen
seed_cpu_cuda(seed)
attn_mask = None if not causal else TriangularCausalMask(q_seqlen, device=device)
key_padding_mask = LengthMask(torch.randint(low=k_seqlen // 4, high=k_seqlen,
size=(batch_size,), device=device),
max_len=k_seqlen)
key_padding_mask = None
q_cluster_size = (q_seqlen + n_clusters - 1) // n_clusters
k_cluster_size = (k_seqlen + n_clusters - 1) // n_clusters
sbsmyrf_attn = SBSmyrfAttention(n_hashes, q_cluster_size, k_cluster_size,
embed_dim, nb_features=nb_features,
softmax_temp=softmax_temp, attention_dropout=0.0,
causal=causal).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
# key_padding_mask = LengthMask(k.new_full((k.shape[0],), k.shape[1], dtype=torch.long))
out_sbsmyrf, (A_sbsmyrf, A_sbsmyrf_unnormalized, A_lr_unnormalized, smyrf_mask) = sbsmyrf_attn(
q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=True,
return_attn_unnormalized=True
)
assert out_sbsmyrf.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_sbsmyrf.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_sbsmyrf >= 0)
# Sum of each row should be either 0.0 or 1.0
A_sbsmyrf_sum = A_sbsmyrf.sum(dim=-1)
assert torch.all(torch.isclose(A_sbsmyrf_sum, torch.ones_like(A_sbsmyrf_sum), atol=1e-2)
| torch.isclose(A_sbsmyrf_sum, torch.zeros_like(A_sbsmyrf_sum), atol=1e-2))
# For some reason if nb_features is large this fails,
# maybe because of clamping the normalization
if nb_features < 1000:
assert torch.allclose(out_sbsmyrf, torch.einsum('bhts,bshd->bthd', A_sbsmyrf, v),
rtol=1e-3, atol=1e-3)
temperature = 1 / math.sqrt(embed_dim) if softmax_temp is None else softmax_temp
A_full_unnormalized = torch.exp(torch.einsum('bthe,bshe->bhts', q * temperature, k))
if attn_mask is not None:
A_full_unnormalized.masked_fill_(~attn_mask.bool_matrix, 0.0)
if key_padding_mask is not None:
A_full_unnormalized.masked_fill_(rearrange(~key_padding_mask.bool_matrix, 'b s -> b 1 1 s'),
0.0)
# Test that A_sbsmyrf_unnormalized matches A_full_unnormalized on the smyrf indices
# and A_lr_unnormalized on the non-smyrf indices.
rel_error = ((A_sbsmyrf_unnormalized - A_full_unnormalized)
/ A_full_unnormalized.clamp_min_(1e-6)).abs().mean()
if device == 'cuda':
print(f'Relative error of attention matrix: {rel_error}')
# For some reason if nb_features is large this fails,
# maybe because of clamping the normalization
if nb_features < 1000:
assert torch.allclose(A_sbsmyrf_unnormalized.masked_select(smyrf_mask),
A_full_unnormalized.masked_select(smyrf_mask),
rtol=1e-3, atol=1e-4)
assert torch.allclose(A_sbsmyrf_unnormalized.masked_select(~smyrf_mask),
A_lr_unnormalized.masked_select(~smyrf_mask),
rtol=1e-3, atol=1e-4)
# If nb_features is large, test that A_sbsmyrf_unnormalized is close to A_full_unnormalized
if nb_features > 10000 and (softmax_temp is None):
assert torch.allclose(rel_error, torch.zeros(1, device=device), atol=0.2)
| fly-master | tests/models/attention/test_sbsmyrf_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.linformer_attention import LinformerAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestLinformerAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('share_kv', [False, True])
@pytest.mark.parametrize('proj_dim_k', [13, 47, 88])
@pytest.mark.parametrize('seq_len', [127, 28, 468])
def test_output(self, seq_len, proj_dim_k, share_kv, softmax_temp, device):
seed = 2357
embed_dim = 21
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 47
# [2021-08-08] local_dot_product_cuda has a bug when q_seqlen != k_seqlen
# https://github.com/idiap/fast-transformers/issues/98
k_seqlen = seq_len
seed_cpu_cuda(seed)
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
lin_attn = LinformerAttention(seq_len, k=proj_dim_k, share_kv=share_kv,
softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
out_lin, A_lin = lin_attn(q, k, v, key_padding_mask=key_padding_mask, need_weights=True)
assert out_lin.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_lin.shape == (batch_size, num_heads, q_seqlen, proj_dim_k)
assert torch.all(A_lin >= 0)
# Sum of each row should be either 0.0 or 1.0
A_local_sum = A_lin.sum(dim=-1)
assert torch.all(torch.isclose(A_local_sum, torch.ones_like(A_local_sum))
| torch.isclose(A_local_sum, torch.zeros_like(A_local_sum)))
| fly-master | tests/models/attention/test_linformer_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.reformer_attention import ReformerAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestReformerAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('bucket_size', [16, 32, 64])
@pytest.mark.parametrize('n_hashes', [1, 2, 3])
def test_output(self, n_hashes, bucket_size, softmax_temp, device):
seed = 2357
embed_dim = 21
v_dim = 17
# num_heads = 7
num_heads = 1
# batch_size = 18
batch_size = 1
# seqlen = 213
seqlen = 64
seed_cpu_cuda(seed)
# attn_mask = None
attn_mask = FullMask(torch.randint(low=0, high=2, size=(seqlen, seqlen),
dtype=torch.bool, device=device))
# key_padding_mask = None
key_padding_mask = LengthMask(torch.randint(low=0, high=seqlen, size=(batch_size,),
device=device), max_len=seqlen)
reformer_attn = ReformerAttention(n_hashes=n_hashes, bucket_size=bucket_size,
allow_duplicate_attention=False,
softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
q = torch.randn(batch_size, seqlen, num_heads, embed_dim, device=device)
k = q
v = torch.randn(batch_size, seqlen, num_heads, v_dim, device=device)
out_reformer, A_reformer = reformer_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
assert out_reformer.shape == (batch_size, seqlen, num_heads, v_dim)
assert A_reformer.shape == (batch_size, num_heads, seqlen, seqlen)
assert torch.all(A_reformer >= 0)
# Sum of each row should be either 0.0 or 1.0
# A_smyrf_sum = A_reformer.sum(dim=-1)
# assert torch.all(torch.isclose(A_smyrf_sum, torch.ones_like(A_smyrf_sum))
# | torch.isclose(A_smyrf_sum, torch.zeros_like(A_smyrf_sum)))
assert torch.allclose(out_reformer, torch.einsum('bhts,bshd->bthd', A_reformer, v),
rtol=1e-5, atol=1e-6)
| fly-master | tests/models/attention/test_reformer_attention.py |
import torch
import triton
import pytest
from src.models.attention.blocksparse_sum import blocksparse_sum
from src.models.attention.blocksparse_utils import sparsify_tensor, mask_tensor
@pytest.mark.parametrize(
"BLOCK, WIDTH",
[(block, width) for block in [16, 32] for width in [256, 576, 1024, 1792]],
)
def test_sum(BLOCK, WIDTH, DTYPE=torch.float32):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 4, WIDTH, WIDTH
scale = 0.4
# create inputs
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
x = torch.randn((Z, H, M, N), dtype=DTYPE, requires_grad=True, device="cuda")
# triton result
op = blocksparse_sum(layout, BLOCK)
tx = sparsify_tensor(x, layout)
ty = op(tx * scale)
grad = torch.randn_like(ty)
tdx, = torch.autograd.grad(ty, x, grad)
# torch result
rx = mask_tensor(x, layout, value=0)
ry = torch.sum(rx * scale, -1)
rdx, = torch.autograd.grad(ry, x, grad)
# compare
assert torch.allclose(ry, ty, rtol=1e-4, atol=1e-5)
assert torch.allclose(rdx, tdx)
| fly-master | tests/models/attention/test_blocksparse_sum.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.local_attention import LocalAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestLocalAttention:
def test_local_dot_product(self):
"""Test if we can import and run local_dot_product at all
"""
from fast_transformers.local_product import local_dot_product, local_weighted_average
batch_size = 1
num_heads = 3
seqlen = 10
embed_dim = 7
local_context = 5
q = torch.randn(batch_size, num_heads, seqlen, embed_dim, requires_grad=True)
k = torch.randn(batch_size, num_heads, seqlen, embed_dim, requires_grad=True)
v = torch.randn(batch_size, num_heads, seqlen, embed_dim, requires_grad=True)
attn_additive_mask = torch.zeros(10, 10)
lengths = torch.full((1,), 10)
qk = local_dot_product(q, k, attn_additive_mask, lengths, local_context)
qk.sum().backward()
assert qk.shape == (batch_size, num_heads, seqlen, local_context)
qk = local_dot_product(q.cuda(), k.cuda(), attn_additive_mask.cuda(), lengths.cuda(), local_context)
assert qk.shape == (batch_size, num_heads, seqlen, local_context)
A = torch.softmax(1.5 * qk, dim=-1)
out = local_weighted_average(A, v.cuda())
out.sum().backward() # Test that backward also runs
assert out.shape == (batch_size, num_heads, seqlen, embed_dim)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('local_context', [3, 4, 28, 33])
def test_output(self, local_context, softmax_temp, device):
seed = 2357
embed_dim = 21
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 47
# [2021-08-08] local_dot_product_cuda has a bug when q_seqlen != k_seqlen
# https://github.com/idiap/fast-transformers/issues/98
k_seqlen = 39 if device == 'cpu' else q_seqlen
seed_cpu_cuda(seed)
# For arbitrary attn_mask, we could have a row with no valid keys. That row should actually
# be zero but for implementation simplicity we don't correct it.
# Therefore we're only testing with full attn_mask.
# attn_mask = FullMask(torch.randint(low=0, high=2, size=(q_seqlen, k_seqlen),
# dtype=torch.bool, device=device))
attn_mask = None
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
# key_padding_mask = LengthMask(torch.ones(batch_size, dtype=torch.long) * 3, max_len=k_seqlen)
local_attn = LocalAttention(local_context, softmax_temp=softmax_temp,
attention_dropout=0.0).to(device)
full_attn = FullAttention(softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
out_local, A_local = local_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
assert out_local.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_local.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_local >= 0)
# Sum of each row should be either 0.0 or 1.0
A_local_sum = A_local.sum(dim=-1)
assert torch.all(torch.isclose(A_local_sum, torch.ones_like(A_local_sum))
| torch.isclose(A_local_sum, torch.zeros_like(A_local_sum)))
assert torch.all((A_local >= 1e-6).sum(dim=-1) <= local_context)
assert torch.allclose(out_local, torch.einsum('bhts,bshd->bthd', A_local, v),
rtol=1e-6, atol=1e-7)
_, A_full = full_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
# Test that A_local is equivalent to zeroing out the non-local elements of A_full and then
# re-normalizing so that each row sums to 1
i = rearrange(torch.arange(q_seqlen, device=q.device), 't -> 1 1 t 1')
j = torch.arange(k_seqlen, device=k.device)
idx = j - i
local_mask = ((idx >= -(local_context // 2))
& (idx < (local_context + 1) // 2)
& (j < rearrange(key_padding_mask.lengths, 'b -> b 1 1 1')))
A_full_local = F.normalize(A_full.masked_fill(~local_mask, 0.0), p=1, dim=-1)
assert torch.allclose(A_local, A_full_local, rtol=1e-5, atol=1e-7)
| fly-master | tests/models/attention/test_local_attention.py |
import torch
import triton
import pytest
# from triton.ops.blocksparse import matmul
from src.models.attention.blocksparse_matmul import matmul
@pytest.mark.parametrize(
"MODE, TRANS_A, TRANS_B, BLOCK, DTYPE",
[
(mode, at, bt, block, dtype) for dtype in ["float32"] for mode in ["sdd"]
for at in [False] for bt in [True] for block in [16]
],
)
# def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256):
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=64, N=64, K=48):
DTYPE = {"float16": torch.float16, "float32": torch.float32}[DTYPE]
# set seed
torch.random.manual_seed(0)
# create inputs
a = torch.randn((Z, H, K, M) if TRANS_A else (Z, H, M, K), dtype=DTYPE, device="cuda")
b = torch.randn((Z, H, N, K) if TRANS_B else (Z, H, K, N), dtype=DTYPE, device="cuda")
shape = {
"sdd": (M, N),
"dsd": (a.shape[2], a.shape[3]),
"dds": (b.shape[2], b.shape[3]),
}[MODE]
layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
# triton result
op = matmul(layout, BLOCK, MODE, trans_a=TRANS_A, trans_b=TRANS_B)
ra = triton.testing.sparsify_tensor(a, layout, BLOCK) if MODE == "dsd" else a
rb = triton.testing.sparsify_tensor(b, layout, BLOCK) if MODE == "dds" else b
rc = triton.testing.catch_oor(lambda : op(ra, rb), pytest)
# torch result
ta = triton.testing.mask_tensor(a, layout, BLOCK) if MODE == "dsd" else a
tb = triton.testing.mask_tensor(b, layout, BLOCK) if MODE == "dds" else b
ta = ta.transpose(2, 3) if TRANS_A else ta
tb = tb.transpose(2, 3) if TRANS_B else tb
tc = torch.matmul(ta, tb)
tc = triton.testing.mask_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
tc = triton.testing.sparsify_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
# compare
assert torch.allclose(rc, tc)
| fly-master | tests/models/attention/test_triton.py |
import torch
import triton
import pytest
from src.models.attention.blocksparse_logsumexp import logsumexp
from src.models.attention.blocksparse_utils import sparsify_tensor, mask_tensor
@pytest.mark.parametrize(
"BLOCK, WIDTH",
[(block, width) for block in [16, 32] for width in [256, 576, 1024, 1792]],
)
def test_logsumexp(BLOCK, WIDTH, DTYPE=torch.float32):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 4, WIDTH, WIDTH
scale = 0.4
# create inputs
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
x = torch.randn((Z, H, M, N), dtype=DTYPE, requires_grad=True, device="cuda")
# triton result
op = logsumexp(layout, BLOCK)
tx = sparsify_tensor(x, layout)
ty = op(tx * scale)
grad = torch.randn_like(ty)
tdx, = torch.autograd.grad(ty, x, grad)
# torch result
rx = mask_tensor(x, layout, value=float("-inf"))
ry = torch.logsumexp(rx * scale, -1)
rdx, = torch.autograd.grad(ry, x, grad)
# compare
assert torch.allclose(ry, ty)
assert torch.allclose(rdx, tdx)
| fly-master | tests/models/attention/test_blocksparse_logsumexp.py |
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from fast_transformers.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sbblocksparse_attention import SBBlockSparseAttention
from src.models.attention.blocksparse_utils import mask_tensor
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestSBBlockSparseAttention:
@pytest.mark.parametrize('device', ['cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('nb_features', [80, 32, 1024])
@pytest.mark.parametrize('num_local_blocks', [1, 3, 4])
@pytest.mark.parametrize('block_size', [16])
@pytest.mark.parametrize('causal', [False])
def test_output(self, causal, block_size, num_local_blocks, nb_features, softmax_temp, device):
if nb_features > 10000 and device == 'cpu': # Would be too slow on CPU
return
# TODO: test causal masks
seed = 2357
embed_dim = 32
v_dim = 48
num_heads = 7
batch_size = 18
q_seqlen = 240
# [2021-08-08] local_dot_product_cuda has a bug when q_seqlen != k_seqlen
# https://github.com/idiap/fast-transformers/issues/98
k_seqlen = 64 if device == 'cpu' or not causal else q_seqlen
q_seqlen_padded = int(math.ceil(q_seqlen / block_size) * block_size)
k_seqlen_padded = int(math.ceil(k_seqlen / block_size) * block_size)
seed_cpu_cuda(seed)
attn_mask = None if not causal else TriangularCausalMask(q_seqlen, device=device)
# key_padding_mask = LengthMask(torch.randint(low=k_seqlen // 4, high=k_seqlen,
# size=(batch_size,), device=device),
# max_len=k_seqlen)
key_padding_mask = None
sparsity_config = dict(
_target_='deepspeed.ops.sparse_attention.FixedSparsityConfig',
num_heads=num_heads,
block=block_size,
num_local_blocks=num_local_blocks
)
sbblocksparse_attn = SBBlockSparseAttention(sparsity_config, embed_dim, nb_features,
softmax_temp=softmax_temp, attention_dropout=0.0,
causal=causal,
max_seq_length=max(q_seqlen, k_seqlen)).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
layout = sbblocksparse_attn.get_layout(q_seqlen_padded, k_seqlen_padded)
# key_padding_mask = LengthMask(k.new_full((k.shape[0],), k.shape[1], dtype=torch.long))
out_sblocal, (A_sbblocksparse, A_sbblocksparse_unnormalized, A_lr_unnormalized) = sbblocksparse_attn(
q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=True,
return_attn_unnormalized=True
)
assert out_sblocal.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_sbblocksparse.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_sbblocksparse >= 0)
# Sum of each row should be either 0.0 or 1.0
A_sblocal_sum = A_sbblocksparse.sum(dim=-1)
assert torch.all(torch.isclose(A_sblocal_sum, torch.ones_like(A_sblocal_sum), atol=1e-2)
| torch.isclose(A_sblocal_sum, torch.zeros_like(A_sblocal_sum), atol=1e-2))
assert torch.allclose(out_sblocal, torch.einsum('bhts,bshd->bthd', A_sbblocksparse, v),
rtol=1e-3, atol=1e-3)
temperature = 1 / math.sqrt(embed_dim) if softmax_temp is None else softmax_temp
A_full_unnormalized = torch.exp(torch.einsum('bthe,bshe->bhts', q * temperature, k))
if attn_mask is not None:
A_full_unnormalized.masked_fill_(~attn_mask.bool_matrix, 0.0)
if key_padding_mask is not None:
A_full_unnormalized.masked_fill_(rearrange(~key_padding_mask.bool_matrix,
'b s -> b 1 1 s'),
0.0)
# Test that A_sbblocksparse_unnormalized matches A_full_unnormalized on the block sparse
# indices and A_lr_unnormalized on the non-block sparse indices.
blocksparse_mask = mask_tensor(torch.ones_like(A_full_unnormalized), layout).bool()
assert torch.allclose(A_sbblocksparse_unnormalized.masked_select(blocksparse_mask),
A_full_unnormalized.masked_select(blocksparse_mask),
rtol=1e-3, atol=1e-4)
assert torch.allclose(A_sbblocksparse_unnormalized.masked_select(~blocksparse_mask),
A_lr_unnormalized.masked_select(~blocksparse_mask),
rtol=1e-3, atol=1e-4)
rel_error = ((A_sbblocksparse_unnormalized - A_full_unnormalized)
/ A_full_unnormalized.clamp_min_(1e-6)).abs().mean()
if device == 'cuda':
print(f'Relative error of attention matrix: {rel_error}')
| fly-master | tests/models/attention/test_sbblocksparse_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.smyrf_attention import SmyrfAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestSmyrfAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('n_clusters', [4, 6, 8])
@pytest.mark.parametrize('n_hashes', [1, 2, 3])
def test_output(self, n_hashes, n_clusters, softmax_temp, device):
seed = 2357
embed_dim = 21
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 257
k_seqlen = 173
seed_cpu_cuda(seed)
attn_mask = None
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
q_cluster_size = (q_seqlen + n_clusters - 1) // n_clusters
k_cluster_size = (k_seqlen + n_clusters - 1) // n_clusters
smyrf_attn = SmyrfAttention(n_hashes, q_cluster_size, k_cluster_size,
softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
full_attn = FullAttention(softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
out_smyrf, A_smyrf = smyrf_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
assert out_smyrf.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_smyrf.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_smyrf >= 0)
# Sum of each row should be either 0.0 or 1.0
A_smyrf_sum = A_smyrf.sum(dim=-1)
assert torch.all(torch.isclose(A_smyrf_sum, torch.ones_like(A_smyrf_sum))
| torch.isclose(A_smyrf_sum, torch.zeros_like(A_smyrf_sum)))
assert torch.allclose(out_smyrf, torch.einsum('bhts,bshd->bthd', A_smyrf, v),
rtol=1e-5, atol=1e-6)
# Test that A_smyrf is equivalent to zero-ing out some elements A_full and then
# re-normalize so each row sums to 1
if n_hashes == 1:
_, A_full = full_attn(q, k, v, attn_mask, key_padding_mask, need_weights=True)
smyrf_mask = A_smyrf > 0.0
A_full_smyrf = F.normalize(A_full.masked_fill(~smyrf_mask, 0.0), p=1, dim=-1)
assert torch.allclose(A_smyrf, A_full_smyrf, rtol=1e-5, atol=1e-6)
| fly-master | tests/models/attention/test_smyrf_attention.py |
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sblocal_attention import SBLocalAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestSBLocalAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
@pytest.mark.parametrize('nb_features', [73, 26, 30000])
@pytest.mark.parametrize('local_context', [3, 4, 28, 33])
# @pytest.mark.parametrize('causal', [False, True])
@pytest.mark.parametrize('causal', [False])
def test_output(self, causal, local_context, nb_features, softmax_temp, device):
if nb_features > 10000 and device == 'cpu': # Would be too slow on CPU
return
# TODO: test causal masks
seed = 2357
embed_dim = 21
v_dim = 17
num_heads = 7
batch_size = 18
q_seqlen = 47
# [2021-08-08] local_dot_product_cuda has a bug when q_seqlen != k_seqlen
# https://github.com/idiap/fast-transformers/issues/98
k_seqlen = 39 if device == 'cpu' or not causal else q_seqlen
seed_cpu_cuda(seed)
attn_mask = None if not causal else TriangularCausalMask(q_seqlen, device=device)
key_padding_mask = LengthMask(torch.randint(low=k_seqlen // 4, high=k_seqlen,
size=(batch_size,), device=device),
max_len=k_seqlen)
sblocal_attn = SBLocalAttention(local_context, embed_dim, nb_features,
softmax_temp=softmax_temp, attention_dropout=0.0,
causal=causal).to(device)
q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
# key_padding_mask = LengthMask(k.new_full((k.shape[0],), k.shape[1], dtype=torch.long))
out_sblocal, (A_sblocal, A_sblocal_unnormalized, A_lr_unnormalized) = sblocal_attn(
q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=True,
return_attn_unnormalized=True
)
assert out_sblocal.shape == (batch_size, q_seqlen, num_heads, v_dim)
assert A_sblocal.shape == (batch_size, num_heads, q_seqlen, k_seqlen)
assert torch.all(A_sblocal >= 0)
# Sum of each row should be either 0.0 or 1.0
A_sblocal_sum = A_sblocal.sum(dim=-1)
assert torch.all(torch.isclose(A_sblocal_sum, torch.ones_like(A_sblocal_sum), atol=1e-2)
| torch.isclose(A_sblocal_sum, torch.zeros_like(A_sblocal_sum), atol=1e-2))
assert torch.allclose(out_sblocal, torch.einsum('bhts,bshd->bthd', A_sblocal, v),
rtol=1e-3, atol=1e-3)
temperature = 1 / math.sqrt(embed_dim) if softmax_temp is None else softmax_temp
A_full_unnormalized = torch.exp(torch.einsum('bthe,bshe->bhts', q * temperature, k))
if attn_mask is not None:
A_full_unnormalized.masked_fill_(~attn_mask.bool_matrix, 0.0)
A_full_unnormalized.masked_fill_(rearrange(~key_padding_mask.bool_matrix, 'b s -> b 1 1 s'),
0.0)
# Test that A_sblocal_unnormalized matches A_full_unnormalized on the local indices
# and A_lr_unnormalized on the non-local indices.
i = rearrange(torch.arange(q_seqlen, device=q.device), 't -> 1 1 t 1')
j = torch.arange(k_seqlen, device=k.device)
idx = j - i
local_mask = ((idx >= -(local_context // 2))
& (idx < (local_context + 1) // 2)
& (j < rearrange(key_padding_mask.lengths, 'b -> b 1 1 1')))
assert torch.allclose(A_sblocal_unnormalized.masked_select(local_mask),
A_full_unnormalized.masked_select(local_mask),
rtol=1e-3, atol=1e-4)
assert torch.allclose(A_sblocal_unnormalized.masked_select(~local_mask),
A_lr_unnormalized.masked_select(~local_mask),
rtol=1e-3, atol=1e-4)
rel_error = ((A_sblocal_unnormalized - A_full_unnormalized)
/ A_full_unnormalized.clamp_min_(1e-6)).abs().mean()
if device == 'cuda':
print(f'Relative error of attention matrix: {rel_error}')
# If nb_features is large, test that A_sblocal_unnormalized is close to A_full_unnormalized
if nb_features > 10000 and local_context > 10 and (softmax_temp is None
or softmax_temp < 1.0):
assert torch.allclose(rel_error, torch.zeros(1, device=device), atol=0.2)
| fly-master | tests/models/attention/test_sblocal_attention.py |
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from src.models.layers.blocksparse_linear import BlockSparseLinear
from src.models.layers.fastlinear import NinjaTurtleLinear, ButterflyGlobalLinear
from src.models.attention.blocksparse_utils import sparsify_tensor, densify_tensor
from src.utils.padding import pad_to_multiple
from src.models.attention.blocksparse_matmul import matmul
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def setup(batch_size, in_features, out_features, block_size):
x = torch.randn(batch_size, in_features, requires_grad=True, device='cuda')
kwargs = dict(window_size=block_size, stripes=1, step=2, gtoken=block_size)
sparsity_config = dict(
_target_='src.models.layers.fastlinear.NinjaTurtleSparsityConfig',
block=block_size,
**kwargs
)
bs_linear = BlockSparseLinear(in_features, out_features, sparsity_config, bias=True).to('cuda')
bs_fake_linear = NinjaTurtleLinear(in_features, out_features, bias=True, **kwargs).to('cuda')
return x, bs_linear, bs_fake_linear
class TestBlockSparseLinear:
@pytest.mark.parametrize('block_size', [16, 32])
@pytest.mark.parametrize('out_features', [698, 3081])
@pytest.mark.parametrize('in_features', [497, 149])
def test_init(self, in_features, out_features, block_size):
batch_size = 128
x, bs_linear, bs_fake_linear = setup(batch_size, in_features, out_features, block_size)
assert torch.allclose(bs_linear.weight.mean(), bs_fake_linear.weight.mean(), atol=1e-3)
assert torch.allclose(bs_linear.weight.std(), bs_fake_linear.weight.std(), atol=1e-2)
assert torch.allclose(bs_linear.bias.mean(), bs_fake_linear.bias.mean(), atol=1e-2)
assert torch.allclose(bs_linear.bias.std(), bs_fake_linear.bias.std(), atol=1e-2)
output = bs_linear.to('cuda')(x) - bs_linear.bias
assert output.mean().abs().item() < 1e-2
assert 0.3 < output.std().item() < 3.0
@pytest.mark.parametrize('out_features', [698, 3081])
@pytest.mark.parametrize('in_features', [497, 149])
def test_backends(self, in_features, out_features):
"""The two backends (huggingface and triton) should yield the same output and gradients.
"""
block_size = 32
batch_size = 128
x = torch.randn(batch_size, in_features, requires_grad=True, device='cuda')
kwargs = dict(window_size=block_size, stripes=1, step=2, gtoken=block_size)
sparsity_config = dict(
_target_='src.models.layers.fastlinear.NinjaTurtleSparsityConfig',
block=block_size,
**kwargs
)
bs_linear_triton = BlockSparseLinear(in_features, out_features, sparsity_config, bias=True,
backend='triton').to('cuda')
bs_linear_hf = BlockSparseLinear(in_features, out_features, sparsity_config, bias=True,
backend='huggingface').to('cuda')
with torch.no_grad():
bs_linear_hf.weight.copy_(rearrange(bs_linear_triton.weight,
'1 nnz blksz blksz1 -> (nnz blksz1) blksz'))
bs_linear_hf.bias.copy_(bs_linear_triton.bias)
out_triton = bs_linear_triton(x)
grad = torch.randn_like(out_triton)
grad_x_triton, grad_weight_triton = torch.autograd.grad(out_triton,
(x, bs_linear_triton.weight), grad)
x = x.clone().detach().requires_grad_(True)
out_hf = bs_linear_hf(x)
grad_x_hf, grad_weight_hf = torch.autograd.grad(out_hf, (x, bs_linear_hf.weight), grad)
assert torch.allclose(out_triton, out_hf, rtol=1e-5, atol=1e-6)
assert torch.allclose(grad_x_triton, grad_x_hf, rtol=1e-4, atol=1e-5)
assert torch.allclose(rearrange(grad_weight_triton,
'1 nnz blksz blksz1 -> (nnz blksz1) blksz'),
grad_weight_hf, rtol=1e-4, atol=1e-5)
@pytest.mark.parametrize('block_size', [16, 32])
@pytest.mark.parametrize('out_features', [698, 1280, 3081])
@pytest.mark.parametrize('in_features', [640, 497, 149])
def test_output(self, in_features, out_features, block_size):
"""With the same weight, the fast implementation (BlockSparseLinear) should yield the same
output and gradient as the slow implementation.
"""
batch_size = 128
x, bs_linear, bs_fake_linear = setup(batch_size, in_features, out_features, block_size)
with torch.no_grad():
if in_features % block_size != 0 or out_features % block_size != 0:
# Make sparse_mask block-aligned in these cases
sparse_mask = bs_linear.layout
sparse_mask = repeat(sparse_mask, 'p r -> (p blksz) (r blksz1)',
blksz=block_size, blksz1=block_size)
sparse_mask = sparse_mask[:out_features, :in_features]
bs_fake_linear.sparse_mask = sparse_mask
weight_dense = pad_to_multiple(bs_fake_linear.weight, multiple=block_size, dims=(0, 1))
weight_sparse = sparsify_tensor(rearrange(weight_dense, 'd2 d1 -> 1 d2 d1'),
bs_linear.layout)
layout = rearrange(bs_linear.layout, 'd1 d2 -> 1 d1 d2')
assert torch.allclose(densify_tensor(weight_sparse,
layout)[:, :, :out_features, :in_features]
* bs_fake_linear.sparse_mask,
bs_fake_linear.weight * bs_fake_linear.sparse_mask)
if bs_linear.backend == 'triton':
bs_linear.weight.copy_(weight_sparse)
elif bs_linear.backend == 'huggingface':
bs_linear.weight.copy_(rearrange(weight_sparse,
'1 nnz blksz blksz1 -> (nnz blksz1) blksz'))
bs_linear.bias.copy_(bs_fake_linear.bias)
out = bs_linear(x)
grad = torch.randn_like(out)
grad_x, grad_weight = torch.autograd.grad(out, (x, bs_linear.weight), grad)
x = x.clone().detach().requires_grad_(True)
out_slow = bs_fake_linear(x)
grad_x_slow, grad_weight_slow = torch.autograd.grad(out_slow, (x, bs_fake_linear.weight),
grad)
assert torch.allclose(out, out_slow, rtol=1e-4, atol=1e-5)
assert torch.allclose(grad_x, grad_x_slow, rtol=1e-4, atol=1e-5)
if bs_linear.backend == 'huggingface':
grad_weight = rearrange(grad_weight, '(nnz blksz1) blksz -> 1 nnz blksz blksz1',
blksz=block_size, blksz1=block_size)
grad_weight_dense = densify_tensor(grad_weight, layout)
assert torch.allclose(grad_weight_dense,
pad_to_multiple(grad_weight_slow, multiple=block_size, dims=(0, 1)),
rtol=1e-4, atol=1e-5)
| fly-master | tests/models/layers/test_blocksparse_linear.py |
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.layers.blocksparse_linear import BlockSparseLinear, FlatBlockButterflySparsityConfig
class TestFlatBlockButterflySparsityConfig:
@pytest.mark.parametrize('butterfly_size,n_factors,block_size',
[(32, 3, 16), (16, 2, 32), (16, 3, 32), (16, 4, 32), (8, 2, 32), (8, 3, 32)])
def test_parameters_512(self, butterfly_size, n_factors, block_size):
in_features, out_features = 512, 2048
self = FlatBlockButterflySparsityConfig(butterfly_size, n_factors, block=block_size,
global_size=0)
mask = self.make_layout(out_features, in_features)
print(f'Saving: {mask.float().mean().item()}')
batch_size = 3
x = torch.randn(batch_size, in_features)
s_cfg = {'_target_': 'src.models.layers.blocksparse_linear.FlatBlockButterflySparsityConfig',
'butterfly_size': butterfly_size,
'n_factors': n_factors,
'block': block_size}
self = BlockSparseLinear(in_features, out_features, s_cfg, backend='dense')
out = self(x)
assert out.shape == (batch_size, out_features)
@pytest.mark.parametrize('butterfly_size,n_factors,block_size',
[(32, 3, 8), (16, 2, 16), (16, 3, 16), (16, 4, 16), (8, 2, 32), (8, 3, 32)])
def test_parameters_768(self, butterfly_size, n_factors, block_size):
in_features, out_features = 768, 3072
self = FlatBlockButterflySparsityConfig(butterfly_size, n_factors, block=block_size,
global_size=0)
mask = self.make_layout(out_features, in_features)
print(f'Saving: {mask.float().mean().item()}')
batch_size = 3
x = torch.randn(batch_size, in_features)
s_cfg = {'_target_': 'src.models.layers.blocksparse_linear.FlatBlockButterflySparsityConfig',
'butterfly_size': butterfly_size,
'n_factors': n_factors,
'block': block_size}
self = BlockSparseLinear(in_features, out_features, s_cfg, backend='dense')
out = self(x)
assert out.shape == (batch_size, out_features)
| fly-master | tests/models/layers/test_flatblockbutterfly_sparsity.py |
import math
import torch
import pytest
from src.models.layers.block_butterfly_multiply import block_butterfly_multiply
from src.models.layers.block_butterfly_multiply import block_butterfly_factor_multiply
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('nstacks', [1, 2, 3])
@pytest.mark.parametrize('nblocks', [1, 2, 3])
@pytest.mark.parametrize('increasing_stride', [True, False])
@pytest.mark.parametrize('log_n', [3, 4, 5])
@pytest.mark.parametrize('block_size', [1, 2, 4, 6])
def test_block_butterfly_multiply(block_size, log_n, increasing_stride, nblocks, nstacks, device):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
batch_size = 3
twiddle = torch.randn(nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size,
device=device)
input = torch.randn(batch_size, nstacks, block_size * n, device=device)
    output_size = None
output = block_butterfly_multiply(twiddle, input, increasing_stride=increasing_stride,
output_size=output_size)
assert output.shape == (batch_size, nstacks, block_size * n)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('nstacks', [1, 2, 3])
@pytest.mark.parametrize('increasing_stride', [True, False])
@pytest.mark.parametrize('log_n', [3, 4, 5])
@pytest.mark.parametrize('block_size', [1, 2, 4, 6])
def test_block_butterfly_factor_multiply(block_size, log_n, increasing_stride, nstacks, device):
# set seed
torch.random.manual_seed(0)
nblocks = 1
    n = 1 << log_n
    batch_size = 3
twiddle = torch.randn(nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size,
device=device)
input = torch.randn(batch_size, nstacks, block_size * n, device=device)
output = block_butterfly_multiply(twiddle, input, increasing_stride=increasing_stride)
assert output.shape == (batch_size, nstacks, block_size * n)
output_factor = input
for idx in range(log_n):
output_factor = block_butterfly_factor_multiply(twiddle[:, 0], output_factor, idx,
increasing_stride=increasing_stride)
assert torch.allclose(output, output_factor)
| fly-master | tests/models/layers/test_block_butterfly_multiply.py |
import math
import torch
import pytest
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply_reference
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [4, 10, 12])
def test_block_diag_butterfly_multiply_reference(log_n, device, dtype):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
out1 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=1)
out2 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2)
out3 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=3)
assert torch.allclose(out1, out2, rtol=1e-4, atol=1e-4)
assert torch.allclose(out2, out3, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_multiply_reference_rectangular(device, dtype):
# set seed
torch.random.manual_seed(0)
n = 768
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(8, 96 * 2, 96, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(24, 16, 64, device=x.device, dtype=x.dtype, requires_grad=True)
out2 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2)
out3 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=3)
assert torch.allclose(out2, out3, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [4, 10, 12])
def test_block_diag_butterfly_multiply(log_n, device, dtype):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
out = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly)
grad = torch.randn_like(out)
dx, dw1_bfly, dw2_bfly = torch.autograd.grad(out, (x, w1_bfly, w2_bfly), grad,
retain_graph=True)
assert out.shape == (batch_size, n)
out_ref = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly)
dx_ref, dw1_bfly_ref, dw2_bfly_ref = torch.autograd.grad(out_ref, (x, w1_bfly, w2_bfly), grad,
retain_graph=True)
assert torch.allclose(out, out_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dx, dx_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dw1_bfly, dw1_bfly_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dw2_bfly, dw2_bfly_ref, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_multiply_rectangular(device, dtype):
# set seed
torch.random.manual_seed(0)
n = 768
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(8, 96 * 2, 96, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(24, 16, 64, device=x.device, dtype=x.dtype, requires_grad=True)
out = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly)
grad = torch.randn_like(out)
dx, dw1_bfly, dw2_bfly = torch.autograd.grad(out, (x, w1_bfly, w2_bfly), grad,
retain_graph=True)
assert out.shape == (batch_size, 24 * 16)
out_ref = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly)
dx_ref, dw1_bfly_ref, dw2_bfly_ref = torch.autograd.grad(out_ref, (x, w1_bfly, w2_bfly), grad,
retain_graph=True)
assert torch.allclose(out, out_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dx, dx_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dw1_bfly, dw1_bfly_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dw2_bfly, dw2_bfly_ref, rtol=1e-4, atol=1e-4)
| fly-master | tests/models/layers/test_blockdiag_butterfly_multiply.py |
import pytest
import torch
import torch.nn as nn
from einops import rearrange, reduce
from fast_transformers.masking import FullMask, LengthMask
from src.models.modules.multihead_attention import MultiheadAttention
from src.models.attention.full_attention import FullAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class TestMultiheadAttention:
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('batch_first', [True, False])
@pytest.mark.parametrize('add_zero_attn', [True, False])
@pytest.mark.parametrize('add_bias_kv', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('dropout', [0.0, 0.3])
@pytest.mark.parametrize('same_kv', [True, False])
@pytest.mark.parametrize('same_qk', [True, False])
def test_output(self, same_qk, same_kv, dropout, bias, add_bias_kv, add_zero_attn, batch_first,
device):
seed = 2357
embed_dim = 21
num_heads = 3
batch_size = 5
q_seqlen = 47
k_seqlen = 37 if not same_qk else q_seqlen
seed_cpu_cuda(seed)
attn_mask = FullMask(torch.randint(low=0, high=2, size=(q_seqlen, k_seqlen),
dtype=torch.bool, device=device))
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
seed_cpu_cuda(seed)
mha_pt = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout, bias=bias,
add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn,
batch_first=batch_first).to(device)
seed_cpu_cuda(seed)
mha = MultiheadAttention(embed_dim, num_heads, bias=bias,
add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn,
batch_first=batch_first).to(device)
full_attn = FullAttention(attention_dropout=dropout)
q = torch.randn(batch_size, q_seqlen, embed_dim, device=device)
k = q if same_qk else torch.randn(batch_size, k_seqlen, embed_dim, device=device)
v = k if same_kv else torch.randn(batch_size, k_seqlen, embed_dim, device=device)
if not batch_first:
q, k, v = [rearrange(x, 'b s d -> s b d').contiguous() for x in [q, k, v]]
for training in [True, False]:
seed_cpu_cuda(seed + 1)
mha_pt.train(training)
# Pytorch uses a different convention for the mask: 1 means ignore and 0 means keep
output_pt, attn_pt = mha_pt(q, k, v,
attn_mask=~attn_mask.bool_matrix,
key_padding_mask=~key_padding_mask.bool_matrix
)
seed_cpu_cuda(seed + 1)
mha.train(training)
full_attn.train(training)
output, attn = mha(full_attn, q, k, v,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=(dropout == 0.0))
assert output.shape == ((batch_size, q_seqlen, embed_dim) if batch_first
else (q_seqlen, batch_size, embed_dim))
if attn is not None:
assert attn.shape == (batch_size, num_heads, q_seqlen,
k_seqlen + int(add_bias_kv) + int(add_zero_attn))
# We put the bias_k/v and the zero_k/v in a different position compared to Pytorch's
# implementation, so the dropout mask would be different.
if not (dropout != 0.0 and (add_bias_kv or add_zero_attn)):
assert torch.allclose(output, output_pt, rtol=1e-5, atol=1e-6, equal_nan=True)
# Our FullAttention returns the attention weight *before* dropout, while PyTorch
# returns the attention weight *after* dropout, so we don't expect them to be equal.
# Also we return the attention weights for all heads, while Pytorch takes the mean.
if dropout == 0.0 and not add_bias_kv and not add_zero_attn:
attn_mean = reduce(attn, 'b h t s -> b t s', 'mean')
assert torch.allclose(attn_mean, attn_pt, rtol=1e-5, atol=1e-6, equal_nan=True)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('batch_first', [True, False])
@pytest.mark.parametrize('add_zero_attn', [True, False])
@pytest.mark.parametrize('add_bias_kv', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('dropout', [0.0, 0.3])
@pytest.mark.parametrize('same_kv', [True, False])
@pytest.mark.parametrize('same_qk', [True, False])
def test_kdim_vdim(self, same_qk, same_kv, dropout, bias, add_bias_kv, add_zero_attn,
batch_first, device):
""" Because of our different interpretation of kdim and vdim compared to Pytorch,
we only test the output shape and don't compare to Pytorch's output.
"""
seed = 2357
embed_dim = 21
kdim = 33
vdim = 18
num_heads = 3
batch_size = 5
q_seqlen = 47
k_seqlen = 37 if not same_qk else q_seqlen
seed_cpu_cuda(seed)
attn_mask = FullMask(torch.randint(low=0, high=2, size=(q_seqlen, k_seqlen),
dtype=torch.bool, device=device))
key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,),
device=device), max_len=k_seqlen)
seed_cpu_cuda(seed)
mha = MultiheadAttention(embed_dim, num_heads, bias=bias,
add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn,
kdim=kdim, vdim=vdim, batch_first=batch_first).to(device)
full_attn = FullAttention(attention_dropout=dropout)
q = torch.randn(batch_size, q_seqlen, embed_dim, device=device)
k = q if same_qk else torch.randn(batch_size, k_seqlen, embed_dim, device=device)
v = k if same_kv else torch.randn(batch_size, k_seqlen, embed_dim, device=device)
if not batch_first:
q, k, v = [rearrange(x, 'b s d -> s b d').contiguous() for x in [q, k, v]]
for training in [True, False]:
mha.train(training)
full_attn.train(training)
output, attn = mha(full_attn, q, k, v,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=(dropout == 0.0))
assert output.shape == ((batch_size, q_seqlen, embed_dim) if batch_first
else (q_seqlen, batch_size, embed_dim))
if attn is not None:
assert attn.shape == (batch_size, num_heads, q_seqlen,
k_seqlen + int(add_bias_kv) + int(add_zero_attn))
| fly-master | tests/models/modules/test_multihead_attention.py |
import math
import torch
import pytest
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply_reference
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.ops.blockdiag_butterfly_einsum import (
blockdiag_butterfly_multiply_einsum_simple, blockdiag_butterfly_project_einsum_simple,
blockdiag_butterfly_multiply_einsum, blockdiag_butterfly_project_einsum,
blockdiag_butterfly_multiply_einsum_rank, blockdiag_butterfly_project_einsum_rank
)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [4, 10, 12])
def test_block_diag_butterfly_multiply_einsum_simple(log_n, device, dtype):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
out1 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2)
out2 = blockdiag_butterfly_multiply_einsum_simple(x, w1_bfly, w2_bfly)
assert torch.allclose(out1, out2, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_multiply_einsum_simple_rectangular(device, dtype):
# set seed
torch.random.manual_seed(0)
n = 768
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(8, 96 * 2, 96, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(96 * 2, 16, 8, device=x.device, dtype=x.dtype, requires_grad=True)
out2 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2)
out3 = blockdiag_butterfly_multiply_einsum_simple(x, w1_bfly, w2_bfly)
assert torch.allclose(out2, out3, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [2, 4, 10])
def test_block_diag_butterfly_project_einsum_sqrtn(log_n, device):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
x = torch.eye(n, device=device)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
bfly = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly).t()
w1_bfly_projected, w2_bfly_projected = blockdiag_butterfly_project_einsum_simple(bfly,
nblocks1=sqrtn,
nblocks2=sqrtn)
bfly_projected = blockdiag_butterfly_multiply(x, w1_bfly_projected, w2_bfly_projected).t()
print((bfly_projected - bfly).abs().max())
assert torch.allclose(bfly_projected, bfly, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_project_einsum_simple(device):
# set seed
torch.random.manual_seed(0)
n = 768
x = torch.eye(n, device=device)
nblocks1, nblocks2 = 8, 96 * 2
w1_bfly = torch.randn(nblocks1, nblocks2, 96, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(nblocks2, 16, nblocks1, device=x.device, dtype=x.dtype, requires_grad=True)
bfly = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly).t()
w1_bfly_projected, w2_bfly_projected = blockdiag_butterfly_project_einsum_simple(bfly,
nblocks1=nblocks1,
nblocks2=nblocks2)
assert w1_bfly_projected.shape == w1_bfly.shape
assert w2_bfly_projected.shape == w2_bfly.shape
bfly_projected = blockdiag_butterfly_multiply(x, w1_bfly_projected, w2_bfly_projected).t()
print((bfly_projected - bfly).abs().max())
assert torch.allclose(bfly_projected, bfly, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_project_einsum(device):
# set seed
torch.random.manual_seed(0)
n = 768
x = torch.eye(n, device=device)
nblocks1, nblocks2 = 8, 24
b1, b2 = 8, 2
w1_bfly = torch.randn(nblocks1, nblocks2 * b1, 96, device=x.device, dtype=x.dtype,
requires_grad=True)
w2_bfly = torch.randn(nblocks2, 16, nblocks1 * b1, device=x.device, dtype=x.dtype,
requires_grad=True)
bfly = blockdiag_butterfly_multiply_einsum(x, w1_bfly, w2_bfly, b2=b2).t()
w1_bfly_projected, w2_bfly_projected = blockdiag_butterfly_project_einsum(bfly,
nblocks1=nblocks1,
nblocks2=nblocks2,
b1=b1, b2=b2)
assert w1_bfly_projected.shape == w1_bfly.shape
assert w2_bfly_projected.shape == w2_bfly.shape
bfly_projected = blockdiag_butterfly_multiply_einsum(x, w1_bfly_projected, w2_bfly_projected,
b2=b2).t()
print((bfly_projected - bfly).abs().max())
assert torch.allclose(bfly_projected, bfly, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_block_diag_butterfly_project_einsum_rank(device):
# set seed
torch.random.manual_seed(0)
n = 768
x = torch.eye(n, device=device)
nblocks1, nblocks2 = 8, 24
rank = 8
w1_bfly = torch.randn(nblocks1, nblocks2 * rank, 96, device=x.device, dtype=x.dtype,
requires_grad=True)
w2_bfly = torch.randn(nblocks2, 16, nblocks1 * rank, device=x.device, dtype=x.dtype,
requires_grad=True)
bfly = blockdiag_butterfly_multiply_einsum_rank(x, w1_bfly, w2_bfly).t()
w1_bfly_projected, w2_bfly_projected = blockdiag_butterfly_project_einsum_rank(bfly,
nblocks1=nblocks1,
nblocks2=nblocks2,
rank=rank)
assert w1_bfly_projected.shape == w1_bfly.shape
assert w2_bfly_projected.shape == w2_bfly.shape
bfly_projected = blockdiag_butterfly_multiply_einsum_rank(x, w1_bfly_projected,
w2_bfly_projected).t()
print((bfly_projected - bfly).abs().max())
assert torch.allclose(bfly_projected, bfly, rtol=1e-4, atol=1e-4)
| fly-master | tests/ops/test_blockdiag_butterfly_einsum.py |
import math
import torch
import pytest
from einops import rearrange
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.ops.blockdiag_butterfly_projection import blockdiag_butterfly_project, factors
from src.ops.blockdiag_butterfly_projection import ButterflyFFT, ButterflyFFT2
# from src.ops.permutation import bitreversal_permutation
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [2, 4, 10, 12])
def test_block_diag_butterfly_project_sqrtn(log_n, device):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
x = torch.eye(n, device=device)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
bfly = blockdiag_butterfly_multiply(x, w1_bfly, w2_bfly).t()
w1_bfly_projected, w2_bfly_projected = blockdiag_butterfly_project(bfly)
bfly_projected = blockdiag_butterfly_multiply(x, w1_bfly_projected, w2_bfly_projected).t()
print((bfly_projected - bfly).abs().max())
assert torch.allclose(bfly_projected, bfly, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('direction', ['fft', 'ifft'])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [2, 4, 10])
def test_block_diag_butterfly_project_fft_sqrtn(log_n, device, direction):
# set seed
torch.random.manual_seed(0)
n = 1 << log_n
sqrtn = 1 << (log_n // 2)
batch_size = 3
eye = torch.eye(n, dtype=torch.complex64, device=device)
transform = torch.fft.fft if direction == 'fft' else torch.fft.ifft
dft = transform(eye, norm='ortho').t()
# perm = bitreversal_permutation(n)
    # We don't actually need the bitreversal permutation: any permutation that swaps
    # the axes of the sqrtn x sqrtn input will work (see the worked example after this test).
perm = rearrange(torch.arange(n, device=device), '(i j) -> (j i)', i=sqrtn)
# The BP (butterfly - permutation) decomposition of FFT / iFFT
# Converting to complex128 makes the approximation an order of magnitude more accurate
w1_fft_projected, w2_fft_projected = blockdiag_butterfly_project(dft[:, perm].cdouble())
w1_fft_projected, w2_fft_projected = w1_fft_projected.cfloat(), w2_fft_projected.cfloat()
fft_projected = blockdiag_butterfly_multiply(eye, w1_fft_projected, w2_fft_projected).t()
print((fft_projected - dft[:, perm]).abs().max())
assert torch.allclose(fft_projected, dft[:, perm], rtol=1e-4, atol=1e-4)
x = torch.randn(batch_size, n, dtype=torch.complex64, device=device)
out_fft = transform(x, norm='ortho')
out = blockdiag_butterfly_multiply(x[:, perm], w1_fft_projected, w2_fft_projected)
assert torch.allclose(out, out_fft, rtol=1e-4, atol=1e-4)
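# A concrete illustration of the axis-swapping permutation used in the test above
# (added for clarity): for n = 16 (sqrtn = 4),
#     rearrange(torch.arange(16), '(i j) -> (j i)', i=4)
# yields [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15],
# i.e. the row-major 4 x 4 grid read out column by column, which is exactly the axis
# swap the block-diagonal factors expect; no bitreversal is needed.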
@pytest.mark.parametrize('direction', ['fft', 'ifft'])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('norm', ['ortho', None])
@pytest.mark.parametrize('n', [15, 36, 196, 27, 48, 42, 85, 168, 512])
def test_block_diag_butterfly_project_fft_rectangular(n, norm, device, direction):
# set seed
torch.random.manual_seed(0)
batch_size = 3
eye = torch.eye(n, dtype=torch.complex64, device=device)
transform = torch.fft.fft if direction == 'fft' else torch.fft.ifft
dft = transform(eye, norm=norm).t()
sizes = factors(n)[-1]
sizes = (sizes[1], sizes[0])
perm = rearrange(torch.arange(n, device=device), '(i j) -> (j i)', j=sizes[0])
# The BP (butterfly - permutation) decomposition of FFT / iFFT
# Converting to complex128 makes the approximation an order of magnitude more accurate
w1_fft_projected, w2_fft_projected = blockdiag_butterfly_project(dft[:, perm].cdouble(),
sizes=sizes)
w1_fft_projected, w2_fft_projected = w1_fft_projected.cfloat(), w2_fft_projected.cfloat()
fft_projected = blockdiag_butterfly_multiply(eye, w1_fft_projected, w2_fft_projected).t()
print((fft_projected - dft[:, perm]).abs().max())
assert torch.allclose(fft_projected, dft[:, perm], rtol=1e-4, atol=1e-4)
x = torch.randn(batch_size, n, dtype=torch.complex64, device=device)
out_fft = transform(x, norm=norm)
out = blockdiag_butterfly_multiply(x[:, perm], w1_fft_projected, w2_fft_projected)
assert torch.allclose(out, out_fft, rtol=1e-4, atol=1e-4)
bfly_fft = ButterflyFFT(n, direction=direction, norm=norm).to(device=device)
out_module = bfly_fft(x)
assert torch.allclose(out_module, out_fft, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize('direction', ['fft', 'ifft'])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('norm', ['ortho', None])
@pytest.mark.parametrize('n2', [85, 512])
@pytest.mark.parametrize('n1', [42, 160, 161])
def test_butterflyfft2(n1, n2, norm, device, direction):
# set seed
torch.random.manual_seed(0)
batch_size = 3
x = torch.randn(batch_size, n1, n2, dtype=torch.complex64, device=device)
transform = torch.fft.fft2 if direction == 'fft' else torch.fft.ifft2
out_fft = transform(x, norm=norm)
bfly_fft = ButterflyFFT2(n1, n2, direction=direction, norm=norm).to(device=device)
out = bfly_fft(x)
assert torch.allclose(out, out_fft, rtol=1e-4, atol=1e-4)
| fly-master | tests/ops/test_blockdiag_butterfly_projection.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from src.ops.fused_softmax_dropout import _fused_softmax_dropout
@pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('seqlen', [128, 512, 1024])
def test_softmax_dropout(seqlen, dtype):
device = 'cuda'
dropout_prob = 0.37
rtol, atol = (1e-5, 1e-6) if dtype == torch.float32 else (1e-3, 3e-4)
# set seed
torch.random.manual_seed(0)
batch_size = 16
nheads = 2
x = torch.randn(batch_size, nheads, seqlen, seqlen, device=device, dtype=dtype,
requires_grad=True)
lengths = torch.randint(seqlen - 20, seqlen, (batch_size, 1), device=device)
attention_mask_bool = repeat(torch.arange(seqlen, device=device),
's -> b s', b=batch_size) < lengths
attention_mask = torch.zeros(batch_size, seqlen, device=device, dtype=dtype)
attention_mask[~attention_mask_bool] = -10000.0
attention_mask = rearrange(attention_mask, 'b s -> b 1 1 s')
torch.random.manual_seed(0)
out, dropout_mask = _fused_softmax_dropout.apply(x, dropout_prob, attention_mask,
True)
out_pt = F.softmax(x + attention_mask, dim=-1, dtype=x.dtype) * dropout_mask / (1 - dropout_prob)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
g = torch.randn_like(out)
dx, = torch.autograd.grad(out, x, g)
dx_pt, = torch.autograd.grad(out_pt, x, g)
assert torch.allclose(dx, dx_pt, rtol=rtol, atol=atol)
| fly-master | tests/ops/test_fused_softmax_dropout.py |
import math
import torch
import pytest
from einops import rearrange
from src.ops.blockdiag_multiply import blockdiag_multiply_reference, blockdiag_multiply
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_blockdiag_multiply(device, dtype):
# set seed
torch.random.manual_seed(0)
n = 768
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
weight = torch.randn(8, 96 * 2, 96, device=x.device, dtype=x.dtype, requires_grad=True)
out = blockdiag_multiply(x, weight)
grad = torch.randn_like(out)
dx, dweight = torch.autograd.grad(out, (x, weight), grad, retain_graph=True)
assert out.shape == (batch_size, 8 * 96 * 2)
out_ref = blockdiag_multiply_reference(x, weight)
dx_ref, dweight_bfly = torch.autograd.grad(out_ref, (x, weight), grad, retain_graph=True)
assert torch.allclose(out, out_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dx, dx_ref, rtol=1e-4, atol=1e-4)
assert torch.allclose(dweight, dweight_bfly, rtol=1e-4, atol=1e-4)
| fly-master | tests/ops/test_blockdiag_multiply.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from src.ops.triton.softmax_dropout import softmax_dropout
@pytest.mark.parametrize('dtype', [torch.float32, torch.float16])
@pytest.mark.parametrize('seqlen', [128, 512, 1024])
def test_softmax_dropout(seqlen, dtype):
device = 'cuda'
dropout_prob = 0.37
rtol, atol = (1e-5, 1e-6) if dtype == torch.float32 else (1e-3, 3e-4)
# set seed
torch.random.manual_seed(0)
batch_size = 16
nheads = 2
x = torch.randn(batch_size, nheads, seqlen, seqlen, device=device, dtype=dtype, requires_grad=True)
lengths = torch.randint(seqlen - 20, seqlen, (batch_size, 1), device=device)
attention_mask_bool = repeat(torch.arange(seqlen, device=device),
's -> b s', b=batch_size) < lengths
attention_mask = torch.zeros(batch_size, seqlen, device=device, dtype=dtype)
attention_mask[~attention_mask_bool] = -10000.0
attention_mask = rearrange(attention_mask, 'b s -> b 1 1 s')
torch.random.manual_seed(0)
out_pt = F.dropout(F.softmax(x + attention_mask, dim=-1, dtype=x.dtype), dropout_prob)
torch.random.manual_seed(0)
out = softmax_dropout(x, dropout_prob, mask=attention_mask, mask_type='bk')
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
g = torch.randn_like(out)
dx, = torch.autograd.grad(out, x, g.clone()) # Need to clone as the backward is in-place.
dx_pt, = torch.autograd.grad(out_pt, x, g)
assert torch.allclose(dx, dx_pt, rtol=rtol, atol=atol)
| fly-master | tests/ops/triton/test_softmax_dropout.py |
"""Convert T2T-ViT checkpoints to be compatible with our rewrite
"""
import re
import sys
import shutil
from pathlib import Path
import numpy as np
import torch
def main():
for file_name in sys.argv[1:]:
path = Path(file_name).expanduser()
if not str(path).endswith('.og'): # Back up original checkpoint
path_og = Path(str(path) + '.og')
shutil.copy2(path, path_og)
state_dict = torch.load(path, map_location='cpu')
# T2T-ViT checkpoint is nested in the key 'state_dict_ema'
if state_dict.keys() == {'state_dict_ema'}:
state_dict = state_dict['state_dict_ema']
# Replace the names of some of the submodules
def key_mapping(key):
if key == 'pos_embed':
return 'pos_embed.pe'
elif key.startswith('tokens_to_token.'):
return re.sub('^tokens_to_token.', 'patch_embed.', key)
else:
return key
state_dict = {key_mapping(k): v for k, v in state_dict.items()}
torch.save(state_dict, path)
if __name__ == '__main__':
main()
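# For reference, the renaming performed by key_mapping above, on illustrative
# (hypothetical) checkpoint keys:
#   'pos_embed'                    -> 'pos_embed.pe'
#   'tokens_to_token.attention1.w' -> 'patch_embed.attention1.w'
#   any other key                  -> unchanged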
| fly-master | scripts/convert_checkpoint_t2t_vit.py |
| fly-master | src/__init__.py |
from typing import List, Optional
from pathlib import Path
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
log = utils.get_logger(__name__)
def train(config: DictConfig) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# We want to add fields to config so need to call OmegaConf.set_struct
OmegaConf.set_struct(config, False)
# Init lightning model
model: LightningModule = hydra.utils.instantiate(config.task, cfg=config, _recursive_=False)
datamodule: LightningDataModule = model._datamodule
# Init lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config.callbacks.items():
if cb_conf is not None and "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
if config.get('resume'):
try:
checkpoint_path = Path(config.callbacks.model_checkpoint.dirpath)
if checkpoint_path.is_dir():
checkpoint_path /= 'last.ckpt'
if checkpoint_path.is_file():
config.trainer.resume_from_checkpoint = str(checkpoint_path)
else:
log.info(f'Checkpoint file {str(checkpoint_path)} not found. Will start training from scratch')
except KeyError:
pass
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger, _convert_="partial"
)
# Train the model
log.info("Starting training!")
trainer.fit(model=model, datamodule=datamodule)
# Evaluate model on test set, using the best model achieved during training
if config.get("test_after_training") and not config.trainer.get("fast_dev_run"):
log.info("Starting testing!")
trainer.test()
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Print path to best checkpoint
if not config.trainer.get("fast_dev_run"):
log.info(f"Best model ckpt: {trainer.checkpoint_callback.best_model_path}")
# Return metric score for hyperparameter optimization
optimized_metric = config.get("optimized_metric")
if optimized_metric:
return trainer.callback_metrics[optimized_metric]
| fly-master | src/train.py |
from typing import List, Optional
from pathlib import Path
import torch
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
log = utils.get_logger(__name__)
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
if path.is_dir():
path /= 'checkpoint_last.pt'
# dst = f'cuda:{torch.cuda.current_device()}'
log.info(f'Loading checkpoint from {str(path)}')
state_dict = torch.load(path, map_location=device)
# T2T-ViT checkpoint is nested in the key 'state_dict_ema'
if state_dict.keys() == {'state_dict_ema'}:
state_dict = state_dict['state_dict_ema']
return state_dict
def evaluate(config: DictConfig) -> None:
"""Example of inference with trained model.
It loads trained image classification model from checkpoint.
Then it loads example image and predicts its label.
"""
# load model from checkpoint
# model __init__ parameters will be loaded from ckpt automatically
# you can also pass some parameter explicitly to override it
# We want to add fields to config so need to call OmegaConf.set_struct
OmegaConf.set_struct(config, False)
# load Lightning model
checkpoint_type = config.eval.get('checkpoint_type', 'lightning')
if checkpoint_type not in ['lightning', 'pytorch']:
        raise NotImplementedError(f'checkpoint_type {checkpoint_type} not supported')
if checkpoint_type == 'lightning':
cls = hydra.utils.get_class(config.task._target_)
trained_model = cls.load_from_checkpoint(checkpoint_path=config.eval.ckpt)
else:
trained_model: LightningModule = hydra.utils.instantiate(config.task, cfg=config,
_recursive_=False)
load_return = trained_model.model.load_state_dict(load_checkpoint(config.eval.ckpt,
device=trained_model.device),
strict=False)
log.info(load_return)
# datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
datamodule: LightningDataModule = trained_model._datamodule
datamodule.prepare_data()
datamodule.setup()
# print model hyperparameters
log.info(f'Model hyperparameters: {trained_model.hparams}')
# Init Lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config["callbacks"].items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init Lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config["logger"].items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init Lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger, _convert_="partial"
)
# Evaluate the model
log.info("Starting evaluation!")
if config.eval.get('run_val', True):
trainer.validate(model=trained_model, datamodule=datamodule)
if config.eval.get('run_test', True):
trainer.test(model=trained_model, datamodule=datamodule)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=trained_model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
| fly-master | src/eval.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/metrics/sequence_perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
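# A tiny worked example of the difference: with per-token NLLs of 1.0 and 3.0,
# exp(average(nll)) = exp(2.0) ~ 7.39, whereas average(exp(nll)) = (e^1 + e^3) / 2 ~ 11.4,
# so averaging already-exponentiated values overstates the perplexity.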
import torch
import torch.nn.functional as F
from torchmetrics import Metric
__all__ = ['Perplexity']
class Perplexity(Metric):
"""
    This class computes perplexity over all batches, i.e. the exp of the mean negative log likelihood.
You have to provide ``logits`` (float tensor of shape [batch_size x seq_length x vocab_size]) and
``labels`` (int tensor of shape [batch_size x seq_length] with values from the range [0, vocab_size-1])
to the :meth:`update` method. If some of the sequences are shorter than seq_length, you can also provide
an optional argument ``mask`` (bool tensor of shape [batch_size x seq_length]) which masks out tokens
not participating in perplexity computation.
See :doc:`PyTorch Lightning Metrics<pytorch-lightning:metrics>` for the metric usage instructions.
Args:
compute_on_step:
Forward only calls ``update()`` and returns ``None`` if this is set to ``False``. default: ``True``
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()`` before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: ``None`` (which selects the entire
world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When ``None``, DDP will be used
to perform the allgather.
"""
def __init__(self, compute_on_step=True, dist_sync_on_step=False, process_group=None, dist_sync_fn=None):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
        # Running mean of the per-token negative log likelihood
self.add_state('nll_mean', default=torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx='mean')
        # Total number of tokens across all batches
self.add_state('numel', default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
def update(self, logits: torch.Tensor, labels: torch.Tensor, mask=None):
# if mask is None:
# mask = torch.ones_like(labels)
# mask = mask.to(logits.dtype)
# log_probs = torch.log_softmax(logits, dim=-1)
# target_log_probs = log_probs.gather(-1, labels.unsqueeze(-1)).squeeze(-1)
# nll = -(target_log_probs * mask).sum()
# self.numel += mask.sum().long()
# self.nll_sum += nll
        # TODO: the mask argument is currently ignored
current_sum = self.nll_mean.double() * self.numel
self.numel += labels.numel()
loss = F.cross_entropy(logits, labels)
self.nll_mean = (current_sum + loss.double() * labels.numel()) / self.numel
def compute(self):
"""
        Returns perplexity across all workers, computed from :attr:`nll_mean` and :attr:`numel`.
"""
if self.numel.eq(0):
return None
# return (self.nll_sum / self.numel).exp()
return self.nll_mean.exp()
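if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the metric's API).
    # Since update() calls F.cross_entropy(logits, labels) directly, logits are assumed
    # to be flattened to [num_tokens, vocab_size] with labels of shape [num_tokens].
    torch.manual_seed(0)
    metric = Perplexity()
    logits = torch.randn(4 * 16, 100)
    labels = torch.randint(0, 100, (4 * 16,))
    metric.update(logits, labels)
    # Random logits over 100 classes give a perplexity on the order of the vocabulary size.
    print(metric.compute())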
| fly-master | src/metrics/perplexity.py |
import torch
from torch import Tensor
from torchmetrics import Metric, Accuracy
class AccuracyMine(Accuracy):
"""Wrap torchmetrics.Accuracy to take argmax of y in case of Mixup.
"""
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
super().update(preds, target.argmax(dim=-1) if target.is_floating_point() else target)
# TD [2022-02-10] torchmetrics.Accuracy doesn't work with negative ignore_index yet
# https://github.com/PyTorchLightning/metrics/pull/362
class AccuracyIgnoreIndex(Metric):
def __init__(self, ignore_index=None, compute_on_step=True, dist_sync_on_step=False,
process_group=None, dist_sync_fn=None):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
self.ignore_index = ignore_index
# Total number of sequences in all batches
self.add_state('total', default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
# Total number of correct predictions
self.add_state('correct', default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
def update(self, preds: torch.Tensor, target: torch.Tensor):
if preds.is_floating_point():
preds = preds.argmax(dim=-1)
matched = (preds == target)
if self.ignore_index is not None:
matched = matched[target != self.ignore_index]
self.total += matched.numel()
self.correct += matched.count_nonzero()
def compute(self):
"""
        Returns accuracy across all workers, computed from :attr:`correct` and :attr:`total`.
"""
if self.total.eq(0):
return None
return self.correct / self.total
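if __name__ == '__main__':
    # Minimal usage sketch (added for illustration). Here -100 plays the role of a padding
    # label, a common convention for positions that should not count towards accuracy.
    metric = AccuracyIgnoreIndex(ignore_index=-100)
    preds = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # argmax -> [1, 0, 1]
    target = torch.tensor([1, 0, -100])  # the last position is ignored
    metric.update(preds, target)
    print(metric.compute())  # tensor(1.), 2 correct out of the 2 counted positions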
| fly-master | src/metrics/accuracy.py |
import torch
import torch.nn as nn
from einops import rearrange
class RelativeL2(nn.Module):
def forward(self, x, y):
x = rearrange(x, 'b ... -> b (...)')
y = rearrange(y, 'b ... -> b (...)')
diff_norms = torch.linalg.norm(x - y, ord=2, dim=-1)
y_norms = torch.linalg.norm(y, ord=2, dim=-1)
return (diff_norms / y_norms).mean()
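if __name__ == '__main__':
    # Minimal sketch (added for illustration): scaling the target by 1% gives a relative
    # L2 error of exactly 0.01, independent of the magnitude of y.
    y = 100 * torch.randn(4, 32, 32)
    print(RelativeL2()(1.01 * y, y))  # tensor(0.0100)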
| fly-master | src/losses/relative_l2.py |
# Copied from https://github.com/HobbitLong/SupContrast/blob/master/losses.py
"""
Author: Yonglong Tian ([email protected])
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
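# Illustrative usage sketch (not part of the original module): `features` should be
# L2-normalized and shaped [bsz, n_views, dim]. Passing `labels` gives the supervised
# contrastive loss; passing neither `labels` nor `mask` falls back to the SimCLR loss.
if __name__ == '__main__':
    criterion = SupConLoss(temperature=0.07)
    features = nn.functional.normalize(torch.randn(8, 2, 128), dim=-1)
    labels = torch.randint(0, 4, (8,))
    print(criterion(features, labels=labels))  # supervised contrastive loss
    print(criterion(features))                 # unsupervised (SimCLR) loss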
| fly-master | src/losses/supcon.py |
from typing import Any, List
import torch
import hydra
from pytorch_lightning import LightningModule, LightningDataModule
from torchmetrics import MetricCollection
from einops import rearrange
from omegaconf import OmegaConf
from src.utils.utils import get_logger
from src.optim.param_grouping import group_parameters_for_optimizer
from src.utils.checkpoint import load_checkpoint
logger = get_logger(__name__)
class SequenceModel(LightningModule):
def __init__(self, cfg, model_cfg=None):
"""If model_cfg is passed, it will take precedence over cfg.model
"""
super().__init__()
# this line ensures params passed to LightningModule will be saved to ckpt
# it also allows to access params with 'self.hparams' attribute
self.save_hyperparameters(cfg)
self.cfg = cfg
self.model_cfg = model_cfg or self.cfg.model
self.instantiate_datamodule()
self.instantiate_model()
self.warmstart()
self.instantiate_loss()
self.instantiate_metrics()
def instantiate_datamodule(self):
logger.info(f"Instantiating datamodule <{self.cfg.datamodule._target_}>")
# Calling this self.datamodule will mess with PL since it also assigns self.datamodule
self._datamodule: LightningDataModule = hydra.utils.instantiate(self.cfg.datamodule)
self._datamodule.prepare_data()
self._datamodule.setup()
def instantiate_model(self):
if hasattr(self._datamodule, 'num_classes'):
self.model_cfg.num_classes = self._datamodule.num_classes
if (hasattr(self._datamodule, 'vocab_size')
and self.model_cfg.get('embedding_cfg', None) is not None):
self.model_cfg.embedding_cfg.num_embeddings = self._datamodule.vocab_size
logger.info(f"Instantiating model <{self.model_cfg._target_}>")
recursive = getattr(self.model_cfg, '_recursive_', False)
self.model = hydra.utils.instantiate(self.model_cfg, _recursive_=recursive)
def instantiate_loss(self):
loss_fn_cfg = self.cfg.train.get('loss_fn', {'_target_': 'torch.nn.CrossEntropyLoss'})
self.loss_fn = hydra.utils.instantiate(loss_fn_cfg)
loss_fn_val_cfg = self.cfg.train.get('loss_fn_val', loss_fn_cfg)
self.loss_fn_val = hydra.utils.instantiate(loss_fn_val_cfg)
def instantiate_metrics(self):
# use separate metric instance for train, val and test step
# to ensure a proper reduction over the epoch
if 'eval' in self.cfg and 'metrics' in self.cfg.eval:
metrics_cfg = self.cfg.eval.metrics
else:
metrics_cfg = {'acc': {'_target_': 'torchmetrics.Accuracy'}}
metrics = MetricCollection({name: hydra.utils.instantiate(cfg)
for name, cfg in metrics_cfg.items()})
self.train_metrics = metrics.clone(prefix='train/')
self.val_metrics = metrics.clone(prefix='val/')
self.test_metrics = metrics.clone(prefix='test/')
def warmstart(self):
if self.cfg.train.get('warmstart', None) is not None:
logger.info(f"Warm-starting with weights from {self.cfg.train.warmstart.path}")
strict = self.cfg.train.warmstart.get('strict', True)
state_dict = load_checkpoint(self.cfg.train.warmstart.path)
if self.cfg.train.warmstart.get('post_process', None) is not None:
state_dict = hydra.utils.instantiate(self.cfg.train.warmstart.post_process,
state_dict)
            load_return = self.model.load_state_dict(state_dict, strict=strict)
logger.info(load_return)
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def step(self, batch: Any, is_train=True):
try:
x, y, lengths = batch
except ValueError:
x, y = batch
lengths = None
output = self.forward(x) if lengths is None else self.forward(x, lengths=lengths)
loss = self.loss_fn(output, y) if is_train else self.loss_fn_val(output, y)
return loss, output, y
def shared_step(self, batch: Any, batch_idx: int, phase='train'):
loss, output, targets = self.step(batch, is_train=(phase == 'train'))
with torch.no_grad():
metrics = getattr(self, f'{phase}_metrics')(output, targets)
self.log(f"{phase}/loss", loss, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)
self.log_dict(metrics, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return {"loss": loss, "output": output, "targets": targets}
def training_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='train')
def validation_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='val')
def test_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='test')
def configure_optimizers(self):
if 'optimizer_param_grouping' in self.cfg.train: # Set zero weight decay for some params
parameters = group_parameters_for_optimizer(self.model, self.cfg.train.optimizer,
**self.cfg.train.optimizer_param_grouping)
else:
# parameters = self.model.parameters()
parameters = self.parameters() # [21-09-08] AG: this will train task specific parameters such as Retrieval head for AAN
optimizer = hydra.utils.instantiate(self.cfg.train.optimizer, parameters)
# Log optimizer info
for i, g in enumerate(optimizer.param_groups):
ntensors = len(g['params'])
nparams = sum(p.numel() for p in g['params'])
hparams = {k: v for k, v in g.items() if k != 'params'}
logger.info(f'Optimizer group {i}: {ntensors} tensors, {nparams} parameters, {hparams}')
if 'scheduler' not in self.cfg.train:
return optimizer
else:
# lr_scheduler should be called either every step (default) or every epoch
lr_scheduler = hydra.utils.instantiate(self.cfg.train.scheduler, optimizer)
return [optimizer], {'scheduler': lr_scheduler,
'interval': self.cfg.train.get('scheduler_interval', 'step'),
'monitor': self.cfg.train.get('scheduler_monitor', 'val/loss')}
class SequenceDualModel(SequenceModel):
def step(self, batch: Any, is_train=True):
x1, x2, y, lengths1, lengths2 = batch
output = self.forward(x1, x2, lengths1=lengths1, lengths2=lengths2)
loss = self.loss_fn(output, y) if is_train else self.loss_fn_val(output, y)
output = torch.argmax(output, dim=1)
return loss, output, y
class SequenceLMModel(SequenceModel):
def instantiate_model(self):
if (hasattr(self._datamodule, 'vocab_size')
and self.model_cfg.get('embedding_cfg', None) is not None):
self.model_cfg.embedding_cfg.num_embeddings = self._datamodule.vocab_size
logger.info(f"Instantiating model <{self.model_cfg._target_}>")
# Huggingface models need the config object to be instantiated first
config = hydra.utils.instantiate(self.model_cfg.pop('config'), _recursive_=False)
self.model = hydra.utils.instantiate(self.model_cfg, config, _recursive_=False)
def step(self, batch: Any, is_train=True):
x, y = batch
output = self.forward(x).logits
output = rearrange(output, '... C -> (...) C')
y = rearrange(y, '... -> (...)')
loss = self.loss_fn(output, y) if is_train else self.loss_fn_val(output, y)
return loss, output, y
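# Illustrative sketch (not an official config): the approximate DictConfig structure that
# SequenceModel reads. All _target_ paths below are placeholders; the real configs live
# under configs/ and are composed by Hydra.
if __name__ == '__main__':
    example_cfg = OmegaConf.create({
        'datamodule': {'_target_': 'path.to.SomeDataModule'},
        'model': {'_target_': 'path.to.SomeModel'},
        'train': {
            'optimizer': {'_target_': 'torch.optim.AdamW', 'lr': 1e-3, 'weight_decay': 0.1},
            # Optional: extra kwargs forwarded to group_parameters_for_optimizer
            # 'optimizer_param_grouping': {},
            'scheduler': {'_target_': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'T_max': 1000},
            'scheduler_interval': 'step',
        },
        'eval': {'metrics': {'acc': {'_target_': 'torchmetrics.Accuracy'}}},
    })
    print(OmegaConf.to_yaml(example_cfg))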
| fly-master | src/tasks/seq.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
from typing import Any, List, Sequence
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from src.utils.flops import has_deepspeed_profiling, has_fvcore_profiling
from src.utils.flops import profile_deepspeed, profile_fvcore
class FlopCount(Callback):
"""Counter the number of FLOPs used by the model
"""
def __init__(self, profilers: List[str] = ['fvcore', 'deepspeed'],
input_size: tuple = (3, 224, 224), device=None):
if not isinstance(profilers, Sequence):
profilers = [profilers]
if any(p not in ['fvcore', 'deepspeed'] for p in profilers):
raise NotImplementedError('Only support fvcore and deepspeed profilers')
if 'fvcore' in profilers and not has_fvcore_profiling:
raise ImportError('fvcore is not installed. Install it by running `pip install fvcore`')
elif 'deepspeed' in profilers and not has_deepspeed_profiling:
raise ImportError('deepspeed is not installed')
super().__init__()
self.profilers = profilers
self.input_size = tuple(input_size)
self.device = device
@rank_zero_only
def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
if 'fvcore' in self.profilers:
_, macs, _, acts = profile_fvcore(pl_module.to(self.device), input_size=self.input_size,
detailed=True)
trainer.logger.log_hyperparams({'GMACs': macs * 1e-9, 'MActs': acts * 1e-6})
if 'deepspeed' in self.profilers:
            macs, _ = profile_deepspeed(pl_module.to(self.device), input_size=self.input_size,
detailed=True)
if 'fvcore' not in self.profilers: # fvcore's MACs seem more accurate
trainer.logger.log_hyperparams({'GMACs': macs * 1e-9})
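# Illustrative usage sketch (not part of the original module): attach the callback so that
# GMACs (and activations, for fvcore) are logged once at the start of fit. Requires fvcore
# and/or deepspeed to be installed.
if __name__ == '__main__':
    trainer = Trainer(callbacks=[FlopCount(profilers=['fvcore'], input_size=(3, 224, 224))])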
| fly-master | src/callbacks/flop_count.py |
import subprocess
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if trainer.fast_dev_run:
raise Exception(
"Cannot use wandb callbacks since pytorch lightning disables loggers in `fast_dev_run=true` mode."
)
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all code files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str, use_git: bool = True):
"""
Args:
code_dir: the code directory
use_git: if using git, then upload all files that are not ignored by git.
                if not using git, then upload all '*.py' files
"""
self.code_dir = code_dir
self.use_git = use_git
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
if self.use_git:
# get .git folder
# https://alexwlchan.net/2020/11/a-python-function-to-ignore-a-path-with-git-info-exclude/
git_dir_path = Path(
subprocess.check_output(["git", "rev-parse", "--git-dir"]).strip().decode("utf8")
).resolve()
for path in Path(self.code_dir).resolve().rglob("*"):
if (
path.is_file()
# ignore files in .git
and not str(path).startswith(str(git_dir_path)) # noqa: W503
# ignore files ignored by git
and ( # noqa: W503
subprocess.run(["git", "check-ignore", "-q", str(path)]).returncode == 1
)
):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
else:
for path in Path(self.code_dir).resolve().rglob("*.py"):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_keyboard_interrupt(self, trainer, pl_module):
self.on_train_end(trainer, pl_module)
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in Path(self.ckpt_dir).rglob("*.ckpt"):
ckpts.add_file(str(path))
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(targets, preds, average=None)
r = recall_score(targets, preds, average=None)
p = precision_score(targets, preds, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, dim=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
| fly-master | src/callbacks/wandb_callbacks.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def set_affinity(trainer):
try:
from src.utils.gpu_affinity import set_affinity
nproc_per_node = torch.cuda.device_count()
affinity = set_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
# TD [2022-05-07] Somehow calling this causes GPU 0 to allocate extra ~800MB of memory per
# number of GPUs (e.g., 6.4GB of extra memory in a 8-GPU setup). H/t Dan.
# l2_promote()
    except Exception:
        # Best-effort: setting affinity is optional, so ignore any failure here.
        pass
class GpuAffinity(Callback):
"""Set GPU affinity and increase the L2 fetch granularity.
Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
"""
    # Set affinity in on_init_start so that it is correct from the start; it seems to
    # change afterwards, so set it again in setup so that it remains correct.
def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
set_affinity(trainer)
def on_init_start(self, trainer: Trainer) -> None:
set_affinity(trainer)
| fly-master | src/callbacks/gpu_affinity.py |
# Inspired by https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/utilities/grads.py
# However, they compute grad at every iteration (I think), and the .item() calls incur a lot of overhead
# (6-7% slow down on GPT-2 small). Instead we only compute for iterations where we need to log, and don't
# call .item() explicitly.
from typing import Any
from collections import OrderedDict
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
import torch
import torch.nn as nn
try:
from apex.contrib.layer_norm import FastLayerNorm
except ImportError:
FastLayerNorm = None
class NormMonitor(Callback):
"""Monitor the scales of weights and gradients.
"""
def __init__(self, layer_norm_only: bool = False):
super().__init__()
self.layer_norm_only = layer_norm_only
# Use on_before_optimizer_step instead of on_train_batch_start since there might be
# gradient accumulation and we only care about scale when it could change (i.e., optimizer.step).
@rank_zero_only
def on_before_optimizer_step(self, trainer: Trainer, pl_module, *args: Any, **kwargs: Any) -> None:
if not trainer._logger_connector.should_update_logs:
return
model = pl_module.model
named_parameters = {}
if self.layer_norm_only:
ln_modules = (nn.LayerNorm, nn.Embedding)
if FastLayerNorm is not None:
ln_modules += (FastLayerNorm,)
for mn, m in model.named_modules():
if isinstance(m, ln_modules):
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
named_parameters[fpn] = p
else:
named_parameters = dict(model.named_parameters())
stats = {}
param_l1_norm, grad_l1_norm = [], []
for param_name, param in named_parameters.items():
param_abs = param.abs()
param_abs_mean = param_abs.mean()
stats[f'stats/{param_name}_max'] = param_abs.max()
stats[f'stats/{param_name}_mean'] = param_abs_mean
param_l1_norm.append(param_abs_mean * param.numel())
if param.grad is not None:
# Gradient is already unscaled by the AMP loss scaler at this point
# https://github.com/Lightning-AI/lightning/pull/9606
param_grad_abs = param.grad.abs()
param_grad_abs_mean = param_grad_abs.mean()
stats[f'stats/{param_name}_grad_max'] = param_grad_abs.max()
stats[f'stats/{param_name}_grad_mean'] = param_grad_abs_mean
grad_l1_norm.append(param_grad_abs_mean * param.grad.numel())
stats['total_param_l1_norm'] = torch.stack(param_l1_norm).sum()
if grad_l1_norm:
stats['total_grad_l1_norm'] = torch.stack(grad_l1_norm).sum()
# Sort by params name
stats = OrderedDict(sorted(stats.items()))
if trainer.loggers is not None:
for logger in trainer.loggers:
logger.log_metrics(stats, step=trainer.fit_loop.epoch_loop._batches_that_stepped)
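# Illustrative usage sketch (not part of the original module): logs per-parameter weight and
# gradient scale statistics (restricted to LayerNorm/Embedding parameters when
# layer_norm_only=True) only at steps where the trainer would log anyway.
if __name__ == '__main__':
    trainer = Trainer(callbacks=[NormMonitor(layer_norm_only=True)])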
| fly-master | src/callbacks/norm_monitor.py |
# Inspired by https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/stochastic_weight_avg.py
# https://github.com/PyTorchLightning/Lightning-Bolts/blob/master/pl_bolts/callbacks/byol_updates.py
# https://forums.pytorchlightning.ai/t/adopting-exponential-moving-average-ema-for-pl-pipeline/488/2
# https://github.com/PyTorchLightning/pytorch-lightning/issues/8100
from typing import Dict, Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
from src.utils.ema import ExponentialMovingAverage
class EMACallback(Callback):
"""TD [2021-08-31]: saving and loading from checkpoint should work.
"""
def __init__(self, decay: float, use_num_updates: bool = True):
"""
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
super().__init__()
self.decay = decay
self.use_num_updates = use_num_updates
self.ema = None
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule"):
# It's possible that we already loaded EMA from the checkpoint
if self.ema is None:
self.ema = ExponentialMovingAverage([p for p in pl_module.parameters() if p.requires_grad],
decay=self.decay, use_num_updates=self.use_num_updates)
# Ideally we want on_after_optimizer_step but pytorch-lightning doesn't have it
# We only want to update when parameters are changing.
# Because of gradient accumulation, this doesn't happen every training step.
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11688
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
if (batch_idx + 1) % trainer.accumulate_grad_batches == 0:
self.ema.update()
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
# During the initial validation we don't have self.ema yet
if self.ema is not None:
self.ema.store()
self.ema.copy_to()
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.restore()
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.store()
self.ema.copy_to()
def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.restore()
def on_save_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
) -> Dict[str, Any]:
return self.ema.state_dict()
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule",
callback_state: Dict[str, Any]
) -> None:
if self.ema is None:
self.ema = ExponentialMovingAverage([p for p in pl_module.parameters() if p.requires_grad],
decay=self.decay, use_num_updates=self.use_num_updates)
self.ema.load_state_dict(callback_state)
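# Illustrative usage sketch (not part of the original module): EMA shadow weights are
# updated after each effective optimizer step and swapped in for validation/test, then
# restored for training.
if __name__ == '__main__':
    trainer = Trainer(callbacks=[EMACallback(decay=0.999)])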
| fly-master | src/callbacks/ema.py |
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class SpeedMonitor(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(self, intra_step_time: bool = True, inter_step_time: bool = True,
epoch_time: bool = True, verbose=False):
super().__init__()
self._log_stats = AttributeDict(
{
'intra_step_time': intra_step_time,
'inter_step_time': inter_step_time,
'epoch_time': epoch_time,
}
)
self.verbose = verbose
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_intra_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_inter_step_time = None
def on_test_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_inter_step_time = None
@rank_zero_only
def on_train_batch_start(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
batch: Any,
batch_idx: int,
dataloader_idx: int
) -> None:
if self._log_stats.intra_step_time:
self._snap_intra_step_time = time.time()
if not trainer._logger_connector.should_update_logs:
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["time/inter_step (ms)"] = (time.time() - self._snap_inter_step_time) * 1000
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if self.verbose and self._log_stats.intra_step_time and self._snap_intra_step_time:
pl_module.print(f"time/intra_step (ms): {(time.time() - self._snap_intra_step_time) * 1000}")
if not trainer._logger_connector.should_update_logs:
return
logs = {}
if self._log_stats.intra_step_time and self._snap_intra_step_time:
logs["time/intra_step (ms)"] = (time.time() - self._snap_intra_step_time) * 1000
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule",) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["time/epoch (s)"] = time.time() - self._snap_epoch_time
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
| fly-master | src/callbacks/speed_monitor.py |
| fly-master | src/callbacks/__init__.py
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/fault_tolerance.py
from typing import Any
from pathlib import Path
import pytorch_lightning as pl
class ModelCheckpointMine(pl.callbacks.model_checkpoint.ModelCheckpoint):
def __init__(self, *args, fault_tolerant=False, **kwargs):
super().__init__(*args, **kwargs)
self.fault_tolerant = fault_tolerant
def on_exception(self, trainer: "pl.Trainer", *_: Any, **__: Any) -> None:
if self.fault_tolerant:
# overwrite if necessary
trainer.save_checkpoint(str(Path(self.dirpath) / '.pl_auto_save.ckpt'))
def teardown(self, trainer: "pl.Trainer", *_: Any, **__: Any) -> None:
if self.fault_tolerant:
trainer.strategy.remove_checkpoint(str(Path(self.dirpath) / '.pl_auto_save.ckpt'))
# TD [2022-07-17] I was trying to make resuming from standard checkpoint fault-tolerant.
# However, when it resumes it's off by 1 iteration. My attempt to fix it in seq.py (below) didn't work.
# So I decided to just copy _FaultToleranceCheckpoint and just save on_exception.
# def on_save_checkpoint(self, checkpoint):
# # TD [2022-07-12] The "completed" counter is off by 1 so when it resumes
# # it's off by 1 iteration. However, the data is still off by 1 iteration, probably
# # because the dataloader_state_dict['counter'] is off by @batch_size, and idk how
# # to fix it cleanly.
# checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['total']['completed'] += 1
# checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['current']['completed'] += 1
# checkpoint['loops']['fit_loop']['epoch_loop.state_dict']['_batches_that_stepped'] += 1
# checkpoint['loops']['fit_loop']['epoch_loop.state_dict']['dataloader_state_dict'][0]['state'][0]['num_batches_fetched'] += 1
| fly-master | src/callbacks/model_checkpoint.py |
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(Callback):
"""Log the number of parameters of the model
"""
def __init__(self, total_params_log: bool = True, trainable_params_log: bool = True,
non_trainable_params_log: bool = True):
super().__init__()
self._log_stats = AttributeDict(
{
'total_params_log': total_params_log,
'trainable_params_log': trainable_params_log,
'non_trainable_params_log': non_trainable_params_log,
}
)
@rank_zero_only
def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
logs = {}
if self._log_stats.total_params_log:
logs["model/params_total"] = sum(p.numel() for p in pl_module.parameters())
if self._log_stats.trainable_params_log:
logs["model/params_trainable"] = sum(p.numel() for p in pl_module.parameters()
if p.requires_grad)
if self._log_stats.non_trainable_params_log:
logs["model/params_not_trainable"] = sum(p.numel() for p in pl_module.parameters()
if not p.requires_grad)
if trainer.logger is not None:
trainer.logger.log_hyperparams(logs)
| fly-master | src/callbacks/params_log.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/lr_monitor.py.
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
class LossScaleMonitor(Callback):
"""Monitor the loss scale for AMP (fp16).
"""
# Use on_before_optimizer_step instead of on_train_batch_start since there might be
# gradient accumulation and we only care about the loss scale when it could change (i.e.,
# optimizer.step).
@rank_zero_only
def on_before_optimizer_step(self, trainer: Trainer, *args: Any, **kwargs: Any) -> None:
if not trainer._logger_connector.should_update_logs:
return
if hasattr(trainer, 'precision_plugin') and hasattr(trainer.precision_plugin, 'scaler'):
scaler = trainer.precision_plugin.scaler
if scaler is not None:
stats = {
'scaler/scale': scaler.get_scale(),
'scaler/growth_tracker': scaler._get_growth_tracker(),
}
if trainer.loggers is not None:
for logger in trainer.loggers:
logger.log_metrics(stats, step=trainer.fit_loop.epoch_loop._batches_that_stepped)
| fly-master | src/callbacks/loss_scale_monitor.py |
from typing import Any, List, Dict, Tuple, Union, Optional, Callable, cast
from pathlib import Path, PurePath
from PIL import Image
from fs.tarfs import TarFS
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, random_split, get_worker_info
from einops.layers.torch import Rearrange, Reduce
# [2021-08-19] TD: Somehow I get segfault if I import pytorch_lightning *after* torchvision
from pytorch_lightning import LightningDataModule
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.datasets.folder import has_file_allowed_extension
# There's an empty file in the dataset
PATHFINDER_BLACKLIST = {'pathfinder32/curv_baseline/imgs/0/sample_172.png'}
def pil_loader_grayscale(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
return Image.open(f).convert('L')
class PathFinderDataset(ImageFolder):
"""Path Finder dataset."""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
super().__init__(root, loader=pil_loader_grayscale, transform=transform,
target_transform=target_transform, is_valid_file=is_valid_file)
def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
"""Override this so it doesn't call the parent's method
"""
return [], {}
@staticmethod
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
This can be overridden to e.g. read files from a compressed zip file instead of from the disk.
Args:
directory (str): root dataset directory, corresponding to ``self.root``.
class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
extensions (optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
is_valid_file (optional): A function that takes path of a file
and checks if the file is a valid file
                (used to check for corrupt files); both extensions and
is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``class_to_idx`` is empty.
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
FileNotFoundError: In case no valid file was found for any class.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
# We ignore class_to_idx
directory = Path(directory).expanduser()
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
path_list = sorted(list((directory / 'metadata').glob('*.npy')), key=lambda path: int(path.stem))
if not path_list:
raise FileNotFoundError(f'No metadata found at {str(directory)}')
        # Get the 'pathfinder32/curv_baseline' part of data_dir
data_dir_stem = Path().joinpath(*directory.parts[-2:])
instances = []
for metadata_file in path_list:
with open(metadata_file, 'r') as f:
for metadata in f.read().splitlines():
metadata = metadata.split()
image_path = Path(metadata[0]) / metadata[1]
if (is_valid_file(str(image_path))
and str(data_dir_stem / image_path) not in PATHFINDER_BLACKLIST):
label = int(metadata[3])
instances.append((str(directory / image_path), label))
return instances
class PathFinderTarDataset(PathFinderDataset):
"""Path Finder dataset."""
def __init__(
self,
archive: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
root_in_archive: str = '',
) -> None:
self.root_in_archive = PurePath(root_in_archive)
# open tar file. in a multiprocessing setting (e.g. DataLoader workers), we
# have to open one file handle per worker (stored as the tar_obj dict), since
# when the multiprocessing method is 'fork', the workers share this TarDataset.
# we want one file handle per worker because TarFile is not thread-safe.
# As done in https://github.com/jotaf98/simple-tar-dataset/blob/master/tardataset.py
worker = get_worker_info()
worker = worker.id if worker else None
self.tar_fs = {worker: TarFS(str(Path(archive).expanduser()))}
ImageFolder.__init__(self, archive, loader=None, transform=transform,
target_transform=target_transform, is_valid_file=is_valid_file)
def make_dataset(
self,
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
This can be overridden to e.g. read files from a compressed zip file instead of from the disk.
Args:
directory (str): archive dataset directory, corresponding to ``self.archive``.
class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
extensions (optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
is_valid_file (optional): A function that takes path of a file
and checks if the file is a valid file
                (used to check for corrupt files); both extensions and
is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``class_to_idx`` is empty.
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
FileNotFoundError: In case no valid file was found for any class.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
# We ignore directory and class_to_idx
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
metadata_fs = self.get_tar_fs().opendir(str(self.root_in_archive / 'metadata'))
path_list = sorted(list(metadata_fs.filterdir('/', files=['*.npy'])),
key=lambda path_info: int(path_info.stem))
if not path_list:
raise FileNotFoundError(f'No metadata found in {str(self.root)}')
        # Get the 'pathfinder32/curv_baseline' part of data_dir
data_dir_stem = PurePath().joinpath(*self.root_in_archive.parts[-2:])
instances = []
for metadata_file in path_list:
for metadata in metadata_fs.readtext(metadata_file.name).splitlines():
metadata = metadata.split()
image_path = PurePath(metadata[0]) / metadata[1]
if (is_valid_file(str(image_path))
and str(data_dir_stem / image_path) not in PATHFINDER_BLACKLIST):
label = int(metadata[3])
instances.append((str(self.root_in_archive / image_path), label))
return instances
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
with self.get_tar_fs().openbin(path) as f:
sample = Image.open(f).convert('L') # Open in grayscale
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def get_tar_fs(self):
worker = get_worker_info()
worker = worker.id if worker else None
if worker not in self.tar_fs:
self.tar_fs[worker] = TarFS(str(Path(self.root).expanduser()))
return self.tar_fs[worker]
def __del__(self):
"""Close the TarFile file handles on exit."""
for o in self.tar_fs.values():
o.close()
def __getstate__(self):
"""Serialize without the TarFile references, for multiprocessing compatibility."""
state = dict(self.__dict__)
state['tar_fs'] = {}
return state
class PathFinder(LightningDataModule):
num_classes = 2
def __init__(self, data_dir, resolution, level, sequential=False, to_int=False, pool=1, val_split=0.1,
test_split=0.1, batch_size=32, num_workers=1, seed=42, shuffle=False,
pin_memory=False, drop_last=False, **kwargs):
"""If data_dir points to a tar file (e.g., pathfinder/pathfinder.tar), we support reading
directly from that tar file without extraction.
That tar file should have the same structure as the pathfinder dir: e.g., it should contain
pathfinder32/curv_contour_length_14 in the archive.
"""
super().__init__(**kwargs)
assert resolution in [32, 64, 128, 256]
self.resolution = resolution
assert level in ['easy', 'intermediate', 'hard']
self.level = level
level_dir = {'easy': 'curv_baseline', 'intermediate': 'curv_contour_length_9',
'hard': 'curv_contour_length_14'}[level]
self.prefix_dir = Path(f'pathfinder{resolution}') / level_dir
self.data_dir = Path(data_dir).expanduser()
self.use_tar_dataset = self.data_dir.suffix == '.tar'
self.sequential = sequential
self.to_int = to_int
self.pool = pool
self.val_split = val_split
self.test_split = test_split
self.batch_size = batch_size
self.num_workers = num_workers
self.seed = seed
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if not sequential:
self.dims = (1, resolution, resolution)
else:
self.dims = (resolution * resolution, 1) if not to_int else (resolution * resolution,)
if to_int:
self.vocab_size = 256
def default_transforms(self):
transform_list = [transforms.ToTensor()]
if self.pool > 1:
transform_list.append(Reduce('1 (h h2) (w w2) -> 1 h w', 'mean', h2=self.pool, w2=self.pool))
if self.to_int:
transform_list.append(transforms.Lambda(lambda x: (x * 255).long()))
if self.sequential:
# If to_int, it makes more sense to get rid of the channel dimension
transform_list.append(Rearrange('1 h w -> (h w)') if self.to_int
else Rearrange('1 h w -> (h w) 1'))
return transforms.Compose(transform_list)
def prepare_data(self):
if self.use_tar_dataset:
if not self.data_dir.is_file():
raise FileNotFoundError(f"""
Tar file {str(self.data_dir)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then compress the pathfinderX (X=32, 64, 128, 256) directory into a tar file:
tar -cvf pathfinder32.tar pathfinder32
Then point data_dir to the pathfinder32.tar file.
""")
else:
if not (self.data_dir / self.prefix_dir).is_dir():
raise FileNotFoundError(f"""
Directory {str(self.data_dir / self.prefix_dir)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the directory that contains pathfinderX, where X is the
resolution (either 32, 64, 128, or 256).
""")
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
# [2021-08-18] TD: I ran into RuntimeError: Too many open files.
# https://github.com/pytorch/pytorch/issues/11201
torch.multiprocessing.set_sharing_strategy('file_system')
if self.use_tar_dataset:
dataset = PathFinderTarDataset(str(self.data_dir), root_in_archive=str(self.prefix_dir),
transform=self.default_transforms())
else:
dataset = PathFinderDataset(self.data_dir / self.prefix_dir,
transform=self.default_transforms())
len_dataset = len(dataset)
val_len = int(self.val_split * len_dataset)
test_len = int(self.test_split * len_dataset)
train_len = len_dataset - val_len - test_len
self.dataset_train, self.dataset_val, self.dataset_test = random_split(
dataset, [train_len, val_len, test_len],
generator=torch.Generator().manual_seed(self.seed)
)
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
return self._data_loader(self.dataset_train, shuffle=self.shuffle)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test)
def _data_loader(self, dataset: Dataset, shuffle: bool = False) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
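# Illustrative usage sketch (not part of the original module): the data_dir below is a
# placeholder and must point to the extracted LRA release (or a pathfinderX.tar archive)
# as described in prepare_data().
if __name__ == '__main__':
    dm = PathFinder(data_dir='~/data/lra_release', resolution=32, level='hard',
                    sequential=True, to_int=True, batch_size=32)
    dm.prepare_data()
    dm.setup()
    x, y = next(iter(dm.train_dataloader()))
    print(x.shape, y.shape)  # e.g., torch.Size([32, 1024]) token ids and torch.Size([32]) labels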
| fly-master | src/datamodules/pathfinder.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
from itertools import chain
from pathlib import Path
import pickle
from typing import Any, List, Union
from multiprocessing.shared_memory import SharedMemory
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader, Dataset
from transformers import AutoTokenizer
from datasets import load_dataset
from pytorch_lightning import LightningDataModule
from src.datamodules.datasets.lm_dataset import LMDataset
from src.datamodules.fault_tolerant_sampler import RandomFaultTolerantSampler
from src.datamodules.fault_tolerant_sampler import FaultTolerantDistributedSampler
from src.datamodules.datasets.detokenizer import DATASET_TOKENIZATION_REGISTRY
from src.utils.utils import get_logger
logger = get_logger()
# https://github.com/numpy/numpy/issues/18294
class SHMArray(np.ndarray):  # Copied from https://numpy.org/doc/stable/user/basics.subclassing.html#slightly-more-realistic-example-attribute-added-to-existing-array
def __new__(cls, input_array, shm=None):
obj = np.asarray(input_array).view(cls)
obj.shm = shm
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.shm = getattr(obj, 'shm', None)
class LMDataModule(LightningDataModule):
def __init__(self, dataset_name, tokenizer_name, dataset_config_name=None, max_length=1024,
cache_dir=None, val_ratio=0.0005, val_split_seed=2357, add_eos=True,
detokenize=False, batch_size=32, batch_size_eval=None, num_workers=1, shuffle=False,
pin_memory=False, drop_last=False, fault_tolerant=False):
super().__init__()
self.dataset_name = dataset_name
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
def prepare_data(self):
if self.cache_dir is None: # Just download the dataset
load_dataset(self.dataset_name, self.dataset_config_name)
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
concat_ids, self.tokenizer = self.process_dataset()
self.vocab_size = len(self.tokenizer)
# Create all splits
self.dataset_train, self.dataset_val, self.dataset_test = [
LMDataset(concat_ids[split], seq_len=self.max_length)
for split in ['train', 'validation', 'test']
]
def process_dataset(self):
cache_dir = None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
raw_datasets = load_dataset(self.dataset_name, self.dataset_config_name)
# https://github.com/stanford-crfm/mistral/blob/main/src/corpora/auto.py
if 'validation' not in raw_datasets:
assert "train" in raw_datasets, "You must have train in raw_datasets to make a validation raw_datasets"
raw_datasets = raw_datasets["train"].train_test_split(
test_size=self.val_ratio, seed=self.val_split_seed,
shuffle=True # Otherwise test will be at the end of the dataset
)
raw_datasets['validation'] = raw_datasets['test']
# [2021-12-25] TD: Running the detokenizer on wikitext-103 makes ppl worse
# (GPT2-small val ppl after 10 epochs ~22 -> ~25)
# However, it's useful for zero-shot transfer from Openwebtext,
# as after detokenization it's closer to Openwebtext's format.
# https://github.com/stanford-crfm/mistral/issues/12
if self.detokenize:
if self.dataset_name in DATASET_TOKENIZATION_REGISTRY:
detokenizer = DATASET_TOKENIZATION_REGISTRY[self.dataset_name]
raw_datasets = raw_datasets.map(
lambda example: {'text': detokenizer(example['text'])},
num_proc=max(self.num_workers, 1),
desc='Running detokenizer on dataset'
)
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name, use_fast=True)
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
# [2021-12-25] TD: For wikitext, don't need to add the EOS since each example already ends
# with '\n', and there are no other '\n' in the examples.
# assert all([t.count('\n') == 1 for t in raw_datasets['train']['text'] if t])
# Add EOS token to the end of the text if the text is not empty
# https://github.com/stanford-crfm/mistral/issues/91
# https://github.com/stanford-crfm/mistral/pull/98
if self.add_eos:
add_eos = lambda seq: (seq + tokenizer.eos_token) if seq else seq
add_eos_batched = lambda seqs: [add_eos(seq) for seq in seqs]
tokenize = lambda example: tokenizer(add_eos_batched(example[text_column_name]))
else:
tokenize = lambda example: tokenizer(example[text_column_name])
# tokenized_datasets = raw_datasets.map(
# tokenize,
# batched=True,
# num_proc=max(self.num_workers, 1),
# remove_columns=column_names,
# desc="Running tokenizer on dataset",
# )
dtype = np.uint16 if tokenizer.vocab_size < 64 * 1024 else np.int32
def tokenize_concat(examples):
# We just need 'input_ids', not 'attention_mask' (since it's all 1)
input_ids = np.fromiter(chain(*tokenize(examples)['input_ids']), dtype=dtype)
# Need to return a list since we're doing batched processing
return {'input_ids': [input_ids], 'len': [len(input_ids)]}
tokenized_datasets = raw_datasets.map(
tokenize_concat,
batched=True,
num_proc=max(self.num_workers, 1),
remove_columns=column_names,
desc="Running tokenizer on dataset",
)
# Concatenate all input_ids into an array in shared memory
def write_ids_to_shm(example, shm_name, array_len):
shm = SharedMemory(name=shm_name)
shm_arr = np.ndarray((array_len,), dtype=dtype, buffer=shm.buf)
start_idx = example['len_offset'] - len(example['input_ids'])
shm_arr[start_idx:example['len_offset']] = example['input_ids']
shm.close()
concat_ids = {}
for name, ds in tokenized_datasets.items():
tokenized_datasets[name] = ds.add_column('len_offset', np.cumsum(ds['len']))
array_len = tokenized_datasets[name][-1]['len_offset']
shm = SharedMemory(create=True, size=array_len * np.dtype(dtype).itemsize)
shm_name = shm.name
tokenized_datasets[name].map(
write_ids_to_shm,
fn_kwargs={'shm_name': shm_name, 'array_len': array_len},
batched=False,
num_proc=max(self.num_workers, 1),
desc="Concatenating examples",
)
shm_arr = np.ndarray((array_len,), dtype=dtype, buffer=shm.buf)
# We need to keep a reference to the shared memory, otherwise it gets garbage-collected
# when it goes out of scope, and that memory is gone.
# https://github.com/numpy/numpy/issues/18294
concat_ids[name] = SHMArray(shm_arr, shm=shm)
if cache_dir is not None:
self._save_to_cache(concat_ids, tokenizer, cache_dir)
return concat_ids, tokenizer
def _save_to_cache(self, concat_ids, tokenizer, cache_dir):
cache_dir.mkdir(parents=True, exist_ok=True)
logger.info(f'Saving to cache at {str(cache_dir)}')
for k, v in concat_ids.items():
np.save(cache_dir / f'{k}.npy', v)
with open(cache_dir / 'tokenizer.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger.info(f'Load from cache at {str(cache_dir)}')
concat_ids = {split: np.load(cache_dir / f'{split}.npy', mmap_mode='r')
for split in ['train', 'validation', 'test']}
with open(cache_dir / 'tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
return concat_ids, tokenizer
@property
def _cache_dir_name(self):
return f'tokenizer_name-{self.tokenizer_name}-val_ratio-{self.val_ratio}-val_split_seed-{self.val_split_seed}-add_eos-{self.add_eos}-detokenize-{self.detokenize}'
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
if self.shuffle and self.fault_tolerant:
# sampler = RandomFaultTolerantSampler(self.dataset_train)
sampler = FaultTolerantDistributedSampler(self.dataset_train)
shuffle = False
else:
sampler = None
shuffle = self.shuffle
return self._data_loader(self.dataset_train, batch_size=self.batch_size,
shuffle=shuffle, sampler=sampler)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test, batch_size=self.batch_size_eval)
def _data_loader(self, dataset: Dataset, batch_size: int, shuffle: bool = False,
sampler=None) -> DataLoader:
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=1, # Data is already in memory, we don't need many workers
shuffle=shuffle,
sampler=sampler,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
persistent_workers=True
)
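# Illustrative usage sketch (not part of the original module): tokenizes WikiText-103 with
# the GPT-2 tokenizer and serves fixed-length chunks. The first call downloads and tokenizes
# the dataset, which can take a while; the exact batch contents depend on LMDataset.
if __name__ == '__main__':
    dm = LMDataModule(dataset_name='wikitext', dataset_config_name='wikitext-103-raw-v1',
                      tokenizer_name='gpt2', max_length=1024, batch_size=8, num_workers=4)
    dm.prepare_data()
    dm.setup()
    batch = next(iter(dm.train_dataloader()))  # typically an (input_ids, labels) pair
    print(dm.vocab_size)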
| fly-master | src/datamodules/language_modeling_hf.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict, Value
from pytorch_lightning import LightningDataModule
class AAN(LightningDataModule):
num_classes = 2
def __init__(self, data_dir=current_dir, cache_dir=None, max_length=4000, append_bos=False,
append_eos=False, batch_size=32, num_workers=1, shuffle=False, pin_memory=False,
drop_last=False, **kwargs):
super().__init__(**kwargs)
self.data_dir = Path(data_dir).expanduser()
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.append_bos = append_bos
self.append_eos = append_eos
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
def prepare_data(self):
if self.cache_dir is None:
for split in ['train', 'eval', 'test']:
split_path = self.data_dir / f'new_aan_pairs.{split}.tsv'
if not split_path.is_file():
raise FileNotFoundError(f"""
File {str(split_path)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the tsv_data directory.
""")
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
# [2021-08-18] TD: I ran into RuntimeError: Too many open files.
# https://github.com/pytorch/pytorch/issues/11201
torch.multiprocessing.set_sharing_strategy('file_system')
dataset, self.tokenizer, self.vocab = self.process_dataset()
self.vocab_size = len(self.vocab)
dataset.set_format(type='torch', columns=['input_ids1', 'input_ids2', 'label'])
self.dataset_train, self.dataset_val, self.dataset_test = (
dataset['train'], dataset['val'], dataset['test']
)
def collate_batch(batch):
xs1, xs2, ys = zip(*[(data['input_ids1'], data['input_ids2'], data['label'])
for data in batch])
lengths1 = torch.tensor([len(x) for x in xs1])
lengths2 = torch.tensor([len(x) for x in xs2])
xs1 = nn.utils.rnn.pad_sequence(xs1, padding_value=self.vocab['<pad>'], batch_first=True)
xs2 = nn.utils.rnn.pad_sequence(xs2, padding_value=self.vocab['<pad>'], batch_first=True)
ys = torch.tensor(ys)
return xs1, xs2, ys, lengths1, lengths2
self.collate_fn = collate_batch
def process_dataset(self):
cache_dir = None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset('csv',
data_files={'train': str(self.data_dir / 'new_aan_pairs.train.tsv'),
'val': str(self.data_dir / 'new_aan_pairs.eval.tsv'),
'test': str(self.data_dir / 'new_aan_pairs.test.tsv')},
delimiter='\t',
column_names=['label', 'input1_id', 'input2_id', 'text1', 'text2'],
keep_in_memory=True)
dataset = dataset.remove_columns(['input1_id', 'input2_id'])
new_features = dataset['train'].features.copy()
new_features['label'] = Value('int32')
dataset = dataset.cast(new_features)
tokenizer = list # Just convert a string to a list of chars
# Account for <bos> and <eos> tokens
max_length = self.max_length - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {'tokens1': tokenizer(example['text1'])[:max_length],
'tokens2': tokenizer(example['text2'])[:max_length]}
dataset = dataset.map(tokenize, remove_columns=['text1', 'text2'], keep_in_memory=True,
load_from_cache_file=False, num_proc=max(self.num_workers, 1))
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset['train']['tokens1'] + dataset['train']['tokens2'],
specials=(['<pad>', '<unk>']
+ (['<bos>'] if self.append_bos else [])
+ (['<eos>'] if self.append_eos else []))
)
vocab.set_default_index(vocab['<unk>'])
encode = lambda text: vocab(
(['<bos>'] if self.append_bos else []) + text + (['<eos>'] if self.append_eos else [])
)
numericalize = lambda example: {'input_ids1': encode(example['tokens1']),
'input_ids2': encode(example['tokens2'])}
dataset = dataset.map(numericalize, remove_columns=['tokens1', 'tokens2'],
keep_in_memory=True, load_from_cache_file=False,
num_proc=max(self.num_workers, 1))
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f'Saving to cache at {str(cache_dir)}')
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
with open(cache_dir / 'vocab.pkl', 'wb') as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f'Load from cache at {str(cache_dir)}')
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
with open(cache_dir / 'vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
@property
def _cache_dir_name(self):
return f'max_length-{self.max_length}-append_bos-{self.append_bos}-append_eos-{self.append_eos}'
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
return self._data_loader(self.dataset_train, shuffle=self.shuffle)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test)
def _data_loader(self, dataset: Dataset, shuffle: bool = False) -> DataLoader:
return DataLoader(
dataset,
collate_fn=self.collate_fn,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
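# A minimal usage sketch (illustrative, not part of the original file); `tsv_dir` is a
# placeholder for the directory containing the new_aan_pairs.*.tsv files.
#
#     dm = AAN(data_dir=tsv_dir, max_length=4000, batch_size=32)
#     dm.prepare_data()
#     dm.setup()
#     xs1, xs2, ys, lengths1, lengths2 = next(iter(dm.train_dataloader()))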
| fly-master | src/datamodules/aan.py |
# Adapted from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/data_utils.py
# https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/data_utils.py
# https://github.com/pytorch/examples/blob/master/word_language_model/main.py
# https://github.com/HazyResearch/hippo/blob/master/dataloaders/lm.py
import subprocess
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from src.datamodules.datasets.vocabulary import OpenAIVocab, Vocab
from src.utils.distributed import sync_workers
from src.utils.utils import get_logger
logger = get_logger()
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', mem_len=None, ext_len=None, warmup=True,
roll_seed=None, # roll data based on seed
batch_first=False,
shard_id=0, num_shards=1, # For distributed training
):
"""
data -- LongTensor -- the LongTensor is strictly ordered
            bsz -- int -- batch size *per shard* (i.e. per GPU)
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.mem_len = mem_len
self.warmup = warmup
self.shard_id = shard_id
self.num_shards = num_shards
self.roll_seed = roll_seed
self.batch_first = batch_first
self.device = device
total_bsz = bsz * num_shards
# Work out how cleanly we can divide the dataset into total_bsz parts.
n_step = data.size(0) // total_bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data[:n_step * total_bsz]
# Evenly divide the data across the bsz batches.
self.data = data.view(total_bsz, -1).t().contiguous().pin_memory() # (..., batch_size)
if mem_len and warmup:
self.warmup_batches = (mem_len + bptt - 1) // bptt
self.warmup_elems = self.warmup_batches * bptt
warmup_data = self.data.roll((self.warmup_elems, 1), (0, 1))[:self.warmup_elems]
self.data = torch.cat((warmup_data, self.data))
# Partition data for DistributedDataParallel
self.data = self.data.chunk(num_shards, dim=1)[shard_id]
# Number of mini-batches
# Need to subtract 1 because target is data shifted by 1
self.n_batch = (self.data.size(0) - 1 + self.bptt - 1) // self.bptt
self.last_iter = None
self.epoch = -1
def roll(self, seed):
rng = torch.Generator()
rng.manual_seed(seed)
for i in range(self.data.size(1)):
row = self.data[:, i]
shift = torch.randint(0, self.data.size(0), (1,), generator=rng)
row = torch.cat((row[shift:], row[:shift]))
self.data[:, i] = row
def get_batch(self, i, bptt=None):
""" Get batch starting at token index i """
if bptt is None:
bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx].to(self.device, non_blocking=True)
target = self.data[i+1:i+1+seq_len].to(self.device, non_blocking=True)
if self.mem_len and self.warmup:
warm = i >= self.warmup_elems
else:
warm = True
if self.batch_first:
return data.t(), target.t(), seq_len, warm
else:
return data, target, seq_len, warm
def get_fixlen_iter(self, start=0):
if start != 0:
start += self.bptt
for i in range(start, self.data.size(0) - 1, self.bptt):
self.last_iter = i
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_length = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_length, max(min_len, int(np.random.normal(bptt, std))))
            data, target, seq_len, warm = self.get_batch(i, bptt)
            i += seq_len
            # get_batch already transposes when batch_first=True, so yield as-is
            yield data, target, seq_len, warm
if i >= self.data.size(0) - 2:
break
def __iter__(self):
self.epoch += 1
if self.roll_seed is not None:
self.roll(self.roll_seed + self.epoch)
return self.get_fixlen_iter()
def __len__(self):
return self.n_batch
class WikiText2(LightningDataModule):
name = 'wt2'
vocab_kwargs = {'special': ['<eos>'], 'lower_case': False}
encode_kwargs = {'ordered': True}
def __init__(self, data_dir, vocab_type='word', batch_size=32, max_length=1024,
val_batch_size=None, val_max_length=None, roll_seed=None, batch_first=False):
super().__init__()
self.data_dir = Path(data_dir).expanduser()
if vocab_type not in ['word', 'bpe']:
raise RuntimeError('Unsupported vocab')
self.vocab_type = vocab_type
self.batch_size = batch_size
self.max_length = max_length
self.val_batch_size = val_batch_size if val_batch_size is not None else self.batch_size
self.val_max_length = val_max_length if val_max_length is not None else self.max_length
self.roll_seed = roll_seed
self.batch_first = batch_first
def prepare_data(self):
if not self.data_dir.is_dir():
subprocess.run([str(current_dir / 'datasets' / 'getdata.sh'), self.name,
str(self.data_dir.parent.absolute())], check=True)
if not (self.data_dir / self._cache_file_name).is_file():
self.process_dataset()
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
self.vocab, self.dataset_train, self.dataset_val, self.dataset_test = self.process_dataset()
def process_dataset(self):
if (self.data_dir / self._cache_file_name).is_file():
return self._load_from_cache()
else:
logger.info(f'Producing dataset {self.name}...')
if self.vocab_type == 'word':
vocab = Vocab(**self.vocab_kwargs)
elif self.vocab_type == 'bpe':
vocab = OpenAIVocab()
else:
raise RuntimeError('Unsupported vocab')
vocab = self._vocab_count(vocab)
vocab.build_vocab()
train = vocab.encode_file(str(self.data_dir / 'train.txt'), **self.encode_kwargs)
val = vocab.encode_file(str(self.data_dir / 'valid.txt'), **self.encode_kwargs)
test = vocab.encode_file(str(self.data_dir / 'test.txt'), **self.encode_kwargs)
self._save_to_cache((vocab, train, val, test))
return vocab, train, val, test
def _vocab_count(self, vocab):
vocab.count_file(self.data_dir / 'train.txt')
vocab.count_file(self.data_dir / 'valid.txt')
vocab.count_file(self.data_dir / 'test.txt')
return vocab
def _save_to_cache(self, obj):
cache_path = self.data_dir / self._cache_file_name
with sync_workers() as rank:
if rank == 0:
try:
torch.save(obj, cache_path)
logger.info(f'Saved dataset to {cache_path}')
except:
pass
def _load_from_cache(self):
cache_path = self.data_dir / self._cache_file_name
if cache_path.is_file():
logger.info(f'Loading cached dataset from {str(cache_path)}')
return torch.load(cache_path)
else:
raise FileNotFoundError(f'Cache file {str(cache_path)} does not exist.')
@property
def _cache_file_name(self):
return f'cache.{self.vocab_type}.pt'
def train_dataloader(self, *args, **kwargs):
shard_id = self.trainer.global_rank
num_shards = self.trainer.world_size
return LMOrderedIterator(self.dataset_train, bsz=self.batch_size, bptt=self.max_length,
roll_seed=self.roll_seed, batch_first=self.batch_first,
shard_id=shard_id, num_shards=num_shards)
def val_dataloader(self, *args, **kwargs):
shard_id = self.trainer.global_rank
num_shards = self.trainer.world_size
return LMOrderedIterator(self.dataset_val, bsz=self.val_batch_size,
bptt=self.val_max_length, batch_first=self.batch_first,
shard_id=shard_id, num_shards=num_shards)
def test_dataloader(self, *args, **kwargs):
shard_id = self.trainer.global_rank
num_shards = self.trainer.world_size
return LMOrderedIterator(self.dataset_test, bsz=self.val_batch_size,
bptt=self.val_max_length, batch_first=self.batch_first,
shard_id=shard_id, num_shards=num_shards)
class WikiText103(WikiText2):
name = 'wt103'
def _vocab_count(self, vocab):
vocab.count_file(self.data_dir / 'train.txt')
return vocab
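# A minimal usage sketch (illustrative, not part of the original file); `wt103_dir` is a
# placeholder path. LMOrderedIterator yields (data, target, seq_len, warm) where data and
# target have shape (bptt, bsz) by default, or (bsz, bptt) when batch_first=True.
#
#     dm = WikiText103(data_dir=wt103_dir, vocab_type='word', batch_size=16, max_length=192)
#     dm.prepare_data()
#     dm.setup()
#     it = LMOrderedIterator(dm.dataset_train, bsz=16, bptt=192)
#     data, target, seq_len, warm = next(iter(it))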
| fly-master | src/datamodules/language_modeling.py |
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/imagenet_datamodule.py
import os
from pathlib import Path
from typing import Any, List, Union, Callable, Optional
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
from pl_bolts.transforms.dataset_normalizations import imagenet_normalization
from torchvision import transforms
from torchvision.datasets import ImageFolder
class ImagenetDataModule(LightningDataModule):
"""
.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2017/08/
Sample-of-Images-from-the-ImageNet-Dataset-used-in-the-ILSVRC-Challenge.png
:width: 400
:alt: Imagenet
Specs:
- 1000 classes
- Each image is (3 x varies x varies) (here we default to 3 x 224 x 224)
Imagenet train, val and test dataloaders.
The train set is the imagenet train.
    The val and test sets are both the official imagenet validation set.
Example::
from pl_bolts.datamodules import ImagenetDataModule
dm = ImagenetDataModule(IMAGENET_PATH)
model = LitModel()
Trainer().fit(model, datamodule=dm)
"""
name = "imagenet"
def __init__(
self,
data_dir: str,
cache_dir: Optional[str] = None,
image_size: int = 224,
num_workers: int = 0,
batch_size: int = 32,
shuffle: bool = True,
pin_memory: bool = True,
drop_last: bool = False,
dali: Optional[str] = None,
*args: Any,
**kwargs: Any,
) -> None:
"""
Args:
            data_dir: path to the imagenet dataset (a directory, or a .tar/.zip archive)
            cache_dir: optional directory used to cache the archive dataset index
            image_size: final image size
            num_workers: how many data workers
            batch_size: batch size
            shuffle: If true shuffles the train data every epoch
            pin_memory: If true, the data loader will copy Tensors into CUDA pinned memory before
                returning them
            drop_last: If true drops the last incomplete batch
            dali: None, 'cpu', or 'gpu'; if set, use NVIDIA DALI for the data loading pipeline
"""
super().__init__(*args, **kwargs)
self.image_size = image_size
self.dims = (3, self.image_size, self.image_size)
self.data_dir = Path(data_dir).expanduser()
self.cache_dir = cache_dir
self.use_archive_dataset = (self.data_dir.suffix == '.tar'
or self.data_dir.suffix == '.zip')
self.num_workers = num_workers
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
assert dali in [None, 'cpu', 'gpu']
if dali is not None and self.use_archive_dataset:
raise NotImplementedError('dali is not compatible with archive dataset')
self.dali = dali
@property
def num_classes(self) -> int:
"""
Return:
1000
"""
return 1000
def _verify_splits(self, data_dir: str, split: str) -> None:
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f"a {split} Imagenet split was not found in {data_dir},"
f" make sure the folder contains a subfolder named {split}"
)
def prepare_data(self) -> None:
"""This method already assumes you have imagenet2012 downloaded. It validates the data using the meta.bin.
.. warning:: Please download imagenet on your own first.
"""
if not self.use_archive_dataset:
self._verify_splits(self.data_dir, "train")
self._verify_splits(self.data_dir, "val")
else:
if not self.data_dir.is_file():
raise FileNotFoundError(f"""Archive file {str(self.data_dir)} not found.""")
def setup(self, stage: Optional[str] = None) -> None:
"""Creates train, val, and test dataset."""
if self.dali is not None:
return
if stage == "fit" or stage is None:
train_transforms = (self.train_transform() if self.train_transforms is None
else self.train_transforms)
val_transforms = (self.val_transform() if self.val_transforms is None
else self.val_transforms)
if not self.use_archive_dataset:
self.dataset_train = ImageFolder(self.data_dir / 'train',
transform=train_transforms)
self.dataset_val = ImageFolder(self.data_dir / 'val', transform=val_transforms)
else:
from src.datamodules.datasets.archive_imagefolder import ArchiveImageFolder
self.dataset_train = ArchiveImageFolder(str(self.data_dir), cache_dir=self.cache_dir,
root_in_archive='train',
transform=train_transforms)
self.dataset_val = ArchiveImageFolder(str(self.data_dir), cache_dir=self.cache_dir,
root_in_archive='val',
transform=val_transforms)
if stage == "test" or stage is None:
test_transforms = (self.val_transform() if self.test_transforms is None
else self.test_transforms)
if not self.use_archive_dataset:
self.dataset_test = ImageFolder(self.data_dir / 'val', transform=test_transforms)
else:
from src.datamodules.datasets.archive_imagefolder import ArchiveImageFolder
self.dataset_test = ArchiveImageFolder(str(self.data_dir), cache_dir=self.cache_dir,
root_in_archive='val',
transform=test_transforms)
def train_transform(self) -> Callable:
"""The standard imagenet transforms.
.. code-block:: python
transforms.Compose([
transforms.RandomResizedCrop(self.image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
"""
preprocessing = transforms.Compose(
[
transforms.RandomResizedCrop(self.image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
imagenet_normalization(),
]
)
return preprocessing
def val_transform(self) -> Callable:
"""The standard imagenet transforms for validation.
.. code-block:: python
transforms.Compose([
transforms.Resize(self.image_size + 32),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
"""
preprocessing = transforms.Compose(
[
transforms.Resize(self.image_size + 32),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
imagenet_normalization(),
]
)
return preprocessing
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
if self.dali is None:
return self._data_loader(self.dataset_train, shuffle=self.shuffle)
else:
return self._dali_loader(is_train=True, shuffle=self.shuffle)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
if self.dali is None:
return self._data_loader(self.dataset_val)
else:
return self._dali_loader(is_train=False)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
if self.dali is None:
return self._data_loader(self.dataset_test)
else:
return self._dali_loader(is_train=False)
def _data_loader(self, dataset: Dataset, shuffle: bool = False) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
# Spinning up worker is slow if we use archive dataset
# When we don't use archive dataset, I get crashes if I don't set this to True
# https://github.com/PyTorchLightning/pytorch-lightning/issues/4471
# https://github.com/PyTorchLightning/pytorch-lightning/issues/8821
# TD [2021-09-01] I think this bug is finally fixed in pytorch-lightning 1.4.5
# https://github.com/PyTorchLightning/pytorch-lightning/pull/9239
persistent_workers=True
)
def _dali_loader(self, is_train: bool, shuffle: bool = False) -> DataLoader:
from src.datamodules.imagenet_dali_loader import get_dali_loader
# (TD): [2021-08-28] I'm not sure but I think these DALI settings only work with DDP
device_id = self.trainer.local_rank
shard_id = self.trainer.global_rank
num_shards = self.trainer.world_size
return get_dali_loader(data_dir=self.data_dir / ('train' if is_train else 'val'),
crop=self.image_size,
size=self.image_size + 32,
is_train=is_train,
batch_size=self.batch_size,
shuffle=shuffle,
drop_last=self.drop_last,
num_threads=self.num_workers,
device_id=device_id,
shard_id=shard_id,
num_shards=num_shards,
dali_cpu=self.dali == 'cpu')
| fly-master | src/datamodules/imagenet.py |
 | fly-master | src/datamodules/__init__.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict
from pytorch_lightning import LightningDataModule
# LRA tokenizer renames ']' to 'X' and delete parentheses as their tokenizer removes
# non-alphanumeric characters.
# https://github.com/google-research/long-range-arena/blob/264227cbf9591e39dd596d2dc935297a2070bdfe/lra_benchmarks/listops/input_pipeline.py#L46
def listops_tokenizer(s):
return s.translate({ord(']'): ord('X'), ord('('): None, ord(')'): None}).split()
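# Illustrative example (not in the original source): the tokenizer above drops parentheses and
# rewrites the closing bracket, e.g.
#     listops_tokenizer('( ( [MAX 2 9 ] ) )')  ->  ['[MAX', '2', '9', 'X']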
class ListOps(LightningDataModule):
num_classes = 10
def __init__(self, data_dir=current_dir, cache_dir=None, max_length=2000, append_bos=False,
append_eos=False, batch_size=32, num_workers=1, shuffle=False, pin_memory=False,
drop_last=False, **kwargs):
super().__init__(**kwargs)
self.data_dir = Path(data_dir).expanduser()
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.append_bos = append_bos
self.append_eos = append_eos
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
def prepare_data(self):
if self.cache_dir is None:
for split in ['train', 'val', 'test']:
split_path = self.data_dir / f'basic_{split}.tsv'
if not split_path.is_file():
raise FileNotFoundError(f"""
File {str(split_path)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the listops-1000 directory.
""")
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
dataset, self.tokenizer, self.vocab = self.process_dataset()
self.vocab_size = len(self.vocab)
dataset.set_format(type='torch', columns=['input_ids', 'Target'])
self.dataset_train, self.dataset_val, self.dataset_test = (
dataset['train'], dataset['val'], dataset['test']
)
def collate_batch(batch):
xs, ys = zip(*[(data['input_ids'], data['Target']) for data in batch])
lengths = torch.tensor([len(x) for x in xs])
xs = nn.utils.rnn.pad_sequence(xs, padding_value=self.vocab['<pad>'], batch_first=True)
ys = torch.tensor(ys)
return xs, ys, lengths
self.collate_fn = collate_batch
def process_dataset(self):
cache_dir = None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset('csv',
data_files={'train': str(self.data_dir / 'basic_train.tsv'),
'val': str(self.data_dir / 'basic_val.tsv'),
'test': str(self.data_dir / 'basic_test.tsv')},
delimiter='\t',
keep_in_memory=True)
tokenizer = listops_tokenizer
# Account for <bos> and <eos> tokens
max_length = self.max_length - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {'tokens': tokenizer(example['Source'])[:max_length]}
dataset = dataset.map(tokenize, remove_columns=['Source'], keep_in_memory=True,
load_from_cache_file=False, num_proc=max(self.num_workers, 1))
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset['train']['tokens'],
specials=(['<pad>', '<unk>']
+ (['<bos>'] if self.append_bos else [])
+ (['<eos>'] if self.append_eos else []))
)
vocab.set_default_index(vocab['<unk>'])
numericalize = lambda example: {'input_ids': vocab(
(['<bos>'] if self.append_bos else [])
+ example['tokens']
+ (['<eos>'] if self.append_eos else [])
)}
dataset = dataset.map(numericalize, remove_columns=['tokens'], keep_in_memory=True,
load_from_cache_file=False, num_proc=max(self.num_workers, 1))
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f'Saving to cache at {str(cache_dir)}')
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
with open(cache_dir / 'vocab.pkl', 'wb') as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f'Load from cache at {str(cache_dir)}')
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
with open(cache_dir / 'vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
@property
def _cache_dir_name(self):
return f'max_length-{self.max_length}-append_bos-{self.append_bos}-append_eos-{self.append_eos}'
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
return self._data_loader(self.dataset_train, shuffle=self.shuffle)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test)
def _data_loader(self, dataset: Dataset, shuffle: bool = False) -> DataLoader:
return DataLoader(
dataset,
collate_fn=self.collate_fn,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
| fly-master | src/datamodules/listops.py |
import torch
from timm.data import Mixup
from timm.data.mixup import mixup_target
class TimmMixup(Mixup):
""" Wrap timm.data.Mixup that avoids the assert that batch size must be even.
"""
def __call__(self, x, target):
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
# We move the assert from the beginning of the function to here
assert len(x) % 2 == 0, 'Batch size should be even when using this'
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
# Another change is to set the right device here
target = mixup_target(target, self.num_classes, lam, self.label_smoothing,
device=target.device)
return x, target
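# A minimal usage sketch (illustrative, not part of the original file). The constructor
# arguments come from timm.data.Mixup; the particular values below are assumptions for the
# example.
#
#     mixup_fn = TimmMixup(mixup_alpha=0.8, cutmix_alpha=1.0, label_smoothing=0.1,
#                          num_classes=1000)
#     images, targets = mixup_fn(images, targets)  # targets become soft labels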
| fly-master | src/datamodules/timm_mixup.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict
from pytorch_lightning import LightningDataModule
# Adapted from https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/2_lstm.ipynb
class IMDB(LightningDataModule):
dataset_name = 'imdb'
num_classes = 2
def __init__(self, data_dir=current_dir, cache_dir=None, max_length=512, tokenizer_type='word',
vocab_min_freq=1, append_bos=False, append_eos=False, val_split=0.0,
batch_size=32, num_workers=1, seed=42, shuffle=False, pin_memory=False,
drop_last=False, **kwargs):
"""If cache_dir is not None, we'll cache the processed dataset there.
"""
super().__init__(**kwargs)
self.data_dir = Path(data_dir).expanduser()
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
assert tokenizer_type in ['word', 'char'], f'tokenizer_type {tokenizer_type} not supported'
self.tokenizer_type = tokenizer_type
self.vocab_min_freq = vocab_min_freq
self.append_bos = append_bos
self.append_eos = append_eos
self.val_split = val_split
self.batch_size = batch_size
self.num_workers = num_workers
self.seed = seed
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
def prepare_data(self):
if self.cache_dir is None: # Just download the dataset
load_dataset(self.dataset_name, cache_dir=self.data_dir)
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == 'test' and hasattr(self, 'dataset_test'):
return
dataset, self.tokenizer, self.vocab = self.process_dataset()
self.vocab_size = len(self.vocab)
dataset.set_format(type='torch', columns=['input_ids', 'label'])
# Create all splits
dataset_train, dataset_test = dataset['train'], dataset['test']
if self.val_split == 0.0: # Use test set as val set, as done in the LRA paper
self.dataset_train, self.dataset_val = dataset_train, dataset_test
else:
train_val = dataset_train.train_test_split(test_size=self.val_split, seed=self.seed)
self.dataset_train, self.dataset_val = train_val['train'], train_val['test']
self.dataset_test = dataset_test
def collate_batch(batch):
xs, ys = zip(*[(data['input_ids'], data['label']) for data in batch])
lengths = torch.tensor([len(x) for x in xs])
xs = nn.utils.rnn.pad_sequence(xs, padding_value=self.vocab['<pad>'], batch_first=True)
ys = torch.tensor(ys)
return xs, ys, lengths
self.collate_fn = collate_batch
def process_dataset(self):
cache_dir = None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset(self.dataset_name, cache_dir=self.data_dir)
dataset = DatasetDict(train=dataset['train'], test=dataset['test'])
if self.tokenizer_type == 'word':
tokenizer = torchtext.data.utils.get_tokenizer('spacy', language='en_core_web_sm')
else: # self.tokenizer_type == 'char'
tokenizer = list # Just convert a string to a list of chars
# Account for <bos> and <eos> tokens
max_length = self.max_length - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {'tokens': tokenizer(example['text'])[:max_length]}
dataset = dataset.map(tokenize, remove_columns=['text'], keep_in_memory=True,
load_from_cache_file=False, num_proc=max(self.num_workers, 1))
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset['train']['tokens'],
min_freq=self.vocab_min_freq,
specials=(['<pad>', '<unk>']
+ (['<bos>'] if self.append_bos else [])
+ (['<eos>'] if self.append_eos else []))
)
vocab.set_default_index(vocab['<unk>'])
numericalize = lambda example: {'input_ids': vocab(
(['<bos>'] if self.append_bos else [])
+ example['tokens']
+ (['<eos>'] if self.append_eos else [])
)}
dataset = dataset.map(numericalize, remove_columns=['tokens'], keep_in_memory=True,
load_from_cache_file=False, num_proc=max(self.num_workers, 1))
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f'Saving to cache at {str(cache_dir)}')
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
with open(cache_dir / 'vocab.pkl', 'wb') as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f'Load from cache at {str(cache_dir)}')
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / 'tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
with open(cache_dir / 'vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
@property
def _cache_dir_name(self):
return f'max_length-{self.max_length}-tokenizer_type-{self.tokenizer_type}-vocab_min_freq-{self.vocab_min_freq}-append_bos-{self.append_bos}-append_eos-{self.append_eos}'
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
return self._data_loader(self.dataset_train, shuffle=self.shuffle)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test)
def _data_loader(self, dataset: Dataset, shuffle: bool = False) -> DataLoader:
return DataLoader(
dataset,
collate_fn=self.collate_fn,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
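# A minimal usage sketch (illustrative, not part of the original file); the argument values
# are examples, not the repo's actual configuration.
#
#     dm = IMDB(tokenizer_type='char', max_length=4096)
#     dm.prepare_data()
#     dm.setup()
#     xs, ys, lengths = next(iter(dm.train_dataloader()))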
| fly-master | src/datamodules/imdb.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSampler):
def __init__(self, *args, generator=None, **kwargs):
# generator = torch.Generator().manual_seed(seed)
# super().__init__(*args, generator=generator, **kwargs)
# TD [2022-07-17]: We don't force the seed to be zero. We generate random seed,
# which should be reproducible if pl.seed_everything was called before hand.
# This means that changing the seed of the experiment will also change the
# sampling order.
if generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator().manual_seed(seed)
super().__init__(*args, generator=generator, **kwargs)
self.counter = 0
self.restarting = False
def state_dict(self):
return {"random_state": self.state, "counter": self.counter}
def load_state_dict(self, state_dict):
self.generator.set_state(state_dict.get("random_state"))
self.counter = state_dict["counter"]
self.restarting = True
def __len__(self):
return len(self.data_source) - self.counter
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
self.state = self.generator.get_state()
indices = torch.randperm(n, generator=self.generator).tolist()
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
for index in indices:
self.counter += 1
yield index
self.counter = 0
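# Illustrative sketch (not from the original file) of how the fault-tolerant sampler is meant
# to be used: save its state_dict alongside a checkpoint, then load it on restart to resume
# mid-epoch without revisiting already-seen indices.
#
#     sampler = RandomFaultTolerantSampler(dataset)
#     loader = DataLoader(dataset, sampler=sampler, batch_size=32)
#     ...                                     # consume some batches
#     state = sampler.state_dict()            # e.g. inside a checkpoint hook
#     ...
#     sampler.load_state_dict(state)          # on restart, skips already-seen indices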
class FaultTolerantDistributedSampler(DistributedSampler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
self.restarting = False
def state_dict(self):
return {"epoch": self.epoch, "counter": self.counter}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
self.counter = state_dict["counter"]
self.restarting = True
def __len__(self) -> int:
return self.num_samples - self.counter
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
for index in indices:
self.counter += 1
yield index
# class DistributedSampler(Sampler):
# r"""Sampler that restricts data loading to a subset of the dataset.
# It is especially useful in conjunction with
# :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
# process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a
# :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
# original dataset that is exclusive to it.
# .. note::
# Dataset is assumed to be of constant size and that any instance of it always
# returns the same elements in the same order.
# Args:
# dataset: Dataset used for sampling.
# num_replicas (int, optional): Number of processes participating in
# distributed training. By default, :attr:`world_size` is retrieved from the
# current distributed group.
# rank (int, optional): Rank of the current process within :attr:`num_replicas`.
# By default, :attr:`rank` is retrieved from the current distributed
# group.
# shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
# indices.
# seed (int, optional): random seed used to shuffle the sampler if
# :attr:`shuffle=True`. This number should be identical across all
# processes in the distributed group. Default: ``0``.
# drop_last (bool, optional): if ``True``, then the sampler will drop the
# tail of the data to make it evenly divisible across the number of
# replicas. If ``False``, the sampler will add extra indices to make
# the data evenly divisible across the replicas. Default: ``False``.
# .. warning::
# In distributed mode, calling the :meth:`set_epoch` method at
# the beginning of each epoch **before** creating the :class:`DataLoader` iterator
# is necessary to make shuffling work properly across multiple epochs. Otherwise,
# the same ordering will be always used.
# Example::
# >>> sampler = DistributedSampler(dataset) if is_distributed else None
# >>> loader = DataLoader(dataset, shuffle=(sampler is None),
# ... sampler=sampler)
# >>> for epoch in range(start_epoch, n_epochs):
# ... if is_distributed:
# ... sampler.set_epoch(epoch)
# ... train(loader)
# """
# def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,
# rank: Optional[int] = None, shuffle: bool = True,
# seed: int = 0, drop_last: bool = False) -> None:
# if num_replicas is None:
# if not dist.is_available():
# raise RuntimeError("Requires distributed package to be available")
# num_replicas = dist.get_world_size()
# if rank is None:
# if not dist.is_available():
# raise RuntimeError("Requires distributed package to be available")
# rank = dist.get_rank()
# if rank >= num_replicas or rank < 0:
# raise ValueError(
# "Invalid rank {}, rank should be in the interval"
# " [0, {}]".format(rank, num_replicas - 1))
# self.dataset = dataset
# self.num_replicas = num_replicas
# self.rank = rank
# self.epoch = 0
# self.drop_last = drop_last
# # If the dataset length is evenly divisible by # of replicas, then there
# # is no need to drop any data, since the dataset will be split equally.
# if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
# # Split to nearest available length that is evenly divisible.
# # This is to ensure each rank receives the same amount of data when
# # using this Sampler.
# self.num_samples = math.ceil(
# (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
# )
# else:
# self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type]
# self.total_size = self.num_samples * self.num_replicas
# self.shuffle = shuffle
# self.seed = seed
# def __iter__(self) -> Iterator[T_co]:
# if self.shuffle:
# # deterministically shuffle based on epoch and seed
# g = torch.Generator()
# g.manual_seed(self.seed + self.epoch)
# indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
# else:
# indices = list(range(len(self.dataset))) # type: ignore[arg-type]
# if not self.drop_last:
# # add extra samples to make it evenly divisible
# padding_size = self.total_size - len(indices)
# if padding_size <= len(indices):
# indices += indices[:padding_size]
# else:
# indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
# else:
# # remove tail of data to make it evenly divisible.
# indices = indices[:self.total_size]
# assert len(indices) == self.total_size
# # subsample
# indices = indices[self.rank:self.total_size:self.num_replicas]
# assert len(indices) == self.num_samples
# return iter(indices)
# def __len__(self) -> int:
# return self.num_samples
# class RandomSampler(Sampler[int]):
# r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
# If with replacement, then user can specify :attr:`num_samples` to draw.
# Args:
# data_source (Dataset): dataset to sample from
# replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
# num_samples (int): number of samples to draw, default=`len(dataset)`.
# generator (Generator): Generator used in sampling.
# """
# data_source: Sized
# def __init__(self, data_source: Sized, num_samples: Optional[int] = None, generator=None) -> None:
# self.data_source = data_source
# self._num_samples = num_samples
# self.generator = generator
# @property
# def num_samples(self) -> int:
# # dataset size might change at runtime
# if self._num_samples is None:
# return len(self.data_source)
# return self._num_samples
# def __iter__(self) -> Iterator[int]:
# n = len(self.data_source)
# if self.generator is None:
# seed = int(torch.empty((), dtype=torch.int64).random_().item())
# generator = torch.Generator()
# generator.manual_seed(seed)
# else:
# generator = self.generator
# for _ in range(self.num_samples // n):
# yield from torch.randperm(n, generator=generator).tolist()
# yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n]
# def __len__(self) -> int:
# return self.num_samples
# @pytest.mark.parametrize(
# ["train_dataset_cls", "val_dataset_cls"],
# [
# ([RandomFaultTolerantDataset, RandomFaultTolerantDataset], [RandomFaultTolerantDataset]),
# ],
# )
# @pytest.mark.parametrize("val_check_interval", [0.5])
# @mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "2"})
# def test_fault_tolerant_manual_mode(val_check_interval, train_dataset_cls, val_dataset_cls, tmpdir):
# class TestModel(BoringModel):
# def __init__(self, should_fail: bool = False):
# super().__init__()
# self.layer = torch.nn.Linear(1, 2)
# self.should_fail = should_fail
# self.batches = []
# def training_step(self, batch, batch_idx):
# if self.should_fail and batch_idx == 7:
# raise CustomException
# self.batches.append(batch)
# losses = []
# for b in batch:
# losses.append(super().training_step(b, batch_idx)["loss"])
# return torch.stack(losses).mean()
# def validation_step(self, batch, batch_idx, dataloader_idx=0):
# pass
# validation_epoch_end = None
# def _create_dataloader_kwargs(self, dataset_class, dataset_len, seed, num_workers):
# dl_kwargs = {}
# dl_kwargs["dataset"] = dataset_class(dataset_len, 1, seed=seed)
# dl_kwargs["sampler"] = RandomFaultTolerantSampler(dl_kwargs["dataset"], seed=seed)
# dl_kwargs["num_workers"] = num_workers
# dl_kwargs["batch_size"] = 1
# return dl_kwargs
# def train_dataloader(self):
# return [
# DataLoader(
# **self._create_dataloader_kwargs(
# dataset_class, 10, seed, seed + 1 if val_check_interval == 1.0 else 0
# )
# )
# for seed, dataset_class in enumerate(train_dataset_cls)
# ]
# def val_dataloader(self):
# return [
# DataLoader(**self._create_dataloader_kwargs(dataset_class, 1, seed, 0))
# for seed, dataset_class in enumerate(val_dataset_cls)
# ]
# def configure_optimizers(self):
# optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.001)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
# return [optimizer], [lr_scheduler]
# seed_everything(42)
# model = TestModel()
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=val_check_interval)
# trainer.fit(model)
# total_batches = model.batches
# total_weight = deepcopy(model.layer.weight)
# trainer.train_dataloader = None
# seed_everything(42)
# model = TestModel(should_fail=True)
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=val_check_interval)
# with pytest.raises(CustomException):
# trainer.fit(model)
# trainer.train_dataloader = None
# failed_batches = model.batches
# failed_weight = deepcopy(model.layer.weight)
# checkpoint_path = str(tmpdir / ".pl_auto_save.ckpt")
# assert os.path.exists(checkpoint_path)
# seed_everything(42)
# model = TestModel()
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=val_check_interval)
# trainer.fit(model, ckpt_path=checkpoint_path)
# trainer.train_dataloader = None
# restart_batches = model.batches
# torch_test_assert_close(total_batches, failed_batches + restart_batches)
# assert not torch.equal(total_weight, failed_weight)
# assert torch.equal(total_weight, model.layer.weight)
| fly-master | src/datamodules/fault_tolerant_sampler.py |
from typing import Optional, Tuple
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split
from torchvision.datasets import MNIST
from torchvision.transforms import transforms
class MNISTDataModule(LightningDataModule):
"""
Example of LightningDataModule for MNIST dataset.
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir: str = "data/",
train_val_test_split: Tuple[int, int, int] = (55_000, 5_000, 10_000),
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
):
super().__init__()
self.data_dir = data_dir
self.train_val_test_split = train_val_test_split
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.transforms = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
# self.dims is returned when you call datamodule.size()
self.dims = (1, 28, 28)
self.data_train: Optional[Dataset] = None
self.data_val: Optional[Dataset] = None
self.data_test: Optional[Dataset] = None
@property
def num_classes(self) -> int:
return 10
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage: Optional[str] = None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning separately when using `trainer.fit()` and `trainer.test()`!
The `stage` can be used to differentiate whether the `setup()` is called before trainer.fit()` or `trainer.test()`."""
if not self.data_train or not self.data_val or not self.data_test:
trainset = MNIST(self.data_dir, train=True, transform=self.transforms)
testset = MNIST(self.data_dir, train=False, transform=self.transforms)
dataset = ConcatDataset(datasets=[trainset, testset])
self.data_train, self.data_val, self.data_test = random_split(
dataset, self.train_val_test_split, generator=torch.Generator().manual_seed(42)
)
def train_dataloader(self):
return DataLoader(
dataset=self.data_train,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
dataset=self.data_val,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
def test_dataloader(self):
return DataLoader(
dataset=self.data_test,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
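# A minimal usage sketch (illustrative, not part of the original file):
#
#     dm = MNISTDataModule(data_dir="data/", batch_size=64)
#     dm.prepare_data()
#     dm.setup()
#     x, y = next(iter(dm.train_dataloader()))   # x: (64, 1, 28, 28), y: (64,)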
| fly-master | src/datamodules/mnist_datamodule.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
# [2021-06-30] TD: Somehow I get segfault if I import pl_bolts *after* torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from torchvision import transforms, datasets
from src.utils.utils import get_logger
from src.utils.tuples import to_2tuple
# From https://github.com/PyTorchLightning/lightning-bolts/blob/bd392ad858039290c72c20cc3f10df39384e90b9/pl_bolts/transforms/dataset_normalizations.py#L20
def cifar10_normalization():
return transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
)
def cifar10_grayscale_normalization():
return transforms.Normalize(mean=122.6 / 255.0, std=61.0 / 255.0)
def cifar100_normalization():
return transforms.Normalize(
mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]],
)
def cifar100_grayscale_normalization():
return transforms.Normalize(mean=124.3 / 255.0, std=63.9 / 255.0)
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/cifar10_datamodule.py
class CIFAR10(CIFAR10DataModule):
default_image_size = (32, 32)
def __init__(self, data_dir=current_dir, sequential=False, grayscale=False,
data_augmentation=None, image_size=32, to_int=False, **kwargs):
super().__init__(data_dir, **kwargs)
self.data_augmentation = data_augmentation
self.grayscale = grayscale
self.sequential = sequential
self.to_int = to_int
self.image_size = to_2tuple(image_size)
logger = get_logger()
logger.info(f'Datamodule {self.__class__}: normalize={self.normalize}')
if to_int:
assert not self.normalize, 'to_int option is not compatible with normalize option'
        self._set_augmentation(data_augmentation)
self.dims = self._calculate_dimensions()
if to_int and grayscale:
self.vocab_size = 256
def default_transforms(self):
transform_list = [] if not self.grayscale else [transforms.Grayscale()]
transform_list.append(transforms.ToTensor())
if self.normalize:
transform_list.append(self.normalize_fn())
if self.to_int:
transform_list.append(transforms.Lambda(lambda x: (x * 255).long()))
if self.sequential:
# If grayscale and to_int, it makes more sense to get rid of the channel dimension
transform_list.append(Rearrange('1 h w -> (h w)') if self.grayscale and self.to_int
else Rearrange('c h w -> (h w) c'))
return transforms.Compose(transform_list)
def normalize_fn(self):
return cifar10_normalization() if not self.grayscale else cifar10_grayscale_normalization()
def _set_augmentation(self, data_augmentation=None):
assert data_augmentation in [None, 'standard', 'autoaugment']
augment_list = []
if self.image_size != self.default_image_size:
augment_list.append(transforms.Resize(self.image_size))
self.val_transforms = self.test_transforms = transforms.Compose(
augment_list + self.default_transforms().transforms
)
if data_augmentation is not None:
if data_augmentation == 'standard':
augment_list += [
transforms.RandomCrop(self.image_size, padding=4),
transforms.RandomHorizontalFlip(),
]
elif data_augmentation == 'autoaugment':
from src.utils.autoaug import CIFAR10Policy
augment_list += [CIFAR10Policy()]
# By default it only converts to Tensor and normalizes
self.train_transforms = transforms.Compose(augment_list
+ self.default_transforms().transforms)
def _calculate_dimensions(self):
nchannels = 3 if not self.grayscale else 1
if not self.sequential:
return (nchannels, self.image_size[0], self.image_size[1])
else:
length = self.image_size[0] * self.image_size[1]
return (length, nchannels) if not (self.grayscale and self.to_int) else (length,)
class CIFAR100(CIFAR10):
dataset_cls = datasets.CIFAR100
@property
def num_classes(self):
return 100
def normalize_fn(self):
return (cifar100_normalization() if not self.grayscale
else cifar100_grayscale_normalization())
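# A minimal usage sketch (illustrative, not part of the original file): combining the
# sequential/grayscale/to_int options gives sequential CIFAR with integer pixel tokens.
#
#     dm = CIFAR10(sequential=True, grayscale=True, to_int=True, normalize=False)
#     # dm.dims == (1024,) and dm.vocab_size == 256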
| fly-master | src/datamodules/cifar.py |
# Adapted from https://github.com/NVIDIA/DALI/blob/main/docs/examples/use_cases/pytorch/resnet50/main.py
# and https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Classification/ConvNets/image_classification/dataloaders.py
# and https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/frameworks/pytorch/pytorch-lightning.html
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
except ImportError:
raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI.")
@pipeline_def
def create_dali_pipeline(data_dir, crop, size, shard_id, num_shards, dali_cpu=False, is_train=True,
shuffle=False):
images, labels = fn.readers.file(file_root=data_dir,
shard_id=shard_id,
num_shards=num_shards,
random_shuffle=shuffle,
pad_last_batch=True,
name="Reader")
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
# ask nvJPEG to preallocate memory for the biggest sample in ImageNet for CPU and GPU to avoid reallocations in runtime
device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
# ask HW NVJPEG to allocate memory ahead for the biggest image in the data set to avoid reallocations in runtime
preallocate_width_hint = 5980 if decoder_device == 'mixed' else 0
preallocate_height_hint = 6430 if decoder_device == 'mixed' else 0
if is_train:
images = fn.decoders.image_random_crop(images,
device=decoder_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
preallocate_width_hint=preallocate_width_hint,
preallocate_height_hint=preallocate_height_hint,
random_aspect_ratio=[0.75, 4.0 / 3.0],
random_area=[0.08, 1.0],
num_attempts=100)
# INTERP_TRIANGULAR produces results much closer to torchvision default (bilinear) mode.
# For example, on T2T-ViT-7, I get 71.576 if using torchvision loader, 71.574 if using
# INTERP_TRIANGULAR, and 71.0 if using INTERP_LINEAR.
images = fn.resize(images,
device=dali_device,
resize_x=crop,
resize_y=crop,
interp_type=types.INTERP_TRIANGULAR)
mirror = fn.random.coin_flip(probability=0.5)
else:
images = fn.decoders.image(images,
device=decoder_device,
output_type=types.RGB)
images = fn.resize(images,
device=dali_device,
size=size,
mode="not_smaller",
interp_type=types.INTERP_TRIANGULAR)
mirror = False
images = fn.crop_mirror_normalize(images.gpu(),
dtype=types.FLOAT,
output_layout="CHW",
crop=(crop, crop),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
mirror=mirror)
labels = labels.gpu()
return images, labels
class DALIClassificationIteratorWrapper(DALIClassificationIterator):
"""Wrap it to return tuple instead of dictionary, and to squeeze the labels.
"""
def __init__(self, *kargs, is_train=True, **kvargs):
super().__init__(*kargs, **kvargs)
self._is_train = is_train
def __next__(self):
out = super().__next__()
# DDP is used so only one pipeline per process
# also we need to transform dict returned by DALIClassificationIterator to iterable
# and squeeze the labels
out = out[0]
# TD [2021-08-28] Without .clone(), I get garbage results (acc=0.1%).
# I think it's because DALI replaces the buffer content with the next batch before the
# backward pass.
return out['data'].clone(), out['label'].squeeze(-1).long().clone()
# HACK: TD [2021-08-29] Pytorch-lightning relies on the length of the dataloader to count
# how many times to advance the dataloader (but only for evaluation). However, DALI iterator
# requires advancing to the end every time before resetting.
# So there's a bug here. Suppose that there are 10 batches.
# PL will only advance 10 times, but DALI iterator requires calling __next__ 11 times. On the 11th
# time, the DALI iterator will raise StopIteration. This means that from PL's perspective,
# the val loader has 10 batches on epoch 1, but 0 batches on epoch 2, and 10 batches on epoch 3,
# etc. As a result, validation is run every 2 epochs instead of every epoch.
# We fake the length (increase by 1) to trick PL into calling __next__ 11 times each epoch, so
# that it plays well with DALI iterator.
def __len__(self):
return super().__len__() + int(not self._is_train)
def get_dali_loader(data_dir, crop, size, is_train, batch_size, shuffle, drop_last, num_threads,
device_id, shard_id, num_shards, dali_cpu):
pipe = create_dali_pipeline(data_dir=data_dir, crop=crop, size=size, is_train=is_train,
batch_size=batch_size, shuffle=shuffle, seed=12 + device_id,
num_threads=num_threads, device_id=device_id, shard_id=shard_id,
num_shards=num_shards, dali_cpu=dali_cpu)
pipe.build()
last_batch_policy = LastBatchPolicy.DROP if drop_last else LastBatchPolicy.PARTIAL
return DALIClassificationIteratorWrapper(pipe, is_train=is_train, reader_name="Reader",
last_batch_policy=last_batch_policy, auto_reset=True)
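# A minimal usage sketch (illustrative, not part of the original file); the path and sizes
# are placeholders. In ImagenetDataModule the shard/device ids come from the Trainer's
# local/global rank and world size.
#
#     loader = get_dali_loader(data_dir='/path/to/imagenet/train', crop=224, size=256,
#                              is_train=True, batch_size=128, shuffle=True, drop_last=False,
#                              num_threads=4, device_id=0, shard_id=0, num_shards=1,
#                              dali_cpu=False)
#     images, labels = next(iter(loader))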
| fly-master | src/datamodules/imagenet_dali_loader.py |
# Copied from https://github.com/jotaf98/simple-tar-dataset/blob/master/tardataset.py
import tarfile
from io import BytesIO
from PIL import Image, ImageFile
from torch.utils.data import Dataset, get_worker_info
try: # make torchvision optional
from torchvision.transforms.functional import to_tensor
except ImportError:
to_tensor = None
ImageFile.LOAD_TRUNCATED_IMAGES = True
class TarDataset(Dataset):
"""Dataset that supports Tar archives (uncompressed).
Args:
archive (string or TarDataset): Path to the Tar file containing the dataset.
Alternatively, pass in a TarDataset object to reuse its cached information;
this is useful for loading different subsets within the same archive.
extensions (tuple): Extensions (strings starting with a dot), only files
with these extensions will be iterated. Default: png/jpg/jpeg.
is_valid_file (callable): Optional function that takes file information as
input (tarfile.TarInfo) and outputs True for files that need to be
iterated; overrides extensions argument.
Example: lambda m: m.isfile() and m.name.endswith('.png')
transform (callable): Function applied to each image by __getitem__ (see
torchvision.transforms). Default: ToTensor (convert PIL image to tensor).
Attributes:
members_by_name (dict): Members (files and folders) found in the Tar archive,
with their names as keys and their tarfile.TarInfo structures as values.
samples (list): Items to iterate (can be ignored by overriding __getitem__
and __len__).
Author: Joao F. Henriques
"""
def __init__(self, archive, transform=to_tensor, extensions=('.png', '.jpg', '.jpeg'),
is_valid_file=None):
if not isinstance(archive, TarDataset):
# open tar file. in a multiprocessing setting (e.g. DataLoader workers), we
# have to open one file handle per worker (stored as the tar_obj dict), since
# when the multiprocessing method is 'fork', the workers share this TarDataset.
# we want one file handle per worker because TarFile is not thread-safe.
worker = get_worker_info()
worker = worker.id if worker else None
self.tar_obj = {worker: tarfile.open(archive)}
self.archive = archive
# store headers of all files and folders by name
members = sorted(self.tar_obj[worker].getmembers(), key=lambda m: m.name)
self.members_by_name = {m.name: m for m in members}
else:
# passed a TarDataset into the constructor, reuse the same tar contents.
# no need to copy explicitly since this dict will not be modified again.
self.members_by_name = archive.members_by_name
self.archive = archive.archive # the original path to the Tar file
self.tar_obj = {} # will get filled by get_file on first access
# also store references to the iterated samples (a subset of the above)
self.filter_samples(is_valid_file, extensions)
self.transform = transform
def filter_samples(self, is_valid_file=None, extensions=('.png', '.jpg', '.jpeg')):
"""Filter the Tar archive's files/folders to obtain the list of samples.
Args:
extensions (tuple): Extensions (strings starting with a dot), only files
with these extensions will be iterated. Default: png/jpg/jpeg.
is_valid_file (callable): Optional function that takes file information as
input (tarfile.TarInfo) and outputs True for files that need to be
iterated; overrides extensions argument.
Example: lambda m: m.isfile() and m.name.endswith('.png')
"""
# by default, filter files by extension
if is_valid_file is None:
def is_valid_file(m):
return (m.isfile() and m.name.lower().endswith(extensions))
# filter the files to create the samples list
self.samples = [m.name for m in self.members_by_name.values() if is_valid_file(m)]
def __getitem__(self, index):
"""Return a single sample.
        Should be overridden by a subclass to support custom data other than images (e.g.
class labels). The methods get_image/get_file can be used to read from the Tar
archive, and a dict of files/folders is held in the property members_by_name.
By default, this simply applies the given transforms or converts the image to
a tensor if none are specified.
Args:
index (int): Index of item.
Returns:
Tensor: The image.
"""
image = self.get_image(self.samples[index], pil=True)
image = image.convert('RGB') # if it's grayscale, convert to RGB
if self.transform: # apply any custom transforms
image = self.transform(image)
return image
def __len__(self):
"""Return the length of the dataset (length of self.samples)
Returns:
int: Number of samples.
"""
return len(self.samples)
def get_image(self, name, pil=False):
"""Read an image from the Tar archive, returned as a PIL image or PyTorch tensor.
Args:
name (str): File name to retrieve.
pil (bool): If true, a PIL image is returned (default is a PyTorch tensor).
Returns:
Image or Tensor: The image, possibly in PIL format.
"""
image = Image.open(BytesIO(self.get_file(name).read()))
if pil:
return image
return to_tensor(image)
def get_text_file(self, name, encoding='utf-8'):
"""Read a text file from the Tar archive, returned as a string.
Args:
name (str): File name to retrieve.
encoding (str): Encoding of file, default is utf-8.
Returns:
str: Content of text file.
"""
return self.get_file(name).read().decode(encoding)
def get_file(self, name):
"""Read an arbitrary file from the Tar archive.
Args:
name (str): File name to retrieve.
Returns:
io.BufferedReader: Object used to read the file's content.
"""
# ensure a unique file handle per worker, in multiprocessing settings
worker = get_worker_info()
worker = worker.id if worker else None
if worker not in self.tar_obj:
self.tar_obj[worker] = tarfile.open(self.archive)
return self.tar_obj[worker].extractfile(self.members_by_name[name])
def __del__(self):
"""Close the TarFile file handles on exit."""
for o in self.tar_obj.values():
o.close()
def __getstate__(self):
"""Serialize without the TarFile references, for multiprocessing compatibility."""
state = dict(self.__dict__)
state['tar_obj'] = {}
return state
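# Hedged usage sketch (added for illustration): 'images.tar' is a placeholder path to an
# uncompressed tar archive of images; with the default transform each item is a CxHxW tensor.
#
#     from torch.utils.data import DataLoader
#     dataset = TarDataset('images.tar')
#     loader = DataLoader(dataset, batch_size=8, num_workers=2)
#     for batch in loader:
#         print(batch.shape)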
| fly-master | src/datamodules/datasets/tardataset.py |
# Copied from https://github.com/stanford-crfm/mistral/blob/main/src/corpora/detokenization.py
# Which was originally from https://github.com/NVIDIA/Megatron-LM/blob/aed2f75e209e525c842aec7c044af7acae2a4614/tasks/zeroshot_gpt/detokenizer.py
"""
Handle detokenization for different dataset for zero-shot LM evaluation.
"""
import re
def wikitext_detokenize(string: str) -> str:
"""
Wikitext is whitespace tokenized and we remove these whitespaces.
Taken from https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt2/detokenizer.py
"""
# Contractions
string = string.replace("s '", "s'")
string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
# Number Separators
string = string.replace(" @-@ ", "-")
string = string.replace(" @,@ ", ",")
string = string.replace(" @.@ ", ".")
# Punctuation
string = string.replace(" : ", ": ")
string = string.replace(" ; ", "; ")
string = string.replace(" . ", ". ")
string = string.replace(" ! ", "! ")
string = string.replace(" ? ", "? ")
string = string.replace(" , ", ", ")
# Double Brackets
string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
# Miscellaneous
string = string.replace("= = = =", "====")
string = string.replace("= = =", "===")
string = string.replace("= =", "==")
string = string.replace(" " + chr(176) + " ", chr(176))
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" N ", " 1 ")
string = string.replace(" 's", "'s")
return string
# Set Registry for Various Datasets
DATASET_TOKENIZATION_REGISTRY = {"wikitext": wikitext_detokenize}
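if __name__ == "__main__":
    # Quick sanity check (added for illustration) on a made-up wikitext-style fragment showing how
    # spaces around punctuation and the @,@ / @.@ number separators are collapsed.
    raw = "The movie , released in 1994 , grossed $ 1 @,@ 000 @.@ 5 million"
    print(wikitext_detokenize(raw))
    # -> "The movie, released in 1994, grossed $ 1,000.5 million"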
| fly-master | src/datamodules/datasets/detokenizer.py |
from typing import Any, List, Dict, Tuple, Optional, Callable, cast
import logging
import time
import pickle
from pathlib import Path, PurePath
from PIL import Image
from fs.tarfs import TarFS
from fs.zipfs import ZipFS
from torch.utils.data import get_worker_info
from torchvision.datasets import ImageFolder
from torchvision.datasets.folder import has_file_allowed_extension
# https://www.python.org/dev/peps/pep-0616/
def removeprefix(self: str, prefix: str, /) -> str:
if self.startswith(prefix):
return self[len(prefix):]
else:
return self[:]
class ArchiveImageFolder(ImageFolder):
"""Dataset that supports Tar/Zip archives (uncompressed), with a folder per class.
Similarly to torchvision.datasets.ImageFolder, assumes that the images inside
the Tar archive are arranged in this way by default:
root/dog/xxx.png
root/dog/xxy.png
root/dog/[...]/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/[...]/asd932_.png
Args:
archive (string or TarDataset): Path to the Tar file containing the dataset.
Alternatively, pass in a TarDataset object to reuse its cached information;
this is useful for loading different subsets within the same archive.
root_in_archive (string): Root folder within the archive, directly below
the folders with class names.
extensions (tuple): Extensions (strings starting with a dot), only files
with these extensions will be iterated. Default: png/jpg/jpeg.
is_valid_file (callable): Optional function that takes file information as
input (tarfile.TarInfo) and outputs True for files that need to be
iterated; overrides extensions argument.
Example: lambda m: m.isfile() and m.name.endswith('.png')
transform (callable): Function applied to each image by __getitem__ (see
torchvision.transforms). Default: ToTensor (convert PIL image to tensor).
Attributes:
samples (list): Image file names to iterate.
targets (list): Numeric label corresponding to each image.
class_to_idx (dict): Maps class names to numeric labels.
idx_to_class (dict): Maps numeric labels to class names.
"""
def __init__(
self,
archive: str,
cache_dir: Optional[str] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
root_in_archive: str = '',
) -> None:
assert archive.endswith('.tar') or archive.endswith('.zip'), 'Only .tar and .zip are supported'
self._fs_cls = TarFS if archive.endswith('.tar') else ZipFS
self.root_in_archive = PurePath(root_in_archive)
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
# open tar/zip file. in a multiprocessing setting (e.g. DataLoader workers), we
# have to open one file handle per worker (stored as the tar_obj dict), since
# when the multiprocessing method is 'fork', the workers share this TarDataset.
# we want one file handle per worker because TarFile is not thread-safe.
# As done in https://github.com/jotaf98/simple-tar-dataset/blob/master/tardataset.py
logger = logging.getLogger(__name__)
logger.info(f'Reading archive headers from {str(archive)} with root_in_archive {root_in_archive}')
t = time.time()
worker = get_worker_info()
worker = worker.id if worker else None
self.archive_fs = {worker: self._fs_cls(str(Path(archive).expanduser()))}
super().__init__(archive, loader=None, transform=transform,
target_transform=target_transform, is_valid_file=is_valid_file)
logger.info(f'Done in {float(time.time() - t):.1f} seconds.')
def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
"""Finds the class folders in a dataset.
See :class:`DatasetFolder` for details.
"""
# We ignore directory and assume that directory == self.root
if self.cache_dir is not None:
try:
return load_from_cache(self.cache_dir / self._cache_dir_name / 'classes.pkl')
except FileNotFoundError:
pass
archive_fs = self.get_archive_fs().opendir(str(self.root_in_archive))
classes = sorted(entry.name for entry in archive_fs.scandir('/') if entry.is_dir)
if not classes:
raise FileNotFoundError(f"Couldn't find any class folder in {str(self.root_in_archive)} inside {self.root}.")
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
if self.cache_dir is not None:
save_to_cache(self.cache_dir / self._cache_dir_name / 'classes.pkl',
(classes, class_to_idx))
return classes, class_to_idx
# Adapted from https://github.com/pytorch/vision/blob/main/torchvision/datasets/folder.py
def make_dataset(
self,
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
This can be overridden to e.g. read files from a compressed zip file instead of from the disk.
Args:
directory (str): archive dataset directory, corresponding to ``self.archive``.
class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
extensions (optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
is_valid_file (optional): A function that takes path of a file
and checks if the file is a valid file
(used to check of corrupt files) both extensions and
is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``class_to_idx`` is empty.
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
FileNotFoundError: In case no valid file was found for any class.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
# We ignore directory and assume that directory == self.root
if self.cache_dir is not None:
try:
return load_from_cache(self.cache_dir / self._cache_dir_name / 'samples.pkl')
except FileNotFoundError:
pass
if class_to_idx is None:
_, class_to_idx = self.find_classes(directory)
elif not class_to_idx:
raise ValueError("'class_to_index' must have at least one entry to collect any samples.")
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
archive_fs = self.get_archive_fs().opendir(str(self.root_in_archive))
instances = []
available_classes = set()
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir_info = archive_fs.getinfo(target_class)
if not target_dir_info.is_dir:
continue
for root, _, fnames in sorted(archive_fs.walk(target_class)):
# root starts with '/' because it's the root in this directory
# That messes up the path joining, so we remove the '/'
root = removeprefix(root, '/')
for fname in sorted(fnames, key=lambda info: info.name):
if is_valid_file(fname.name):
path = self.root_in_archive / root / fname.name
item = str(path), class_index
instances.append(item)
if target_class not in available_classes:
available_classes.add(target_class)
empty_classes = set(class_to_idx.keys()) - available_classes
if empty_classes:
msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. "
if extensions is not None:
msg += f"Supported extensions are: {', '.join(extensions)}"
raise FileNotFoundError(msg)
if self.cache_dir is not None:
save_to_cache(self.cache_dir / self._cache_dir_name / 'samples.pkl', instances)
return instances
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
with self.get_archive_fs().openbin(path) as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
@property
def _cache_dir_name(self):
return f'root_in_archive-{str(self.root_in_archive)}'
def get_archive_fs(self):
worker = get_worker_info()
worker = worker.id if worker else None
if worker not in self.archive_fs:
self.archive_fs[worker] = self._fs_cls(str(Path(self.root).expanduser()))
return self.archive_fs[worker]
def __del__(self):
"""Close the TarFile file handles on exit."""
for o in self.archive_fs.values():
o.close()
def __getstate__(self):
"""Serialize without the TarFile references, for multiprocessing compatibility."""
state = dict(self.__dict__)
state['archive_fs'] = {}
return state
def save_to_cache(path, obj):
path = Path(path)
logger = logging.getLogger(__name__)
logger.info(f'Saving to cache at {str(path)}')
path.parent.mkdir(exist_ok=True)
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_from_cache(path):
path = Path(path)
if not path.is_file():
raise FileNotFoundError(f'File {str(path)} not found')
logger = logging.getLogger(__name__)
logger.info(f'Load from cache at {str(path)}')
with open(path, 'rb') as f:
return pickle.load(f)
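# Hedged usage sketch (added for illustration): the archive path, root_in_archive and cache_dir
# below are placeholders; labels follow the usual one-folder-per-class ImageFolder layout.
#
#     from torchvision import transforms
#     dataset = ArchiveImageFolder('~/data/imagenet.tar', root_in_archive='train',
#                                  cache_dir='~/.cache/archive_imagefolder',
#                                  transform=transforms.ToTensor())
#     sample, target = dataset[0]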
| fly-master | src/datamodules/datasets/archive_imagefolder.py |
| fly-master | src/datamodules/datasets/__init__.py |
# Copied from https://github.com/jotaf98/simple-tar-dataset/blob/master/tarimagefolder.py
from .tardataset import TarDataset
try: # make torchvision optional
from torchvision.transforms.functional import to_tensor
except ImportError:
to_tensor = None
class TarImageFolder(TarDataset):
"""Dataset that supports Tar archives (uncompressed), with a folder per class.
Similarly to torchvision.datasets.ImageFolder, assumes that the images inside
the Tar archive are arranged in this way by default:
root/dog/xxx.png
root/dog/xxy.png
root/dog/[...]/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/[...]/asd932_.png
Args:
archive (string or TarDataset): Path to the Tar file containing the dataset.
Alternatively, pass in a TarDataset object to reuse its cached information;
this is useful for loading different subsets within the same archive.
root_in_archive (string): Root folder within the archive, directly below
the folders with class names.
extensions (tuple): Extensions (strings starting with a dot), only files
with these extensions will be iterated. Default: png/jpg/jpeg.
is_valid_file (callable): Optional function that takes file information as
input (tarfile.TarInfo) and outputs True for files that need to be
iterated; overrides extensions argument.
Example: lambda m: m.isfile() and m.name.endswith('.png')
transform (callable): Function applied to each image by __getitem__ (see
torchvision.transforms). Default: ToTensor (convert PIL image to tensor).
Attributes:
samples (list): Image file names to iterate.
targets (list): Numeric label corresponding to each image.
class_to_idx (dict): Maps class names to numeric labels.
idx_to_class (dict): Maps numeric labels to class names.
members_by_name (dict): Members (files and folders) found in the Tar archive,
with their names as keys and their tarfile.TarInfo structures as values.
Author: Joao F. Henriques
"""
def __init__(self, archive, transform=to_tensor, extensions=('.png', '.jpg', '.jpeg'),
is_valid_file=None, root_in_archive=''):
# ensure the root path ends with a slash
if root_in_archive and not root_in_archive.endswith('/'):
root_in_archive = root_in_archive + '/'
self.root_in_archive = root_in_archive
# load the archive meta information, and filter the samples
super().__init__(archive=archive, transform=transform, is_valid_file=is_valid_file)
# assign a label to each image, based on its top-level folder name
self.class_to_idx = {}
self.targets = []
for filename in self.samples:
# extract the class name from the file's path inside the Tar archive
if self.root_in_archive:
assert filename.startswith(root_in_archive) # sanity check (filter_samples should ensure this)
filename = filename[len(root_in_archive):] # make path relative to root
(class_name, _, _) = filename.partition('/') # first folder level
# assign increasing label indexes to each class name
label = self.class_to_idx.setdefault(class_name, len(self.class_to_idx))
self.targets.append(label)
if len(self.class_to_idx) == 0:
raise IOError("No classes (top-level folders) were found with the given criteria. The given\n"
"extensions, is_valid_file or root_in_archive are too strict, or the archive is empty.")
elif len(self.class_to_idx) == 1:
raise IOError(f"Only one class (top-level folder) was found: {next(iter(self.class_to_idx))}.\n"
f"To choose the correct path in the archive where the label folders are located, specify\n"
f"root_in_archive in the TarImageFolder's constructor.")
# the inverse mapping is often useful
self.idx_to_class = {v: k for k, v in self.class_to_idx.items()}
def filter_samples(self, is_valid_file=None, extensions=('.png', '.jpg', '.jpeg')):
"""In addition to TarDataset's filtering by extension (or user-supplied),
filter further to select only samples within the given root path."""
super().filter_samples(is_valid_file, extensions)
self.samples = [filename for filename in self.samples if filename.startswith(self.root_in_archive)]
def __getitem__(self, index):
"""Return a single sample.
By default, this simply applies the given transforms or converts the image to
a tensor if none are specified.
Args:
index (int): Index of item.
Returns:
tuple[Tensor, int]: The image and the corresponding label index.
"""
image = self.get_image(self.samples[index], pil=True)
image = image.convert('RGB') # if it's grayscale, convert to RGB
if self.transform: # apply any custom transforms
image = self.transform(image)
label = self.targets[index]
return (image, label)
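# Hedged usage sketch (added for illustration): 'imagenet_train.tar' and the root folder name are
# placeholders; the archive is expected to contain one top-level folder per class under that root.
#
#     dataset = TarImageFolder('imagenet_train.tar', root_in_archive='train')
#     image, label = dataset[0]
#     print(len(dataset), len(dataset.class_to_idx), dataset.idx_to_class[label])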
| fly-master | src/datamodules/datasets/tarimagefolder.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self, tokens, seq_len, drop_last=True):
"""tokens should be a numpy array
"""
self.seq_len = seq_len
ntokens = len(tokens)
if drop_last:
ntokens = ((ntokens - 1) // seq_len) * seq_len + 1
self.ntokens = ntokens
# We're careful not to slice tokens, since it could be a memmap'ed array or H5 dataset,
# and slicing would load it to memory.
self.tokens = tokens
self.total_sequences = math.ceil((self.ntokens - 1) / self.seq_len)
def __len__(self):
return self.total_sequences
def __getitem__(self, idx):
start_idx = idx * self.seq_len
seq_len = min(self.seq_len, self.ntokens - 1 - start_idx)
data = torch.as_tensor(self.tokens[start_idx:(start_idx + seq_len + 1)].astype(np.int64))
return data[:-1], data[1:].clone()
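if __name__ == "__main__":
    # Tiny sanity check (added for illustration): 18 tokens with seq_len=4 give 4 full sequences,
    # each target is the input shifted by one position, and the leftover token is dropped.
    tokens = np.arange(18, dtype=np.int64)
    dataset = LMDataset(tokens, seq_len=4)
    print(len(dataset))  # 4
    x, y = dataset[0]
    print(x.tolist(), y.tolist())  # [0, 1, 2, 3] [1, 2, 3, 4]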
| fly-master | src/datamodules/datasets/lm_dataset.py |
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/vocabulary.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import Counter
from collections import OrderedDict
import torch
from src.utils.distributed import sync_workers
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose:
print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert '<eos>' not in sym
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
# Class OpenAIVocab has been adapted from
# https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py
class OpenAIVocab(Vocab):
def __init__(self, max_size=None, vocab_file=None):
from transformers import GPT2Tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
self.EOT = self.tokenizer.encoder['<|endoftext|>']
self.max_size = max_size
self.vocab_file = vocab_file
pad = 8
vocab_size = len(self.tokenizer)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:09d}'
self.tokenizer.add_tokens([token])
def __len__(self):
return len(self.tokenizer)
def count_file(self, path, verbose=False, add_eos=False):
# TODO: train from scratch, respect self.max_size
pass
def build_vocab(self):
pass
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor:
cached = path + '.bpe'
if os.path.exists(cached):
return torch.load(cached)
print(f'encoding file {path} ...')
assert os.path.exists(path), f"{path} doesn't exist"
with open(path, encoding='utf-8') as f:
# Suppress warnings about length.
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT])
with sync_workers() as rank:
if rank == 0:
torch.save(out, cached)
return out
def tokenize(self, line, add_eos=False, add_double_eos=False):
return self.tokenizer.encode(line)
def convert_to_tensor(self, symbols):
return torch.LongTensor(symbols)
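if __name__ == "__main__":
    # Tiny sanity check (added for illustration): build a word-level vocab from in-memory sentences
    # and encode them into a single tensor of token ids.
    vocab = Vocab(special=['<eos>'])
    sents = [vocab.tokenize('Hello world', add_eos=True),
             vocab.tokenize('Hello again', add_eos=True)]
    vocab.count_sents(sents)
    vocab.build_vocab()
    print(len(vocab))  # 4 symbols: <eos>, hello, world, again
    print(vocab.encode_sents(sents, ordered=True))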
| fly-master | src/datamodules/datasets/vocabulary.py |
import torch
from torch.optim import Optimizer
def InvSqrt(optimizer: Optimizer, num_warmup_steps: int):
""" Originally used for Transformer (in Attention is all you need)
We use the formula from the original paper.
Refer to other implementations:
- Nvidia: https://github.com/NVIDIA/DeepLearningExamples/blob/233287038c96734bf5c94a3adf5f3d08f54838d8/PyTorch/LanguageModeling/Transformer-XL/pytorch/train.py#L915
- LRA: https://github.com/google-research/long-range-arena/blob/264227cbf9591e39dd596d2dc935297a2070bdfe/lra_benchmarks/utils/train_utils.py#L87
Note that the max learning rate is then original_lr / num_warmup_steps ** 0.5,
*not* original_lr.
Fairseq has a different implementation where the max learning rate is original_lr (I think):
https://github.com/pytorch/fairseq/blob/master/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
"""
def lr_lambda(current_step):
# return a multiplier instead of a learning rate
if current_step == 0 and num_warmup_steps == 0:
return 1.
else:
return (1. / (current_step ** 0.5) if current_step > num_warmup_steps
else current_step / (num_warmup_steps ** 1.5))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
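if __name__ == "__main__":
    # Minimal sketch (added for illustration): inspect the warmup/inverse-sqrt shape of the schedule
    # on a dummy optimizer; with base lr=1.0 the printed values are exactly the multipliers.
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
    scheduler = InvSqrt(optimizer, num_warmup_steps=4)
    for step in range(8):
        optimizer.step()
        scheduler.step()
        print(step + 1, optimizer.param_groups[0]['lr'])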
| fly-master | src/optim/lr_scheduler.py |
import inspect
import torch.nn as nn
import hydra
try:
from apex.contrib.layer_norm import FastLayerNorm
except ImportError:
FastLayerNorm = None
from src.models.modules.seq_common import PositionalEncoding
def group_parameters_for_optimizer(model, optimizer_cfg, bias_weight_decay=False,
normalization_weight_decay=False):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Get the weight decay from the config, or from the default value of the optimizer constructor
# if it's not specified in the config.
if 'weight_decay' in optimizer_cfg:
weight_decay = optimizer_cfg.weight_decay
else:
# https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
if 'weight_decay' in signature.parameters:
weight_decay = signature.parameters['weight_decay'].default
if weight_decay is inspect.Parameter.empty:
weight_decay = 0.0
else:
weight_decay = 0.0
# If none of the parameters have weight decay anyway, and there are no parameters with special
# optimization params
if weight_decay == 0.0 and not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set()
skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords')
else set())
# Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
special = set()
whitelist_weight_modules = (nn.Linear, )
blacklist_weight_modules = (nn.Embedding, PositionalEncoding)
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
if FastLayerNorm is not None:
blacklist_weight_modules += (FastLayerNorm,)
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
# In case of parameter sharing, some parameters show up here but are not in
# param_dict.keys()
if not p.requires_grad or fpn not in param_dict:
continue # frozen weights
if hasattr(p, '_optim'):
special.add(fpn)
elif fpn in skip or any(skip_keyword in fpn for skip_keyword in skip_keywords):
no_decay.add(fpn)
elif getattr(p, '_no_weight_decay', False):
no_decay.add(fpn)
elif not bias_weight_decay and pn.endswith('bias'):
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
decay |= (param_dict.keys() - no_decay - special)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, f"Parameters {str(inter_params)} made it into both decay/no_decay sets!"
assert len(param_dict.keys() - special - union_params) == 0, f"parameters {str(param_dict.keys() - union_params)} were not separated into either decay/no_decay set!"
if weight_decay == 0.0 or not no_decay:
param_groups = [{"params": [param_dict[pn] for pn in sorted(list(no_decay | decay))],
"weight_decay": weight_decay}]
else:
# We need sorted(list()) so that the order is deterministic. Otherwise when we resume
# the order could change and resume will fail. [H/t Albert]
param_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [dict(s) for s in set(frozenset(param_dict[pn]._optim.items()) for pn in special)]
for hp in hps:
params = [param_dict[pn] for pn in sorted(list(special)) if param_dict[pn]._optim == hp]
param_groups.append({"params": params, **hp})
return param_groups
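if __name__ == "__main__":
    # Minimal sketch (added for illustration): biases and LayerNorm weights land in the
    # weight_decay=0.0 group while Linear weights are decayed; the config mirrors how Hydra would
    # pass an optimizer config containing a weight_decay entry.
    import torch
    from omegaconf import OmegaConf
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    optimizer_cfg = OmegaConf.create({'_target_': 'torch.optim.AdamW', 'lr': 1e-3,
                                      'weight_decay': 0.1})
    param_groups = group_parameters_for_optimizer(model, optimizer_cfg)
    optimizer = torch.optim.AdamW(param_groups, lr=1e-3)
    print([len(g['params']) for g in param_groups])  # [2, 4]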
| fly-master | src/optim/param_grouping.py |
import torch
from torch.optim import Optimizer
from timm.scheduler import CosineLRScheduler
# We need to subclass torch.optim.lr_scheduler._LRScheduler, or Pytorch-lightning will complain
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRScheduler so we can call scheduler.step() without passing in epoch.
It supports resuming as well.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_epoch = -1
self.step(epoch=0)
def step(self, epoch=None):
if epoch is None:
self._last_epoch += 1
else:
self._last_epoch = epoch
# We call either step or step_update, depending on whether we're using the scheduler every
# epoch or every step.
# Otherwise, lightning will always call step (i.e., meant for each epoch), and if we set
# scheduler interval to "step", then the learning rate update will be wrong.
if self.t_in_epochs:
super().step(epoch=self._last_epoch)
else:
super().step_update(num_updates=self._last_epoch)
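if __name__ == "__main__":
    # Minimal sketch (added for illustration): a per-step cosine schedule with warmup; calling
    # scheduler.step() without an epoch argument is exactly what the wrapper enables.
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = TimmCosineLRScheduler(optimizer, t_initial=1000, warmup_t=100,
                                      warmup_lr_init=1e-6, t_in_epochs=False)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    print(optimizer.param_groups[0]['lr'])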
| fly-master | src/optim/timm_lr_scheduler.py |
# Credits to DeepVoltaire
# github:DeepVoltaire/AutoAugment
from PIL import Image, ImageEnhance, ImageOps
import random
class ShearX(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=self.fillcolor)
class ShearY(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=self.fillcolor)
class TranslateX(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, magnitude * x.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=self.fillcolor)
class TranslateY(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * x.size[1] * random.choice([-1, 1])),
fillcolor=self.fillcolor)
class Rotate(object):
# from https://stackoverflow.com/questions/
# 5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def __call__(self, x, magnitude):
rot = x.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(x.mode)
class Color(object):
def __call__(self, x, magnitude):
return ImageEnhance.Color(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Posterize(object):
def __call__(self, x, magnitude):
return ImageOps.posterize(x, magnitude)
class Solarize(object):
def __call__(self, x, magnitude):
return ImageOps.solarize(x, magnitude)
class Contrast(object):
def __call__(self, x, magnitude):
return ImageEnhance.Contrast(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Sharpness(object):
def __call__(self, x, magnitude):
return ImageEnhance.Sharpness(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Brightness(object):
def __call__(self, x, magnitude):
return ImageEnhance.Brightness(x).enhance(1 + magnitude * random.choice([-1, 1]))
class AutoContrast(object):
def __call__(self, x, magnitude):
return ImageOps.autocontrast(x)
class Equalize(object):
def __call__(self, x, magnitude):
return ImageOps.equalize(x)
class Invert(object):
def __call__(self, x, magnitude):
return ImageOps.invert(x)
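if __name__ == "__main__":
    # Quick sanity check (added for illustration) on a synthetic image; magnitudes follow the
    # AutoAugment convention (shear as a fraction, rotation in degrees, posterize in bits).
    img = Image.new('RGB', (32, 32), color=(200, 100, 50))
    sheared = ShearX()(img, magnitude=0.3)
    rotated = Rotate()(img, magnitude=30)
    posterized = Posterize()(img, magnitude=4)
    print(sheared.size, rotated.size, posterized.mode)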
| fly-master | src/utils/transforms.py |
from itertools import repeat
import collections.abc
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
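if __name__ == "__main__":
    # Quick sanity check (added for illustration): scalars are broadcast, iterables pass through.
    print(to_2tuple(3))       # (3, 3)
    print(to_2tuple((4, 5)))  # (4, 5)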
| fly-master | src/utils/tuples.py |
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
    devices = [device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
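# Hedged usage sketch (added for illustration): typically called once per rank right after the
# process learns its local GPU id; the mode below is just one of the options handled above and
# local_rank is a placeholder.
#
#     affinity = set_affinity(gpu_id=local_rank, nproc_per_node=8, mode='socket_unique_continuous')
#     print(f'GPU {local_rank}: CPU affinity {affinity}')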
| fly-master | src/utils/gpu_affinity.py |
import re
from pathlib import Path
import torch
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
is_deepspeed = False
if path.is_dir(): # DeepSpeed checkpoint
is_deepspeed = True
latest_path = path / 'latest'
if latest_path.is_file():
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
path /= f'{tag}/mp_rank_00_model_states.pt'
state_dict = torch.load(path, map_location=device)
if is_deepspeed:
state_dict = state_dict['module']
# Replace the names of some of the submodules
def key_mapping(key):
return re.sub(r'^module.model.', '', key)
state_dict = {key_mapping(k): v for k, v in state_dict.items()}
return state_dict
def blockdiag_to_dense_mlp_bert(state_dict):
from src.ops.blockdiag_multiply import blockdiag_weight_to_dense_weight
names = {name for name in state_dict
            if re.match(r'bert.encoder.layer.(\d+).(mlp.fc(1|2)|(intermediate|output).dense).weight',
name)}
for name in names:
state_dict[name] = blockdiag_weight_to_dense_weight(state_dict[name])
return state_dict
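# Hedged usage sketch (added for illustration): the checkpoint path and the model object below are
# placeholders; for a DeepSpeed run the path points at the checkpoint *directory* instead.
#
#     state_dict = load_checkpoint('checkpoints/last.ckpt')
#     model.load_state_dict(state_dict, strict=False)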
| fly-master | src/utils/checkpoint.py |
# Copied from https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py
from __future__ import division
from __future__ import unicode_literals
from typing import Iterable, Optional
import weakref
import copy
import contextlib
import torch
def to_float_maybe(x):
return x.float() if x.dtype in [torch.float16, torch.bfloat16] else x
# Partially based on:
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter` (typically from
`model.parameters()`).
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
def __init__(
self,
parameters: Iterable[torch.nn.Parameter],
decay: float,
use_num_updates: bool = True
):
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
parameters = list(parameters)
self.shadow_params = [to_float_maybe(p.clone().detach())
for p in parameters if p.requires_grad]
self.collected_params = None
# By maintaining only a weakref to each parameter,
# we maintain the old GC behaviour of ExponentialMovingAverage:
# if the model goes out of scope but the ExponentialMovingAverage
# is kept, no references to the model or its parameters will be
# maintained, and the model will be cleaned up.
self._params_refs = [weakref.ref(p) for p in parameters]
def _get_parameters(
self,
parameters: Optional[Iterable[torch.nn.Parameter]]
) -> Iterable[torch.nn.Parameter]:
if parameters is None:
parameters = [p() for p in self._params_refs]
if any(p is None for p in parameters):
raise ValueError(
"(One of) the parameters with which this "
"ExponentialMovingAverage "
"was initialized no longer exists (was garbage collected);"
" please either provide `parameters` explicitly or keep "
"the model to which they belong from being garbage "
"collected."
)
return parameters
else:
parameters = list(parameters)
if len(parameters) != len(self.shadow_params):
raise ValueError(
"Number of parameters passed as argument is different "
"from number of shadow parameters maintained by this "
"ExponentialMovingAverage"
)
return parameters
def update(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(
decay,
(1 + self.num_updates) / (10 + self.num_updates)
)
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
torch.lerp(s_param, param.to(dtype=s_param.dtype), one_minus_decay, out=s_param)
def copy_to(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Copy current averaged parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored. If `None`, the parameters of with which this
`ExponentialMovingAverage` was initialized will be used.
"""
parameters = self._get_parameters(parameters)
self.collected_params = [
param.clone()
for param in parameters
if param.requires_grad
]
def restore(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
if self.collected_params is None:
raise RuntimeError(
"This ExponentialMovingAverage has no `store()`ed weights "
"to `restore()`"
)
parameters = self._get_parameters(parameters)
for c_param, param in zip(self.collected_params, parameters):
if param.requires_grad:
param.data.copy_(c_param.data)
@contextlib.contextmanager
def average_parameters(
self,
parameters: Optional[Iterable[torch.nn.Parameter]] = None
):
r"""
Context manager for validation/inference with averaged parameters.
Equivalent to:
ema.store()
ema.copy_to()
try:
...
finally:
ema.restore()
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
self.store(parameters)
self.copy_to(parameters)
try:
yield
finally:
self.restore(parameters)
def to(self, device=None, dtype=None) -> None:
r"""Move internal buffers of the ExponentialMovingAverage to `device`.
Args:
device: like `device` argument to `torch.Tensor.to`
"""
# .to() on the tensors handles None correctly
self.shadow_params = [
p.to(device=device, dtype=dtype)
if p.is_floating_point()
else p.to(device=device)
for p in self.shadow_params
]
if self.collected_params is not None:
self.collected_params = [
p.to(device=device, dtype=dtype)
if p.is_floating_point()
else p.to(device=device)
for p in self.collected_params
]
return
def state_dict(self) -> dict:
r"""Returns the state of the ExponentialMovingAverage as a dict."""
# Following PyTorch conventions, references to tensors are returned:
# "returns a reference to the state and not its copy!" -
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict
return {
"decay": self.decay,
"num_updates": self.num_updates,
"shadow_params": self.shadow_params,
"collected_params": self.collected_params
}
def load_state_dict(self, state_dict: dict) -> None:
r"""Loads the ExponentialMovingAverage state.
Args:
state_dict (dict): EMA state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = copy.deepcopy(state_dict)
self.decay = state_dict["decay"]
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.num_updates = state_dict["num_updates"]
assert self.num_updates is None or isinstance(self.num_updates, int), \
"Invalid num_updates"
self.shadow_params = state_dict["shadow_params"]
assert isinstance(self.shadow_params, list), \
"shadow_params must be a list"
assert all(
isinstance(p, torch.Tensor) for p in self.shadow_params
), "shadow_params must all be Tensors"
self.collected_params = state_dict["collected_params"]
if self.collected_params is not None:
assert isinstance(self.collected_params, list), \
"collected_params must be a list"
assert all(
isinstance(p, torch.Tensor) for p in self.collected_params
), "collected_params must all be Tensors"
assert len(self.collected_params) == len(self.shadow_params), \
"collected_params and shadow_params had different lengths"
if len(self.shadow_params) == len(self._params_refs):
# Consistent with torch.optim.Optimizer, cast things to consistent
# device and dtype with the parameters
params = [p() for p in self._params_refs]
# If parameters have been garbage collected, just load the state
# we were given without change.
if not any(p is None for p in params):
# ^ parameter references are still good
for i, p in enumerate(params):
self.shadow_params[i] = to_float_maybe(self.shadow_params[i].to(
device=p.device, dtype=p.dtype
))
if self.collected_params is not None:
self.collected_params[i] = self.collected_params[i].to(
device=p.device, dtype=p.dtype
)
else:
raise ValueError(
"Tried to `load_state_dict()` with the wrong number of "
"parameters in the saved state."
)
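if __name__ == "__main__":
    # Minimal sketch (added for illustration) of the usual pattern: update the shadow weights after
    # every optimizer step and evaluate under the averaged weights with the context manager.
    model = torch.nn.Linear(4, 2)
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update()
    with ema.average_parameters():
        pass  # run validation here with the EMA weights; originals are restored on exit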
| fly-master | src/utils/ema.py |
fly-master | src/utils/__init__.py |
|
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
import torch
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnalysis, flop_count_str, flop_count_table
from fvcore.nn import ActivationCountAnalysis
has_fvcore_profiling = True
except ImportError as e:
FlopCountAnalysis = None
ActivationCountAnalysis = None
has_fvcore_profiling = False
def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
macs, _ = get_model_profile(
model=model,
input_res=(batch_size,) + input_size, # input shape or input to the input_constructor
input_constructor=None, # if specified, a constructor taking input_res is used as input to the model
print_profile=detailed, # prints the model graph with the measured profile attached to each module
detailed=detailed, # print the detailed profile
warm_up=10, # the number of warm-ups before measuring the time of each module
as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k)
output_file=None, # path to the output file. If None, the profiler prints to stdout.
ignore_modules=None) # the list of modules to ignore in the profiling
return macs, 0 # no activation count in DS
def profile_fvcore(model, input_size=(3, 224, 224), input_dtype=torch.float32, max_depth=4,
batch_size=1, detailed=False, force_cpu=False):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_input = torch.ones((batch_size,) + input_size, device=device, dtype=input_dtype)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
print(flop_count_table(fca, max_depth=max_depth))
return fca, fca.total(), aca, aca.total()
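if __name__ == "__main__":
    # Hedged sketch (added for illustration): profile a torchvision ResNet-18 on CPU, assuming both
    # fvcore and torchvision are installed; detailed=True would also print a per-module table.
    if has_fvcore_profiling:
        import torchvision
        _, macs, _, acts = profile_fvcore(torchvision.models.resnet18(), force_cpu=True)
        print(f'GMACs: {macs / 1e9:.2f}, M activations: {acts / 1e6:.2f}')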
| fly-master | src/utils/flops.py |
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
import torch
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
    Call torch.distributed.barrier() if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def all_reduce_item(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
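# Illustrative usage sketch (added for clarity, not in the original): under `torchrun` the
# WORLD_SIZE/RANK environment variables are set, so a typical training script does roughly:
#     distributed = init_distributed(cuda=torch.cuda.is_available())
#     local_loss = ...                                    # per-rank scalar (hypothetical)
#     mean_loss = all_reduce_item(local_loss, op='mean')  # identical value on every rank
#     with sync_workers() as rank:
#         if rank == 0:
#             print(f'mean loss: {mean_loss}')            # only rank 0 logs; others wait at the barrier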
| fly-master | src/utils/distributed.py |
import logging
import warnings
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
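# Usage sketch (illustrative, not in the original): temporarily silence a chatty third-party
# logger inside a block; its previous level and handlers are restored on exit.
#     noisy = logging.getLogger("urllib3")
#     with LoggingContext(noisy, level=logging.WARNING):
#         ...  # DEBUG/INFO records from urllib3 are suppressed inside this block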
def get_logger(name=__name__) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
def extras(config: DictConfig) -> None:
"""A couple of optional utilities, controlled by main config file:
- disabling warnings
- forcing debug friendly configuration
- verifying experiment name is set when running in experiment mode
Modifies DictConfig in place.
Args:
config (DictConfig): Configuration composed by Hydra.
"""
log = get_logger(__name__)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
# verify experiment name is set when running in experiment mode
if config.get("experiment_mode") and not config.get("name"):
log.info(
"Running in experiment mode without the experiment name specified! "
"Use `python run.py mode=exp name=experiment_name`"
)
log.info("Exiting...")
exit()
# force debugger friendly configuration if <config.trainer.fast_dev_run=True>
# debuggers don't like GPUs and multiprocessing
if config.trainer.get("fast_dev_run"):
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.datamodule.get("pin_memory"):
config.datamodule.pin_memory = False
if config.datamodule.get("num_workers"):
config.datamodule.num_workers = 0
@rank_zero_only
def print_config(
config: DictConfig,
fields: Sequence[str] = (
"trainer",
"model",
"datamodule",
"train",
"eval",
"callbacks",
"logger",
"seed",
"name",
),
resolve: bool = True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
fields (Sequence[str], optional): Determines which main fields from config will
be printed and in what order.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
with open("config_tree.txt", "w") as fp:
rich.print(tree, file=fp)
def finish(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
) -> None:
"""Makes sure everything closed properly."""
# without this sweeps with wandb logger might crash!
for lg in logger:
if isinstance(lg, pl.loggers.wandb.WandbLogger):
import wandb
wandb.finish()
| fly-master | src/utils/utils.py |
# Credits to DeepVoltaire
# github:DeepVoltaire/AutoAugment
import numpy as np
from src.utils.transforms import *
class ImageNetPolicy(object):
""" Randomly choose one of the best 24 Sub-policies on ImageNet.
Example:
>>> policy = ImageNetPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> ImageNetPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor),
SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor),
SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor),
SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor),
SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor),
SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor),
SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor),
SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor),
SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor),
SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor),
SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
class CIFAR10Policy(object):
""" Randomly choose one of the best 25 Sub-policies on CIFAR10.
Example:
>>> policy = CIFAR10Policy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> CIFAR10Policy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor),
SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment CIFAR10 Policy"
class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
func = {
"shearX": ShearX(fillcolor=fillcolor),
"shearY": ShearY(fillcolor=fillcolor),
"translateX": TranslateX(fillcolor=fillcolor),
"translateY": TranslateY(fillcolor=fillcolor),
"rotate": Rotate(),
"color": Color(),
"posterize": Posterize(),
"solarize": Solarize(),
"contrast": Contrast(),
"sharpness": Sharpness(),
"brightness": Brightness(),
"autocontrast": AutoContrast(),
"equalize": Equalize(),
"invert": Invert()
}
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1:
img = self.operation1(img, self.magnitude1)
if random.random() < self.p2:
img = self.operation2(img, self.magnitude2)
return img
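# Illustrative sketch (assumes the PIL-based ops imported from src.utils.transforms): a
# SubPolicy applies its first op with probability p1 and its second with probability p2,
# each at the magnitude picked by the index into the 10-step ranges above.
#     from PIL import Image
#     img = Image.open("example.jpg")                      # hypothetical input image
#     sub = SubPolicy(0.8, "rotate", 8, 0.4, "color", 0)   # rotate up to ~26.7 deg, mild color jitter
#     augmented = sub(img)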
| fly-master | src/utils/autoaug.py |
import torch
import torch.nn.functional as F
# Adapted from https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/autopadder.py
def pad_to_multiple(tensor, multiple, dims=-1, value=0):
try:
dims = list(dims) # If dims is an iterable (e.g., List, Tuple)
    except TypeError:  # dims is a single int
dims = [dims]
# convert dims from negative to positive
dims = [d if d >= 0 else tensor.ndim + d for d in dims]
padding = [0] * (2 * tensor.ndim)
for d in dims:
size = tensor.size(d)
# Pytorch's JIT doesn't like divmod
# m, remainder = divmod(size, multiple)
m = size // multiple
remainder = size - m * multiple
if remainder != 0:
padding[2 * (tensor.ndim - d - 1) + 1] = multiple - remainder
if all(p == 0 for p in padding):
return tensor
else:
return F.pad(tensor, tuple(padding), value=value)
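# Example sketch (not in the original file): padding trailing dims to a multiple of 8.
#     x = torch.randn(2, 10)
#     pad_to_multiple(x, 8).shape                  # torch.Size([2, 16]) -- only the last dim
#     pad_to_multiple(x, 8, dims=(-2, -1)).shape   # torch.Size([8, 16]) -- both trailing dims padded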
| fly-master | src/utils/padding.py |
from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from torchmetrics.classification.accuracy import Accuracy
from src.models.modules.simple_dense_net import SimpleDenseNet
class MNISTLitModel(LightningModule):
"""
Example of LightningModule for MNIST classification.
A LightningModule organizes your PyTorch code into 5 sections:
- Computations (init).
- Train loop (training_step)
- Validation loop (validation_step)
- Test loop (test_step)
- Optimizers (configure_optimizers)
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html
"""
def __init__(
self,
input_size: int = 784,
lin1_size: int = 256,
lin2_size: int = 256,
lin3_size: int = 256,
output_size: int = 10,
lr: float = 0.001,
weight_decay: float = 0.0005,
):
super().__init__()
# this line ensures params passed to LightningModule will be saved to ckpt
# it also allows to access params with 'self.hparams' attribute
self.save_hyperparameters()
self.model = SimpleDenseNet(hparams=self.hparams)
# loss function
self.criterion = torch.nn.CrossEntropyLoss()
# use separate metric instance for train, val and test step
# to ensure a proper reduction over the epoch
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
def forward(self, x: torch.Tensor):
return self.model(x)
def step(self, batch: Any):
x, y = batch
logits = self.forward(x)
loss = self.criterion(logits, y)
preds = torch.argmax(logits, dim=1)
return loss, preds, y
def training_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
# log train metrics
acc = self.train_accuracy(preds, targets)
self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
# we can return here dict with any tensors
# and then read it in some callback or in training_epoch_end() below
# remember to always return loss from training_step, or else backpropagation will fail!
return {"loss": loss, "preds": preds, "targets": targets}
def training_epoch_end(self, outputs: List[Any]):
# `outputs` is a list of dicts returned from `training_step()`
pass
def validation_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
# log val metrics
acc = self.val_accuracy(preds, targets)
self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def validation_epoch_end(self, outputs: List[Any]):
pass
def test_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
# log test metrics
acc = self.test_accuracy(preds, targets)
self.log("test/loss", loss, on_step=False, on_epoch=True)
self.log("test/acc", acc, on_step=False, on_epoch=True)
return {"loss": loss, "preds": preds, "targets": targets}
def test_epoch_end(self, outputs: List[Any]):
pass
def configure_optimizers(self):
"""Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
See examples here:
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
"""
return torch.optim.Adam(
params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay
)
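# Minimal training sketch (illustrative; the datamodule below is hypothetical and must yield
# batches of flattened 784-dim images with integer labels, e.g. MNIST + ToTensor + flatten):
#     import pytorch_lightning as pl
#     model = MNISTLitModel(lin1_size=128, lin2_size=128, lin3_size=64, lr=1e-3)
#     trainer = pl.Trainer(max_epochs=1)
#     trainer.fit(model, datamodule=mnist_datamodule)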
| fly-master | src/models/mnist_model.py |
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import hydra
from einops import rearrange
if version.parse(torch.__version__) >= version.parse("1.6"):
is_amp_available = True
from torch.cuda.amp import autocast
else:
is_amp_available = False
import transformers
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING
from transformers.models.gpt2.modeling_gpt2 import PARALLELIZE_DOCSTRING, DEPARALLELIZE_DOCSTRING
from transformers.models.gpt2.modeling_gpt2 import load_tf_weights_in_gpt2
from src.ops.triton.softmax import softmax as softmax_triton
from src.models.layers.rotary import RotaryEmbedding
USE_TRITON_SOFTMAX = True
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "gpt2"
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
class GPT2Attention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = getattr(config, 'head_dim', self.hidden_size // self.num_heads)
self.embed_dim = self.head_dim * self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.split_size:
raise ValueError(
f"`split_size` must be divisible by num_heads (got `split_size`: {self.split_size} and `num_heads`: {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
# Layer-wise attention scaling, reordering, and upcasting
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
self.use_rotary_emb = getattr(config, 'use_rotary_emb', False)
if self.use_rotary_emb:
self.rotary_emb = RotaryEmbedding(self.head_dim)
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.hidden_size)
self.q_attn = Conv1D(self.embed_dim, self.hidden_size)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.hidden_size)
self.c_proj = Conv1D(self.hidden_size, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
# Compute Scale Factor
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
q = rearrange(query, 'b h t d -> (b h) t d')
k = rearrange(key, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=query.dtype,
device=query.device)
attn_weights = rearrange(torch.baddbmm(attn_weights, q, k, beta=0, alpha=scale_factor),
'(b h) t s -> b h t s', h=num_heads)
if USE_TRITON_SOFTMAX:
attn_weights = softmax_triton(attn_weights, mask=attention_mask,
causal=not self.is_cross_attention)
else:
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
attn_weights.masked_fill_(~causal_mask, self.masked_bias)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
# Downcast (if necessary) back to V's dtype (if in mixed-precision)
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
# Preallocate attn_weights for `baddbmm`
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
# Compute Scale Factor
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
# Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
if is_amp_available:
with autocast(enabled=False):
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
else:
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
if attn_weights.dtype != torch.float32:
raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(*new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
if not hasattr(self, "q_attn"):
raise ValueError(
"If class is used as cross attention, the weights `q_attn` have to be defined. "
"Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
)
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key, past_value = layer_past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
if self.use_rotary_emb:
query, key = self.rotary_emb(query, key)
if self.reorder_and_upcast_attn:
attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
else:
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
class GPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class GPT2Block(nn.Module):
def __init__(self, config, layer_idx=None, mlp_cfg=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPT2Attention(config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = GPT2Attention(config, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if mlp_cfg is None:
self.mlp = GPT2MLP(inner_dim, config)
else:
self.mlp = hydra.utils.instantiate(mlp_cfg, in_features=config.hidden_size,
hidden_features=inner_dim, drop=config.resid_pdrop,
_recursive_=False)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
"cross-attention layers by setting `config.add_cross_attention=True`"
)
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class GPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if "c_proj" in name and "weight" in name:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, GPT2Model):
module.gradient_checkpointing = value
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, config, mlp_cfg=None):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.use_rotary_emb = getattr(config, 'use_rotary_emb', False)
if not self.use_rotary_emb:
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i, mlp_cfg=mlp_cfg)
for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self):
super().tie_weights()
if getattr(self.config, 'tie_weights', False):
group_size = getattr(self.config, 'tie_weights_group_size', len(self.h))
assert len(self.h) % group_size == 0
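            # NOTE: the tying below assumes a custom MLP (via mlp_cfg) exposing fc1/fc2;
            # the stock GPT2MLP uses c_fc/c_proj and would raise AttributeError here.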
for block_idx, block in enumerate(self.h):
ref_block_idx = (block_idx // group_size) * group_size
ref_block = self.h[ref_block_idx]
block.attn.c_attn.weight = ref_block.attn.c_attn.weight
block.attn.c_proj.weight = ref_block.attn.c_proj.weight
block.mlp.fc1.weight = ref_block.mlp.fc1.weight
block.mlp.fc2.weight = ref_block.mlp.fc2.weight
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
self.wpe = self.wpe.to(self.first_device)
# Load onto devices
for k, v in self.device_map.items():
for block in v:
cuda_device = "cuda:" + str(k)
self.h[block] = self.h[block].to(cuda_device)
# ln_f to last
self.ln_f = self.ln_f.to(self.last_device)
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
self.wte = self.wte.to("cpu")
self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if not self.use_rotary_emb:
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
else:
hidden_states = inputs_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
def __init__(self, config, mlp_cfg=None):
super().__init__(config)
self.transformer = GPT2Model(config, mlp_cfg=mlp_cfg)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutputWithCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
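# Usage sketch (illustrative, not in the original): building a small modified GPT-2 with the
# rotary-embedding option; extra kwargs such as use_rotary_emb end up as attributes on the
# GPT2Config. Note that USE_TRITON_SOFTMAX above assumes a CUDA device with Triton available.
#     config = GPT2Config(n_layer=2, n_head=4, n_embd=128, use_rotary_emb=True)
#     model = GPT2LMHeadModel(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 32))
#     out = model(input_ids, labels=input_ids)    # labels are shifted inside the model
#     out.loss.backward()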
| fly-master | src/models/gpt2.py |
# Copied from https://github.com/HobbitLong/SupContrast/blob/master/networks/resnet_big.py
"""ResNet in PyTorch.
ImageNet-Style ResNet
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Adapted from: https://github.com/bearpaw/pytorch-classification
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, is_last=False):
super(BasicBlock, self).__init__()
self.is_last = is_last
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
preact = out
out = F.relu(out)
if self.is_last:
return out, preact
else:
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, is_last=False):
super(Bottleneck, self).__init__()
self.is_last = is_last
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
preact = out
out = F.relu(out)
if self.is_last:
return out, preact
else:
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_channel=3, zero_init_residual=False):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves
# like an identity. This improves the model by 0.2~0.3% according to:
# https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for i in range(num_blocks):
stride = strides[i]
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, layer=100):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = torch.flatten(out, 1)
return out
def resnet18(**kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
model_dict = {
'resnet18': [resnet18, 512],
'resnet34': [resnet34, 512],
'resnet50': [resnet50, 2048],
'resnet101': [resnet101, 2048],
}
class LinearBatchNorm(nn.Module):
"""Implements BatchNorm1d by BatchNorm2d, for SyncBN purpose"""
def __init__(self, dim, affine=True):
super(LinearBatchNorm, self).__init__()
self.dim = dim
self.bn = nn.BatchNorm2d(dim, affine=affine)
def forward(self, x):
x = x.view(-1, self.dim, 1, 1)
x = self.bn(x)
x = x.view(-1, self.dim)
return x
class SupConResNet(nn.Module):
"""backbone + projection head"""
def __init__(self, name='resnet50', head='mlp', feat_dim=128):
super(SupConResNet, self).__init__()
model_fun, dim_in = model_dict[name]
self.encoder = model_fun()
if head == 'linear':
self.head = nn.Linear(dim_in, feat_dim)
elif head == 'mlp':
self.head = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, feat_dim)
)
else:
raise NotImplementedError(
'head not supported: {}'.format(head))
def forward(self, x):
feat = self.encoder(x)
feat = F.normalize(self.head(feat), dim=1)
return feat
class SupCEResNet(nn.Module):
"""encoder + classifier"""
def __init__(self, name='resnet50', num_classes=10):
super(SupCEResNet, self).__init__()
model_fun, dim_in = model_dict[name]
self.encoder = model_fun()
self.fc = nn.Linear(dim_in, num_classes)
def forward(self, x):
return self.fc(self.encoder(x))
class LinearClassifier(nn.Module):
"""Linear classifier"""
def __init__(self, name='resnet50', num_classes=10):
super(LinearClassifier, self).__init__()
_, feat_dim = model_dict[name]
self.fc = nn.Linear(feat_dim, num_classes)
def forward(self, features):
return self.fc(features)
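# Usage sketch (illustrative, not in the original): SupCon features are L2-normalized
# projections; the linear classifier consumes raw (pre-projection) encoder features.
#     model = SupConResNet(name='resnet50', feat_dim=128)
#     images = torch.randn(4, 3, 32, 32)                  # CIFAR-sized inputs (3x3 stem, stride 1)
#     feats = model(images)                               # shape (4, 128), unit L2 norm
#     clf = LinearClassifier(name='resnet50', num_classes=10)
#     logits = clf(model.encoder(images))                 # shape (4, 10)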
| fly-master | src/models/resnet_supcon.py |
import copy
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch import Tensor
from einops import rearrange
import hydra
from src.models.modules.masking import LengthMask
from src.models.modules.seq_common import ClassificationHead, PositionalEncoding, Mlp
from src.models.modules.s4 import S4
from torch.nn.modules.transformer import _get_activation_fn  # assumed source for the legacy string-activation path below
# Adapted from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
class S4EncoderLayer(nn.Module):
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model, d_inner=2048, ffn_cfg=None, dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=False, norm_first=False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.norm_first = norm_first
self.s4 = S4(H=d_model, l_max=1024, transposed=True, dropout=dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
# Implementation of Feedforward model
if ffn_cfg is None:
self.ff = Mlp(d_model, hidden_features=d_inner,
act_fn=self.activation, drop=dropout, **factory_kwargs)
else:
self.ff = hydra.utils.instantiate(ffn_cfg, **factory_kwargs, _recursive_=False)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
if not isinstance(self.ff, nn.Identity):
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout2d(dropout)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(S4EncoderLayer, self).__setstate__(state)
def forward(self, src: Tensor, **kwargs) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
            **kwargs: accepted for interface compatibility with attention-based layers and ignored.
Shape:
see the docs in S4Sequence class.
"""
x = src
if self.norm_first:
            out, _ = self.s4(rearrange(self.norm1(x), '... L d -> ... d L'))
out = self.dropout1(out)
x = x + rearrange(out, '... d L -> ... L d')
if not isinstance(self.ff, nn.Identity):
x = x + self.ff(self.norm2(x))
else:
out, _ = self.s4(rearrange(x, '... L d -> ... d L'))
out = self.dropout1(out)
x = self.norm1(x + rearrange(out, '... d L -> ... L d'))
if not isinstance(self.ff, nn.Identity):
x = self.norm2(x + self.ff(x))
return x
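# Usage sketch (illustrative; assumes a sequence length equal to the hard-coded l_max=1024 and
# batch-first inputs of shape (batch, seq_len, d_model)):
#     layer = S4EncoderLayer(d_model=128, d_inner=512, norm_first=True, batch_first=True)
#     x = torch.rand(4, 1024, 128)
#     y = layer(x)    # same shape as x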
class S4Encoder(nn.Module):
r"""S4Encoder is a stack of N encoder layers
Args:
        encoder_layer: a zero-argument callable returning an S4EncoderLayer instance (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
        >>> encoder_layer = lambda: S4EncoderLayer(d_model=512)
        >>> s4_encoder = S4Encoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = s4_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
# self.layers = _get_clones(encoder_layer, num_layers)
self.layers = nn.ModuleList([encoder_layer() for i in range(num_layers)])
self.num_layers = num_layers
self.norm = norm
def forward(self, src: Tensor, **kwargs) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
            **kwargs: forwarded to each S4EncoderLayer (currently unused by the layers).
Shape:
see the docs in S4Sequence class.
"""
output = src
for mod in self.layers:
output = mod(output, **kwargs)
if self.norm is not None:
output = self.norm(output)
return output
class S4Sequence(nn.Module):
r"""
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
n_layer: the number of sub-encoder-layers in the encoder (default=6).
d_inner: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of encoder/decoder intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
custom_encoder: custom encoder (default=None).
custom_decoder: custom decoder (default=None).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False`` (after).
Examples::
>>> transformer_model = nn.S4Sequence(n_layer=12)
>>> src = torch.rand((10, 32, 512))
>>> tgt = torch.rand((20, 32, 512))
>>> out = transformer_model(src, tgt)
Note: A full example to apply nn.S4Sequence module for the word language model is available in
https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model: int = 512, n_layer: int = 6, d_inner: int = 2048,
ffn_cfg=None,
dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model
self.batch_first = batch_first
encoder_layer = lambda : S4EncoderLayer(d_model, d_inner=d_inner, ffn_cfg=ffn_cfg, dropout=dropout,
activation=activation, layer_norm_eps=layer_norm_eps,
batch_first=batch_first, norm_first=norm_first,
**factory_kwargs)
# encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
encoder_norm = None
self.encoder = S4Encoder(encoder_layer, n_layer, encoder_norm)
def forward(self, src: Tensor, **kwargs) -> Tensor:
r"""Take in and process masked source/target sequences.
Args:
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
src_mask: the additive mask for the src sequence (optional).
tgt_mask: the additive mask for the tgt sequence (optional).
memory_mask: the additive mask for the encoder output (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`, `(N, S, E)` if batch_first.
- tgt: :math:`(T, N, E)`, `(N, T, E)` if batch_first.
- src_mask: :math:`(S, S)`.
- tgt_mask: :math:`(T, T)`.
- memory_mask: :math:`(T, S)`.
- src_key_padding_mask: :math:`(N, S)`.
- tgt_key_padding_mask: :math:`(N, T)`.
- memory_key_padding_mask: :math:`(N, S)`.
Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
[src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- output: :math:`(T, N, E)`, `(N, T, E)` if batch_first.
Note: Due to the multi-head attention architecture in the transformer model,
the output sequence length of a transformer is same as the input sequence
(i.e. target) length of the decode.
where S is the source sequence length, T is the target sequence length, N is the
batch size, E is the feature number
Examples:
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
"""
        output = self.encoder(src, **kwargs)
return output
class S4Classifier(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, num_classes: int,
ffn_cfg=None, embedding_cfg=None, classifier_cfg=None,
norm_first=False, dropout: float = 0.1, activation: str = "relu",
layer_norm_eps: float = 1e-5,
batch_first: bool = False, pooling_mode='MEAN') -> None:
super().__init__()
assert pooling_mode in ['MEAN', 'SUM'], 'pooling_mode not supported'
self.pooling_mode = pooling_mode
self.embedding = (nn.Identity() if embedding_cfg is None
else hydra.utils.instantiate(embedding_cfg, _recursive_=False))
self.batch_first = batch_first
self.s4seq = S4Sequence(d_model, n_layer, d_inner, ffn_cfg, dropout, activation,
layer_norm_eps, batch_first, norm_first)
if classifier_cfg is None:
self.classifier = ClassificationHead(d_model, d_inner, num_classes,
pooling_mode=pooling_mode, batch_first=batch_first)
else:
self.classifier = hydra.utils.instantiate(
classifier_cfg, d_model=d_model, d_inner=d_inner, num_classes=num_classes,
pooling_mode=pooling_mode, batch_first=batch_first, _recursive_=False
)
def forward_features(self, src: Tensor, lengths=None, **kwargs) -> Tensor:
if lengths is not None:
src_key_padding_mask = LengthMask(lengths,
max_len=src.size(1 if self.batch_first else 0),
device=src.device)
else:
src_key_padding_mask = None
src = self.embedding(src)
features = self.s4seq(src, **kwargs)
return features, src_key_padding_mask
def forward(self, src: Tensor, lengths=None, **kwargs) -> Tensor:
features, src_key_padding_mask = self.forward_features(src, lengths=lengths, **kwargs)
return self.classifier(features, key_padding_mask=src_key_padding_mask)
class S4DualClassifier(S4Classifier):
def forward(self, src1: Tensor, src2: Tensor,
lengths1=None, lengths2=None,
**kwargs) -> Tensor:
features1, src1_key_padding_mask = self.forward_features(src1, lengths=lengths1, **kwargs)
features2, src2_key_padding_mask = self.forward_features(src2, lengths=lengths2, **kwargs)
return self.classifier(features1, features2,
key_padding_mask1=src1_key_padding_mask,
key_padding_mask2=src2_key_padding_mask)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
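

# A minimal, hedged usage sketch (not part of the original file): the hyperparameter values
# below are illustrative assumptions. With embedding_cfg=None the embedding is nn.Identity,
# so the input must already have d_model features, and since the S4 layer above is built with
# l_max=1024 we use sequences of length 1024 with batch_first=True.
#
#     model = S4Classifier(d_model=256, n_layer=4, d_inner=1024, num_classes=10,
#                          batch_first=True, pooling_mode='MEAN')
#     x = torch.randn(8, 1024, 256)   # (batch, seq_len, d_model)
#     logits = model(x)               # (8, 10)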
| fly-master | src/models/s4_seq.py |
from torch import nn
class SimpleDenseNet(nn.Module):
def __init__(self, hparams: dict):
super().__init__()
self.model = nn.Sequential(
nn.Linear(hparams["input_size"], hparams["lin1_size"]),
nn.BatchNorm1d(hparams["lin1_size"]),
nn.ReLU(),
nn.Linear(hparams["lin1_size"], hparams["lin2_size"]),
nn.BatchNorm1d(hparams["lin2_size"]),
nn.ReLU(),
nn.Linear(hparams["lin2_size"], hparams["lin3_size"]),
nn.BatchNorm1d(hparams["lin3_size"]),
nn.ReLU(),
nn.Linear(hparams["lin3_size"], hparams["output_size"]),
)
def forward(self, x):
batch_size, channels, width, height = x.size()
        # (batch, channels, width, height) -> (batch, channels*width*height)
x = x.view(batch_size, -1)
return self.model(x)
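

# A minimal, hedged usage sketch (not part of the original file); the layer sizes below are
# illustrative assumptions. The forward pass flattens (batch, channels, width, height), so
# `input_size` must equal channels * width * height (e.g. 1 * 28 * 28 = 784 for MNIST).
if __name__ == "__main__":
    import torch

    hparams = {"input_size": 784, "lin1_size": 256, "lin2_size": 256,
               "lin3_size": 128, "output_size": 10}
    model = SimpleDenseNet(hparams)
    logits = model(torch.rand(32, 1, 28, 28))  # -> (32, 10)
    print(logits.shape)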
| fly-master | src/models/simple_dense_net.py |
| fly-master | src/models/__init__.py |
|
# Adapted from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from torchvision.ops import StochasticDepth
from timm.models.layers import to_2tuple, trunc_normal_
from einops import rearrange
from src.models.modules.seq_common import Mlp
from src.models.swin_transformer import PatchMerging, PatchEmbed
class SwinMLPBlock(nn.Module):
r""" Swin MLP Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of heads of the multi-head (grouped) spatial MLP.
        window_size (int): Window size.
        shift_size (int): Shift size for the shifted-window spatial MLP.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.padding = [self.window_size - self.shift_size, self.shift_size,
self.window_size - self.shift_size, self.shift_size] # P_l,P_r,P_t,P_b
self.norm1 = norm_layer(dim)
# use group convolution to implement multi-head MLP
self.spatial_mlp = nn.Conv1d(self.num_heads * self.window_size ** 2,
self.num_heads * self.window_size ** 2,
kernel_size=1,
groups=self.num_heads)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# shift
if self.shift_size > 0:
P_l, P_r, P_t, P_b = self.padding
shifted_x = F.pad(x, [0, 0, P_l, P_r, P_t, P_b], "constant", 0)
else:
shifted_x = x
_, _H, _W, _ = shifted_x.shape
# partition windows
x_windows_heads = rearrange(shifted_x, 'b (h_div_wsz wsz) (w_div_wsz wsz1) (nhead c)'
'-> (b h_div_wsz w_div_wsz) (nhead wsz wsz1) c',
wsz=self.window_size, wsz1=self.window_size, nhead=self.num_heads)
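        # Each window's tokens are grouped per head, so the grouped 1x1 Conv1d below mixes
        # spatial positions only within a window and within a head.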
spatial_mlp_windows = self.spatial_mlp(x_windows_heads) # nW*B, nH*window_size*window_size, C//nH
        # merge windows
shifted_x = rearrange(spatial_mlp_windows, '(b h_div_wsz w_div_wsz) (nhead wsz wsz1) c'
'-> b (h_div_wsz wsz) (w_div_wsz wsz1) (nhead c)',
h_div_wsz=_H // self.window_size, w_div_wsz=_W // self.window_size,
wsz=self.window_size, wsz1=self.window_size, nhead=self.num_heads)
# reverse shift
if self.shift_size > 0:
P_l, P_r, P_t, P_b = self.padding
x = shifted_x[:, P_t:-P_b, P_l:-P_r, :].contiguous()
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# Window/Shifted-Window Spatial MLP
if self.shift_size > 0:
nW = (H / self.window_size + 1) * (W / self.window_size + 1)
else:
nW = H * W / self.window_size / self.window_size
flops += nW * self.dim * (self.window_size * self.window_size) * (self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class BasicLayer(nn.Module):
""" A basic Swin MLP layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., drop=0., drop_path=0.,
norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinMLPBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class SwinMLP(nn.Module):
r""" Swin MLP
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin MLP layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Linear, nn.Conv1d)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
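

# A minimal, hedged usage sketch (not part of the original file): the default arguments above
# give a Swin-MLP style model for 224x224 ImageNet-like inputs; the batch size is an
# illustrative assumption.
#
#     model = SwinMLP(img_size=224, patch_size=4, in_chans=3, num_classes=1000)
#     out = model(torch.randn(2, 3, 224, 224))   # -> (2, 1000)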
| fly-master | src/models/swin_mlp.py |
import functools
import math
import copy
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch import Tensor
from src.models.modules.masking import FullMask, LengthMask
from einops import repeat
import hydra
from src.models.modules.seq_common import ClassificationHead, PositionalEncoding, Mlp
from src.models.modules.multihead_attention import MultiheadAttention
# Adapted from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
class TransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
n_head: the number of heads in the multiheadattention models (required).
d_inner: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``.
norm_first: if ``True``, layer norm is done prior to attention and feedforward
            operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, n_head=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, n_head=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
"""
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model, n_head, d_inner=2048, mha_cfg=None, attn_cfg=None, ffn_cfg=None,
dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=False, norm_first=False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.norm_first = norm_first
if mha_cfg is None:
mha_cfg = {}
if attn_cfg is None:
self.self_attn = nn.MultiheadAttention(d_model, n_head, dropout=dropout,
batch_first=batch_first, **mha_cfg,
**factory_kwargs)
self.attention_layer = None
else:
self.self_attn = MultiheadAttention(d_model, n_head, batch_first=batch_first,
**mha_cfg, **factory_kwargs)
self.attention_layer = hydra.utils.instantiate(attn_cfg, **factory_kwargs,
_recursive_=False)
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
# Implementation of Feedforward model
if ffn_cfg is None:
self.ff = Mlp(d_model, hidden_features=d_inner,
act_fn=self.activation, drop=dropout, **factory_kwargs)
else:
self.ff = hydra.utils.instantiate(ffn_cfg, **factory_kwargs, _recursive_=False)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: Tensor, src_mask=None,
src_key_padding_mask: Optional[Tensor] = None, **kwargs) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), attn_mask=src_mask,
key_padding_mask=src_key_padding_mask, **kwargs)
x = x + self.ff(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask, **kwargs))
x = self.norm2(x + self.ff(x))
return x
# self-attention block
def _sa_block(self, x: Tensor,
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor],
**kwargs) -> Tensor:
if self.attention_layer is None:
x, _ = self.self_attn(x, x, x, attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False, **kwargs)
else:
x, _ = self.self_attn(self.attention_layer, x, x, x, attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False, **kwargs)
return self.dropout1(x)
# Adapted from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoder
class TransformerEncoder(nn.Module):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, n_head=8)
        >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src: Tensor, mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None, **kwargs) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, **kwargs)
if self.norm is not None:
output = self.norm(output)
return output
# Adapted from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#Transformer
class Transformer(nn.Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
Processing Systems, pages 6000-6010. Users can build the BERT(https://arxiv.org/abs/1810.04805)
model with corresponding parameters.
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
n_head: the number of heads in the multiheadattention models (default=8).
        n_layer: the number of sub-encoder-layers in the encoder (default=6).
        d_inner: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the encoder intermediate layer, can be a string
            ("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False`` (after).
Examples::
        >>> transformer_model = Transformer(n_head=16, n_layer=12)
        >>> src = torch.rand((10, 32, 512))
        >>> out = transformer_model(src)
Note: A full example to apply nn.Transformer module for the word language model is available in
https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model: int = 512, n_head: int = 8, n_layer: int = 6, d_inner: int = 2048,
mha_cfg=None, attn_cfg=None, ffn_cfg=None,
dropout: float = 0.1, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model
self.n_head = n_head
self.batch_first = batch_first
encoder_layer = TransformerEncoderLayer(d_model, n_head, d_inner=d_inner,
mha_cfg=mha_cfg, attn_cfg=attn_cfg, ffn_cfg=ffn_cfg,
dropout=dropout,
activation=activation,
layer_norm_eps=layer_norm_eps,
batch_first=batch_first,
norm_first=norm_first,
**factory_kwargs)
encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.encoder = TransformerEncoder(encoder_layer, n_layer, encoder_norm)
self._reset_parameters()
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None, **kwargs) -> Tensor:
r"""Take in and process masked source/target sequences.
Args:
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
src_mask: the additive mask for the src sequence (optional).
tgt_mask: the additive mask for the tgt sequence (optional).
memory_mask: the additive mask for the encoder output (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`, `(N, S, E)` if batch_first.
- tgt: :math:`(T, N, E)`, `(N, T, E)` if batch_first.
- src_mask: :math:`(S, S)`.
- tgt_mask: :math:`(T, T)`.
- memory_mask: :math:`(T, S)`.
- src_key_padding_mask: :math:`(N, S)`.
- tgt_key_padding_mask: :math:`(N, T)`.
- memory_key_padding_mask: :math:`(N, S)`.
Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
[src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- output: :math:`(T, N, E)`, `(N, T, E)` if batch_first.
Note: Due to the multi-head attention architecture in the transformer model,
the output sequence length of a transformer is same as the input sequence
(i.e. target) length of the decode.
where S is the source sequence length, T is the target sequence length, N is the
batch size, E is the feature number
Examples:
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
"""
        output = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask,
                              **kwargs)
return output
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
class TransformerClassifier(nn.Module):
def __init__(self, d_model: int, n_head: int, n_layer: int, d_inner: int, num_classes: int,
mha_cfg=None, attn_cfg=None, ffn_cfg=None, embedding_cfg=None,
pos_encoding_cfg=None, classifier_cfg=None,
norm_first=False,
dropout: float = 0.1, activation: str = "relu", layer_norm_eps: float = 1e-5,
batch_first: bool = False, pooling_mode='MEAN') -> None:
super().__init__()
assert pooling_mode in ['MEAN', 'SUM', 'CLS'], 'pooling_mode not supported'
self.pooling_mode = pooling_mode
if pooling_mode == 'CLS':
self.cls = nn.Parameter(torch.zeros(1, 1, d_model))
if pos_encoding_cfg is not None and 'max_len' in pos_encoding_cfg:
pos_encoding_cfg.max_len += 1
self.word_emb = (nn.Identity() if embedding_cfg is None
else hydra.utils.instantiate(embedding_cfg, _recursive_=False))
if pos_encoding_cfg is None:
self.pos_encoder = PositionalEncoding(d_model, dropout, batch_first=batch_first)
else:
self.pos_encoder = hydra.utils.instantiate(pos_encoding_cfg, d_model,
batch_first=batch_first, _recursive_=False)
self.batch_first = batch_first
self.transformer = Transformer(d_model, n_head, n_layer, d_inner, mha_cfg, attn_cfg,
ffn_cfg, dropout, activation, layer_norm_eps,
batch_first, norm_first)
if classifier_cfg is None:
self.classifier = ClassificationHead(d_model, d_inner, num_classes,
pooling_mode=pooling_mode, batch_first=batch_first)
else:
self.classifier = hydra.utils.instantiate(
classifier_cfg, d_model=d_model, d_inner=d_inner, num_classes=num_classes,
pooling_mode=pooling_mode, batch_first=batch_first, _recursive_=False
)
def forward_features(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None, lengths=None, **kwargs) -> Tensor:
if lengths is not None:
src_key_padding_mask = LengthMask(lengths,
max_len=src.size(1 if self.batch_first else 0),
device=src.device)
src = self.word_emb(src)
if self.pooling_mode == 'CLS':
cls = repeat(self.cls, '1 1 d -> b 1 d' if self.batch_first else '1 1 d -> 1 b d',
b=src.shape[0 if self.batch_first else 1])
src = torch.cat([cls, src], dim=1 if self.batch_first else 0)
# Adjust masks
if src_key_padding_mask is not None:
assert isinstance(src_key_padding_mask, LengthMask)
src_key_padding_mask = LengthMask(src_key_padding_mask._lengths + 1,
max_len=src_key_padding_mask._max_len + 1,
device=src_key_padding_mask._lengths.device)
if src_mask is not None:
assert isinstance(src_mask, FullMask)
src_mask = FullMask(F.pad(src_mask._mask, (1, 0, 1, 0), value=True))
src = self.pos_encoder(src)
features = self.transformer(src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask,
**kwargs)
return features, src_key_padding_mask
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None, lengths=None, **kwargs) -> Tensor:
features, src_key_padding_mask = self.forward_features(
src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, lengths=lengths,
**kwargs
)
return self.classifier(features, key_padding_mask=src_key_padding_mask)
class TransformerDualClassifier(TransformerClassifier):
def forward(self, src1: Tensor, src2: Tensor,
src1_mask: Optional[Tensor] = None,
src2_mask: Optional[Tensor] = None,
src1_key_padding_mask: Optional[Tensor] = None,
src2_key_padding_mask: Optional[Tensor] = None,
lengths1=None, lengths2=None,
**kwargs) -> Tensor:
features1, src1_key_padding_mask = self.forward_features(
src1, src_mask=src1_mask, src_key_padding_mask=src1_key_padding_mask, lengths=lengths1,
**kwargs
)
features2, src2_key_padding_mask = self.forward_features(
src2, src_mask=src2_mask, src_key_padding_mask=src2_key_padding_mask, lengths=lengths2,
**kwargs
)
return self.classifier(features1, features2,
key_padding_mask1=src1_key_padding_mask,
key_padding_mask2=src2_key_padding_mask)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
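

# A minimal, hedged usage sketch (not part of the original file): a vanilla encoder-only
# classifier using the default nn.MultiheadAttention path (attn_cfg=None) and an identity
# word embedding, so inputs must already be d_model-dimensional. All values are illustrative
# assumptions.
#
#     model = TransformerClassifier(d_model=256, n_head=8, n_layer=4, d_inner=1024,
#                                   num_classes=2, batch_first=True, pooling_mode='MEAN')
#     x = torch.randn(8, 128, 256)    # (batch, seq_len, d_model)
#     logits = model(x)               # (8, 2)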
| fly-master | src/models/transformer.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from einops import rearrange, repeat
import transformers
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.bert.modeling_bert import load_tf_weights_in_bert
from transformers.models.bert.modeling_bert import BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING
import hydra
from src.ops.bert_padding import unpad_input, pad_input, index_first_axis
from src.ops.triton.softmax_dropout import softmax_dropout
from src.ops.fused_dropout_add import fused_dropout_add, fused_bias_dropout_add
from src.ops.gelu_activation import bias_gelu_impl, fast_gelu_impl
ACT2FN['fast_gelu'] = fast_gelu_impl
try:
from src.ops.fused_softmax_dropout import fused_softmax_dropout
except ImportError:
fused_softmax_dropout = None
try:
from src.ops.bert_fmha import fmha_func
except ImportError:
fmha_func = None
try:
from src.ops.fused_dense import FusedDenseMine
except ImportError:
FusedDenseMine = None
try:
from apex.contrib.layer_norm import FastLayerNorm as BertLayerNorm
except ImportError:
BertLayerNorm = nn.LayerNorm
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
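# TorchScript fusion settings: enable nvfuser and disable the legacy TensorExpr/CPU/GPU fusers,
# so that JIT-scripted fused ops (e.g. the fused dropout-add and bias-GeLU used in this file)
# are compiled through nvfuser.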
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # Setting token_type_ids to the registered buffer in the constructor, where it is all zeros
        # (which usually occurs when it is auto-generated), helps users trace the model without
        # passing token_type_ids and solves issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
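        # softmax_impl selects how softmax + dropout is computed in forward():
        # 'pytorch' (eager softmax + nn.Dropout), 'triton' (fused Triton softmax-dropout kernel),
        # or 'fused' (the softmaxlib CUDA extension).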
self.softmax_impl = getattr(config, 'softmax_impl', 'pytorch')
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
else:
bsz, num_heads, q_seq_len, _ = query_layer.shape
_, _, k_seq_len, _ = key_layer.shape
q = rearrange(query_layer, 'b h t d -> (b h) t d')
k = rearrange(key_layer, 'b h s d -> (b h) d s')
# Preallocate attention_scores for `baddbmm`
attention_scores = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype,
device=q.device)
attention_scores = torch.baddbmm(attention_scores, q, k, beta=0,
alpha=self.attention_head_size ** (-0.5)) # (b h) t s
attention_scores = rearrange(attention_scores, '(b h) t s -> b h t s',
h=self.num_attention_heads)
if self.softmax_impl == 'triton':
mask = None if attention_mask is None else attention_mask.to(dtype=attention_scores.dtype)
attention_probs = softmax_dropout(attention_scores,
self.dropout.p if self.training else 0.0,
mask=mask,
mask_type='bk')
elif self.softmax_impl == 'fused':
if fused_softmax_dropout is None:
raise ImportError('Failed to import fused_softmax_dropout. Please install the '
'softmaxlib CUDA extension')
mask = None if attention_mask is None else attention_mask.to(dtype=attention_scores.dtype)
attention_probs = fused_softmax_dropout(
attention_scores,
self.dropout.p if self.training else 0.0,
mask=mask
)
else:
if attention_mask is not None:
                # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask.to(dtype=attention_scores.dtype)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=value_layer.dtype)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class FMHABertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.p_dropout = config.attention_probs_dropout_prob
self.fuse_bias = getattr(config, 'fused_bias_mha', False)
linear_cls = nn.Linear if not self.fuse_bias else FusedDenseMine
self.Wqkv = linear_cls(self.all_head_size, 3 * config.hidden_size)
def forward(self, hidden_states, cu_seqlens, max_seqlen_in_batch):
"""
Arguments:
hidden_states: (total_nnz, dim)
cu_seqlens: (batch + 1,), torch.int32
max_seqlen_in_batch: int
Return:
context: (total_nnz, dim)
"""
qkv = self.Wqkv(hidden_states) # (total_nnz, 3 * dim)
qkv = rearrange(qkv, 'nnz (t h d) -> nnz t h d', t=3, h=self.num_attention_heads)
context = fmha_func(qkv, cu_seqlens, self.p_dropout, max_seqlen_in_batch, self.training)
return rearrange(context, 'nnz h d -> nnz (h d)')
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.fuse_bias = getattr(config, 'fused_bias_mha', False)
self.fused_dropout_add = getattr(config, 'fused_dropout_add', False)
linear_cls = nn.Linear if not self.fuse_bias else FusedDenseMine
self.dense = linear_cls(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
if not self.fused_dropout_add:
hidden_states = self.dropout(hidden_states) + input_tensor
else:
hidden_states = fused_dropout_add(hidden_states, input_tensor, self.dropout.p,
self.training)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class FMHABertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = FMHABertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, cu_seqlens, max_s, subset_idx=None):
"""subset_idx: set of indices whose values we care about at the end of the layer
(e.g., the masked tokens, if this is the final layer).
"""
self_output = self.self(input_tensor, cu_seqlens, max_s)
if subset_idx is not None:
# return self.output(self_output[subset_idx], input_tensor[subset_idx])
return self.output(index_first_axis(self_output, subset_idx),
index_first_axis(input_tensor, subset_idx))
else:
return self.output(self_output, input_tensor)
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.fuse_bias = getattr(config, 'fused_bias_fc', False)
self.fused_gelu_bias = getattr(config, 'fused_gelu_bias', False)
linear_cfg = getattr(config, 'linear1_cfg', None)
if linear_cfg is None:
linear_cls = nn.Linear if not self.fuse_bias else FusedDenseMine
self.dense = linear_cls(config.hidden_size, config.intermediate_size)
else:
self.dense = hydra.utils.instantiate(linear_cfg, config.hidden_size,
config.intermediate_size)
if self.fused_gelu_bias:
self.intermediate_act_fn = bias_gelu_impl
assert isinstance(self.dense, nn.Linear) or hasattr(self.dense, 'forward_matmul')
else:
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
if not self.fused_gelu_bias:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
else:
if isinstance(self.dense, nn.Linear):
hidden_states = F.linear(hidden_states, self.dense.weight)
else:
hidden_states = self.dense.forward_matmul(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states, self.dense.bias)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.fuse_bias = getattr(config, 'fused_bias_fc', False)
self.fused_dropout_add = getattr(config, 'fused_dropout_add', False)
self.fused_bias_dropout_add = getattr(config, 'fused_bias_dropout_add', False)
linear_cfg = getattr(config, 'linear2_cfg', None)
if linear_cfg is None:
linear_cls = nn.Linear if not self.fuse_bias else FusedDenseMine
self.dense = linear_cls(config.intermediate_size, config.hidden_size)
else:
self.dense = hydra.utils.instantiate(linear_cfg, config.intermediate_size,
config.hidden_size)
if self.fused_bias_dropout_add:
assert isinstance(self.dense, nn.Linear) or hasattr(self.dense, 'forward_matmul')
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
if not self.fused_bias_dropout_add:
hidden_states = self.dense(hidden_states)
if not self.fused_dropout_add:
hidden_states = self.dropout(hidden_states) + input_tensor
else:
hidden_states = fused_dropout_add(hidden_states, input_tensor, self.dropout.p,
self.training)
else:
if isinstance(self.dense, nn.Linear):
hidden_states = F.linear(hidden_states, self.dense.weight)
else:
hidden_states = self.dense.forward_matmul(hidden_states)
hidden_states = fused_bias_dropout_add(hidden_states, self.dense.bias, input_tensor,
self.dropout.p, self.training)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertAttention(config, position_embedding_type="absolute")
if getattr(config, 'mlp_cfg', None) is None:
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.mlp = None
else:
self.mlp = hydra.utils.instantiate(config.mlp_cfg, in_features=config.hidden_size,
hidden_features=config.intermediate_size,
drop=config.hidden_dropout_prob, _recursive_=False)
self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
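        # Default BERT path uses the Intermediate/Output pair (the LayerNorm lives inside
        # BertOutput); a custom mlp_cfg replaces both, with the residual add and
        # LayerNorm applied explicitly here.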
if self.mlp is None:
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
else:
layer_output = self.layer_norm(attention_output + self.mlp(attention_output))
return layer_output
class BertLayerUnpad(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = FMHABertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, seqlen, subset_idx=None):
"""subset_idx: set of indices whose values we care about at the end of the layer
(e.g., the masked tokens, if this is the final layer).
"""
attention_output = self.attention(hidden_states, attention_mask, seqlen, subset_idx)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.unpad_fmha = getattr(config, 'unpad_fmha', False)
self.config = config
layer_cls = BertLayer if not self.unpad_fmha else BertLayerUnpad
self.layer = nn.ModuleList([layer_cls(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
subset_mask=None
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
if self.unpad_fmha:
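            # Unpadded FMHA path: strip padding tokens so attention runs over a packed
            # (total_tokens, hidden) tensor; `indices` and `cu_seqlens` record where each
            # sequence starts so the output can be re-padded afterwards.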
attention_mask_bool = rearrange(attention_mask, 'b 1 1 s -> b s') == 0.0
batch, seqlen = hidden_states.shape[:2]
hidden_states, indices, cu_seqlens, max_seqlen_in_batch = unpad_input(
hidden_states, attention_mask_bool
)
if subset_mask is None:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, cu_seqlens, max_seqlen_in_batch)
hidden_states = pad_input(hidden_states, indices, batch, seqlen)
else:
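                # Last-layer-subset path: every token goes through all but the final
                # layer; the final layer only produces outputs for subset_idx (e.g. the
                # masked tokens plus CLS), and the result is returned unpadded.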
for layer_module in self.layer[:-1]:
hidden_states = layer_module(hidden_states, cu_seqlens, max_seqlen_in_batch)
subset_idx = torch.nonzero(subset_mask[attention_mask_bool], as_tuple=False).flatten()
hidden_states = self.layer[-1](hidden_states, cu_seqlens, max_seqlen_in_batch,
subset_idx=subset_idx)
else:
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states, pool=True):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0] if pool else hidden_states
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
# TD [2022-02-25] Somehow using FastLayerNorm here gives CUDA error: misaligned address
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.fused_fc = getattr(config, 'fused_bias_fc_loss_head', False)
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
linear_cls = nn.Linear if not self.fused_fc else FusedDenseMine
self.decoder = linear_cls(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, BertLayerNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BertEncoder):
module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
masked_tokens_mask=None
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
if masked_tokens_mask is not None:
# We also need the first column for the CLS token
first_col_mask = torch.zeros(batch_size, seq_length, dtype=torch.bool, device=device)
first_col_mask[:, 0] = True
subset_mask = masked_tokens_mask | first_col_mask
else:
subset_mask = None
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
subset_mask=subset_mask
)
if masked_tokens_mask is None:
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
else:
# TD [2022-03-01]: the indexing here is very tricky.
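            # encoder_outputs[0] here only contains the subset tokens (masked tokens and
            # the CLS token of each sequence), in unpadded order. Restricting the boolean
            # masks to the unpadded tokens and then to that subset splits the tensor into
            # masked-token rows (for the MLM head) and CLS rows (for pooling).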
attention_mask_bool = attention_mask > 0
subset_idx = subset_mask[attention_mask_bool]
sequence_output = encoder_outputs[0][masked_tokens_mask[attention_mask_bool][subset_idx]]
pool_input = encoder_outputs[0][first_col_mask[attention_mask_bool][subset_idx]]
pooled_output = (self.pooler(pool_input, pool=False)
if self.pooler is not None else None)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Padding for divisibility by 8
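        # Padding the vocabulary to a multiple of 8 keeps the large output-projection
        # GEMM aligned for tensor cores; the extra vocab entries never appear as labels.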
self.pad_vocab_size_multiple_8 = getattr(config, 'pad_vocab_size_multiple_8', False)
if self.pad_vocab_size_multiple_8:
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
# if dense_seq_output, prediction scores are only computed for masked tokens,
# and the (bs, seqlen) dimensions are flattened
self.dense_seq_output = getattr(config, 'dense_seq_output', False)
        # If last_layer_subset, we only need to compute the last layer for a subset of tokens
# (e.g., the tokens we need to compute the masked LM loss and the next-sentence prediction).
self.last_layer_subset = getattr(config, 'last_layer_subset', False)
if self.last_layer_subset:
assert getattr(config, 'unpad_fmha', False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForPreTraining.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
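        # With the standard HF collator, non-masked positions carry label -100, so
        # `labels >= 0` marks exactly the masked tokens whose outputs are needed.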
masked_tokens_mask = labels >= 0 if (self.last_layer_subset and labels is not None) else None
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
masked_tokens_mask=masked_tokens_mask
)
sequence_output, pooled_output = outputs[:2]
        masked_token_idx = None  # stays None unless dense_seq_output flattens to masked tokens
        if self.dense_seq_output and labels is not None:
masked_token_idx = torch.nonzero(labels.flatten() >= 0, as_tuple=False).flatten()
if not self.last_layer_subset:
sequence_output = index_first_axis(rearrange(sequence_output, 'b s d -> (b s) d'),
masked_token_idx)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
if masked_token_idx is not None: # prediction_scores are already flattened
masked_lm_loss = loss_fct(prediction_scores, labels.flatten()[masked_token_idx])
else:
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be
in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in `[0, ...,
config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
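        # MLM models are not autoregressive; generation appends a PAD token so there is
        # a position at which to predict the next token (mirroring the HF implementation).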
if self.config.pad_token_id is None:
raise ValueError("The PAD token should be defined for generation")
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top.""",
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
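            # Infer the problem type on first use: a single label means regression,
            # integer labels mean single-label classification, anything else is treated
            # as multi-label classification.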
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
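# A minimal usage sketch (kept as comments, since this module is imported as a library);
# it assumes the usual HF BertConfig fields and leaves the repo-specific fused/unpadded
# options at their defaults (off):
#
#   config = BertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12,
#                       num_attention_heads=12, intermediate_size=3072)
#   model = BertForPreTraining(config)
#   input_ids = torch.randint(0, config.vocab_size, (2, 128))
#   attention_mask = torch.ones_like(input_ids)
#   outputs = model(input_ids, attention_mask=attention_mask)
#   print(outputs.prediction_logits.shape)  # (2, 128, vocab_size)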
| fly-master | src/models/bert.py |
# Adapted from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from torchvision.ops import StochasticDepth
from timm.models.layers import to_2tuple, trunc_normal_
from einops import rearrange
from src.models.modules.seq_common import Mlp
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
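            # `mask` holds one (N, N) bias block per window (nW of them); reshape the
            # batch so it broadcasts over heads and over the per-image windows, blocking
            # attention between tokens that came from different shifted sub-windows.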
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
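            # Each token now carries the label of the shifted-image region it came from;
            # pairs of tokens with different labels get a -100 bias below, so the shifted
            # windows only attend within their original (pre-shift) neighbourhoods.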
mask_windows = rearrange(img_mask, 'b (h_div_wsz wsz) (w_div_wsz wsz1) 1'
'-> (b h_div_wsz w_div_wsz) (wsz wsz1)',
wsz=self.window_size, wsz1=self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = rearrange(shifted_x, 'b (h_div_wsz wsz) (w_div_wsz wsz1) c'
'-> (b h_div_wsz w_div_wsz) (wsz wsz1) c',
wsz=self.window_size, wsz1=self.window_size)
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
shifted_x = rearrange(attn_windows, '(b h_div_wsz w_div_wsz) (wsz wsz1) c'
'-> b (h_div_wsz wsz) (w_div_wsz wsz1) c',
h_div_wsz=H // self.window_size, w_div_wsz=W // self.window_size,
wsz=self.window_size)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
# TD [2021-12-09] Idk why they swap the order of p and p1, but they should be equivalent.
x = rearrange(x, 'b (h p) (w p1) c -> b (h w) (p1 p c)', p=2, p1=2)
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
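# --- Usage sketch (added for illustration; not part of the upstream Swin code) ---
# The hyperparameters below are the standard Swin-T settings and are assumptions here,
# chosen only to show how the class above is instantiated and called.
if __name__ == "__main__":
    model = SwinTransformer(img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                            embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                            window_size=7)
    images = torch.randn(2, 3, 224, 224)    # (batch, channels, height, width)
    logits = model(images)                  # (2, 1000)
    print(logits.shape, f"~{model.flops() / 1e9:.1f} GFLOPs")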
| fly-master | src/models/swin_transformer.py |
# Copied from https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.layers.spectral_conv import SpectralConv1d, SpectralConv2d
class FourierOperator1d(nn.Module):
def __init__(self, modes, width):
super().__init__()
self.modes = modes
self.width = width
self.conv = SpectralConv1d(self.width, self.width, self.modes)
self.w = nn.Conv1d(self.width, self.width, 1)
def forward(self, x):
return F.gelu(self.conv(x) + self.w(x))
class FNO1d(nn.Module):
def __init__(self, modes, width, nlayers=4, padding=0):
super(FNO1d, self).__init__()
"""
        The overall network. It contains `nlayers` (default 4) Fourier layers.
        1. Lift the input to the desired channel dimension by self.fc0 .
        2. `nlayers` layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.nlayers = nlayers
self.padding = padding # pad the domain if input is non-periodic
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.layers = nn.Sequential(*[FourierOperator1d(self.modes1, self.width)
for _ in range(self.nlayers)])
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.stack([x, grid], dim=-1)
x = self.fc0(x)
x = rearrange(x, 'b x c -> b c x')
if self.padding != 0:
x = F.pad(x, [0,self.padding]) # pad the domain if input is non-periodic
# FNO code doesn't apply activation on the last block, but we do for code's simplicity.
# Performance seems about the same.
x = self.layers(x)
if self.padding != 0:
x = x[..., :-self.padding] # pad the domain if input is non-periodic
x = rearrange(x, 'b c x -> b x c')
x = self.fc2(F.gelu(self.fc1(x)))
return rearrange(x, 'b x 1 -> b x')
def get_grid(self, shape, device):
batchsize, size_x = shape[0], shape[1]
return repeat(torch.linspace(0, 1, size_x, dtype=torch.float, device=device),
'x -> b x', b=batchsize)
class FourierOperator2d(nn.Module):
def __init__(self, modes1, modes2, width):
super().__init__()
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.conv = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w = nn.Conv2d(self.width, self.width, 1)
def forward(self, x):
return F.gelu(self.conv(x) + self.w(x))
class FNO2d(nn.Module):
def __init__(self, modes1, modes2, width, nlayers=4, padding=0):
super(FNO2d, self).__init__()
"""
        The overall network. It contains `nlayers` (default 4) Fourier layers.
        1. Lift the input to the desired channel dimension by self.fc0 .
        2. `nlayers` layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the coefficient function and locations (a(x, y), x, y)
input shape: (batchsize, x=s, y=s, c=3)
output: the solution
output shape: (batchsize, x=s, y=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.nlayers = nlayers
self.padding = padding # pad the domain if input is non-periodic
self.fc0 = nn.Linear(3, self.width) # input channel is 3: (a(x, y), x, y)
self.layers = nn.Sequential(*[FourierOperator2d(self.modes1, self.modes2, self.width)
for _ in range(self.nlayers)])
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((rearrange(x, 'b x y -> b x y 1'), grid), dim=-1)
x = self.fc0(x)
x = rearrange(x, 'b x y c -> b c x y')
if self.padding != 0:
x = F.pad(x, [0, self.padding, 0, self.padding])
# FNO code doesn't apply activation on the last block, but we do for code's simplicity.
# Performance seems about the same
x = self.layers(x)
if self.padding != 0:
x = x[..., :-self.padding, :-self.padding]
x = rearrange(x, 'b c x y -> b x y c')
x = self.fc2(F.gelu(self.fc1(x)))
return rearrange(x, 'b x y 1 -> b x y')
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = repeat(torch.linspace(0, 1, size_x, dtype=torch.float, device=device),
'x -> b x y', b=batchsize, y=size_y)
gridy = repeat(torch.linspace(0, 1, size_y, dtype=torch.float, device=device),
'y -> b x y', b=batchsize, x=size_x)
return torch.stack([gridx, gridy], dim=-1)
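# --- Usage sketch (added for illustration; not part of the original FNO code) ---
# Shapes follow the forward() methods above: FNO1d takes a batch of 1D functions sampled on a
# regular grid, (batch, s); FNO2d takes 2D coefficient fields, (batch, s, s). The grid
# coordinates are appended internally by get_grid(). The sizes and mode counts below are
# illustrative assumptions, and SpectralConv1d/2d are assumed to follow the usual
# (batch, channels, spatial...) convention.
if __name__ == "__main__":
    model_1d = FNO1d(modes=16, width=64)
    u1 = model_1d(torch.randn(8, 256))        # -> (8, 256)
    model_2d = FNO2d(modes1=12, modes2=12, width=32)
    u2 = model_2d(torch.randn(4, 64, 64))     # -> (4, 64, 64)
    print(u1.shape, u2.shape)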
| fly-master | src/models/fno.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py
""" MLP-Mixer, ResMLP, and gMLP in PyTorch
This impl originally based on MLP-Mixer paper.
Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
@article{tolstikhin2021,
title={MLP-Mixer: An all-MLP Architecture for Vision},
author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner,
Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP
Code: https://github.com/facebookresearch/deit
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
@misc{touvron2021resmlp,
title={ResMLP: Feedforward networks for image classification with data-efficient training},
author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and
Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou},
year={2021},
eprint={2105.03404},
}
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
@misc{liu2021pay,
title={Pay Attention to MLPs},
author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le},
year={2021},
eprint={2105.08050},
}
A thank you to paper authors for releasing code and weights.
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
from copy import deepcopy
from functools import partial
import torch
import torch.nn as nn
from torchvision.ops import StochasticDepth
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, named_apply
from timm.models.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, lecun_normal_, to_2tuple
from timm.models.registry import register_model
import hydra
from src.models.layers.fastlinear import LowRank, SparseLRLinear
from src.models.layers.blocksparse_linear import BlockSparseLinear
from src.models.layers.blockdiag_linear import BlockdiagLinear
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = dict(
mixer_s32_224=_cfg(),
mixer_s16_224=_cfg(),
mixer_b32_224=_cfg(),
mixer_b16_224=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth',
),
mixer_b16_224_in21k=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth',
num_classes=21843
),
mixer_l32_224=_cfg(),
mixer_l16_224=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth',
),
mixer_l16_224_in21k=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth',
num_classes=21843
),
# Mixer ImageNet-21K-P pretraining
mixer_b16_224_miil_in21k=_cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil_in21k.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
),
mixer_b16_224_miil=_cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
),
gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
gmixer_24_224=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_12_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_24_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth',
#url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_36_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_big_24_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_12_distilled_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_24_distilled_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_36_distilled_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_big_24_distilled_224=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
resmlp_big_24_224_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
gmlp_ti16_224=_cfg(),
gmlp_s16_224=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth',
),
gmlp_b16_224=_cfg(),
)
class MixerBlock(nn.Module):
""" Residual Block w/ token mixing and channel MLPs
Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
def __init__(
self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp,
token_mlp_cfg=None, channel_mlp_cfg=None,
norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.):
super().__init__()
tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)]
self.norm1 = norm_layer(dim)
if token_mlp_cfg is None:
self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop)
else:
self.mlp_tokens = hydra.utils.instantiate(token_mlp_cfg, in_features=seq_len,
hidden_features=tokens_dim,
act_layer=act_layer, drop=drop,
_recursive_=False)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(dim)
if channel_mlp_cfg is None:
self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)
else:
self.mlp_channels = hydra.utils.instantiate(channel_mlp_cfg, in_features=dim,
hidden_features=channels_dim,
act_layer=act_layer, drop=drop,
_recursive_=False)
def forward(self, x):
x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2))
x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
return x
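# --- Added illustration (not part of the upstream timm code) ---
# A MixerBlock alternates two MLPs: mlp_tokens mixes across the patch/sequence axis (hence the
# transpose(1, 2) in forward above), and mlp_channels mixes across the embedding axis. A minimal
# sketch with assumed ViT-style shapes (196 = 14x14 patches from a 224/16 stem):
def _mixer_block_example():
    blk = MixerBlock(dim=512, seq_len=196)
    tokens = torch.randn(2, 196, 512)          # (batch, num_patches, dim)
    return blk(tokens).shape                   # torch.Size([2, 196, 512])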
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, x):
return torch.addcmul(self.beta, self.alpha, x)
class ResBlock(nn.Module):
""" Residual MLP block w/ LayerScale and Affine 'norm'
Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
def __init__(
self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine,
act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.):
super().__init__()
channel_dim = int(dim * mlp_ratio)
self.norm1 = norm_layer(dim)
self.linear_tokens = nn.Linear(seq_len, seq_len)
self.drop_path = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop)
self.ls1 = nn.Parameter(init_values * torch.ones(dim))
self.ls2 = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2))
x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x)))
return x
class SpatialGatingUnit(nn.Module):
""" Spatial Gating Unit
Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm):
super().__init__()
gate_dim = dim // 2
self.norm = norm_layer(gate_dim)
self.proj = nn.Linear(seq_len, seq_len)
def init_weights(self):
# special init for the projection gate, called as override by base model init
nn.init.normal_(self.proj.weight, std=1e-6)
nn.init.ones_(self.proj.bias)
def forward(self, x):
u, v = x.chunk(2, dim=-1)
v = self.norm(v)
v = self.proj(v.transpose(-1, -2))
return u * v.transpose(-1, -2)
class SpatialGatingBlock(nn.Module):
""" Residual Block w/ Spatial Gating
Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
def __init__(
self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.):
super().__init__()
channel_dim = int(dim * mlp_ratio)
self.norm = norm_layer(dim)
sgu = partial(SpatialGatingUnit, seq_len=seq_len)
self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop)
self.drop_path = StochasticDepth(drop_path, mode='row')
def forward(self, x):
x = x + self.drop_path(self.mlp_channels(self.norm(x)))
return x
class MlpMixer(nn.Module):
def __init__(
self,
num_classes=1000,
img_size=224,
in_chans=3,
patch_size=16,
num_blocks=8,
embed_dim=512,
mlp_ratio=(0.5, 4.0),
block_layer=MixerBlock,
mlp_layer=Mlp,
token_mlp_cfg=None,
channel_mlp_cfg=None,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop_rate=0.,
drop_path_rate=0.,
nlhb=False,
stem_norm=False,
):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.stem = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None)
# FIXME drop_path (stochastic depth scaling rule or all the same?)
self.blocks = nn.Sequential(*[
block_layer(
embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer,
token_mlp_cfg=token_mlp_cfg, channel_mlp_cfg=channel_mlp_cfg,
norm_layer=norm_layer,
act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate)
for _ in range(num_blocks)])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(nlhb=nlhb)
def init_weights(self, nlhb=False):
head_bias = -math.log(self.num_classes) if nlhb else 0.
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
x = self.blocks(x)
x = self.norm(x)
x = x.mean(dim=1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):
""" Mixer weight initialization (trying to match Flax defaults)
"""
if isinstance(module, (nn.Linear, BlockSparseLinear, BlockdiagLinear, LowRank, SparseLRLinear)):
if name.startswith('head') and isinstance(module, nn.Linear):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# like MLP init in vit (my original init)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
dense_init_fn_ = nn.init.xavier_uniform_
if isinstance(module, nn.Linear):
dense_init_fn_(module.weight)
elif isinstance(module, (BlockSparseLinear, BlockdiagLinear, LowRank)):
module.set_weights_from_dense_init(dense_init_fn_)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
# NOTE if a parent module contains init_weights method, it can override the init of the
# child modules as this will be called in depth-first order.
module.init_weights()
def checkpoint_filter_fn(state_dict, model):
""" Remap checkpoints if needed """
if 'patch_embed.proj.weight' in state_dict:
# Remap FB ResMlp models -> timm
out_dict = {}
for k, v in state_dict.items():
k = k.replace('patch_embed.', 'stem.')
k = k.replace('attn.', 'linear_tokens.')
k = k.replace('mlp.', 'mlp_channels.')
k = k.replace('gamma_', 'ls')
if k.endswith('.alpha') or k.endswith('.beta'):
v = v.reshape(1, 1, -1)
out_dict[k] = v
return out_dict
return state_dict
def _create_mixer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for MLP-Mixer models.')
model = build_model_with_cfg(
MlpMixer, variant, pretrained,
default_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def mixer_s32_224(pretrained=False, **kwargs):
""" Mixer-S/32 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs)
model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_s16_224(pretrained=False, **kwargs):
""" Mixer-S/16 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs)
model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b32_224(pretrained=False, **kwargs):
""" Mixer-B/32 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b16_224(pretrained=False, **kwargs):
""" Mixer-B/16 224x224. ImageNet-1k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b16_224_in21k(pretrained=False, **kwargs):
""" Mixer-B/16 224x224. ImageNet-21k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_l32_224(pretrained=False, **kwargs):
""" Mixer-L/32 224x224.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_l16_224(pretrained=False, **kwargs):
""" Mixer-L/16 224x224. ImageNet-1k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_l16_224_in21k(pretrained=False, **kwargs):
""" Mixer-L/16 224x224. ImageNet-21k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b16_224_miil(pretrained=False, **kwargs):
""" Mixer-B/16 224x224. ImageNet-21k pretrained weights.
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b16_224_miil_in21k(pretrained=False, **kwargs):
""" Mixer-B/16 224x224. ImageNet-1k pretrained weights.
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args)
return model
@register_model
def gmixer_12_224(pretrained=False, **kwargs):
""" Glu-Mixer-12 224x224
Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer
"""
model_args = dict(
patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0),
mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmixer_24_224(pretrained=False, **kwargs):
""" Glu-Mixer-24 224x224
Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer
"""
model_args = dict(
patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0),
mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_12_224(pretrained=False, **kwargs):
""" ResMLP-12
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_24_224(pretrained=False, **kwargs):
""" ResMLP-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_36_224(pretrained=False, **kwargs):
""" ResMLP-36
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_big_24_224(pretrained=False, **kwargs):
""" ResMLP-B-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_12_distilled_224(pretrained=False, **kwargs):
""" ResMLP-12
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_24_distilled_224(pretrained=False, **kwargs):
""" ResMLP-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_36_distilled_224(pretrained=False, **kwargs):
""" ResMLP-36
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_big_24_distilled_224(pretrained=False, **kwargs):
""" ResMLP-B-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs):
""" ResMLP-B-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_ti16_224(pretrained=False, **kwargs):
""" gMLP-Tiny
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_s16_224(pretrained=False, **kwargs):
""" gMLP-Small
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_b16_224(pretrained=False, **kwargs):
""" gMLP-Base
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args)
return model
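# --- Usage sketch (added for illustration; not part of the upstream timm code) ---
# Instantiating MlpMixer directly with Mixer-S/16-like settings (assumed values; see the
# registered variants above). Structured token/channel MLPs from this repo (BlockSparseLinear,
# BlockdiagLinear, LowRank, ...) would instead be supplied through token_mlp_cfg /
# channel_mlp_cfg, which MixerBlock instantiates via Hydra.
if __name__ == "__main__":
    model = MlpMixer(num_classes=1000, img_size=224, patch_size=16, num_blocks=8, embed_dim=512)
    x = torch.randn(2, 3, 224, 224)
    print(model(x).shape)                      # torch.Size([2, 1000])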
| fly-master | src/models/mlp_mixer.py |
# Adapted from https://github.com/lucidrains/performer-pytorch/blob/main/performer_pytorch/performer_pytorch.py
import math
import torch
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
    APEX_AVAILABLE = False
# helpers
@contextmanager
def null_context():
yield
# kernel functions
# transcribed from jax to pytorch from
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
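# Added explanatory note: softmax_kernel computes the positive random features of FAVOR+.
# With x_hat = data * d**-0.25 (for the default temperature 1/sqrt(d)), each feature is
# exp(omega . x_hat - ||x_hat||**2 / 2) / sqrt(m), so that E[phi(q) . phi(k)] = exp(q . k / sqrt(d)).
# The max-subtraction (per query row, and over the whole sequence for keys) is purely for
# numerical stability and cancels once the attention weights are normalized.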
def softmax_kernel(data, *, projection_matrix, is_query, softmax_temp=None, eps=1e-4):
"""For key, we expect shape (b, h, s, d) where s is the sequence dimension
"""
b, h, _, d = data.shape
if softmax_temp is None:
softmax_temp = 1 / math.sqrt(d)
data_normalizer = math.sqrt(softmax_temp)
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.amax(data_dash, dim=-1, keepdim=True)) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.amax(data_dash, dim=(-1, -2), keepdim=True)) + eps)
return data_dash.type_as(data)
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(), kernel_epsilon=0.001,
normalize_data=True):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
# linear attention classes with softmax kernel
# non-causal linear attention
# By default Performer uses eps=0.0 here
def linear_attention(q, k, v, eps=0.0, need_weights=False):
k_cumsum = k.sum(dim=-2)
D_inv = 1. / (torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) + eps)
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
    attn = None if not need_weights else torch.einsum('...te,...se,...t->...ts', q, k, D_inv)
return out, attn
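# --- Added illustration (not part of the upstream Performer code) ---
# How the pieces above fit together: map q and k through softmax_kernel with a shared random
# projection, then call linear_attention on the resulting features; this approximates softmax
# attention in O(n * m * d) time. The plain Gaussian projection below is an assumption kept for
# brevity; Performer proper draws orthogonal random features.
def _favor_plus_example():
    b, h, n, d, m = 1, 2, 128, 64, 256
    q, k, v = (torch.randn(b, h, n, d) for _ in range(3))
    proj = torch.randn(m, d)
    q_prime = softmax_kernel(q, projection_matrix=proj, is_query=True)    # (b, h, n, m)
    k_prime = softmax_kernel(k, projection_matrix=proj, is_query=False)   # (b, h, n, m)
    out, _ = linear_attention(q_prime, k_prime, v)                        # (b, h, n, d)
    return out.shape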
# efficient causal linear attention, created by EPFL
def causal_linear_attention(q, k, v, eps=1e-6, need_weights=False):
from fast_transformers.causal_product import CausalDotProduct
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(CausalDotProduct.apply) if is_half else CausalDotProduct.apply
k_cumsum = k.cumsum(dim=-2) + eps
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
out = causal_dot_product_fn(q, k, v)
        if need_weights:
            attn = torch.einsum('...te,...se,...t->...ts', q, k, D_inv)
            causal_mask = torch.triu(torch.ones(q.shape[-2], k.shape[-2], dtype=torch.bool,
                                                device=k.device), diagonal=1)
            attn.masked_fill_(causal_mask, 0.0)
        else:
            attn = None
    out = torch.einsum('...nd,...n->...nd', out, D_inv)
    return out, attn
# inefficient causal linear attention, without cuda code, for reader's reference
# not being used
def causal_linear_attention_noncuda(q, k, v, chunk_size = 128, eps = 1e-6, need_weights=False):
assert not need_weights, 'causal_linear_attention_noncuda does not support need_weights'
last_k_cumsum = 0
last_context_cumsum = 0
outs = []
for q, k, v in zip(*map(lambda t: t.chunk(chunk_size, dim = -2), (q, k, v))):
k_cumsum = last_k_cumsum + k.cumsum(dim=-2)
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q) + eps)
context = torch.einsum('...nd,...ne->...nde', k, v)
context_cumsum = last_context_cumsum + context.cumsum(dim=-3)
out = torch.einsum('...nde,...nd,...n->...ne', context_cumsum, q, D_inv)
last_k_cumsum = k_cumsum[:, :, -1:]
last_context_cumsum = context_cumsum[:, :, -1:]
outs.append(out)
    return torch.cat(outs, dim=-2), None
| fly-master | src/models/attention/performer_utils.py |
# Adapted from https://github.com/lucidrains/linformer/blob/master/linformer/linformer.py
# and https://github.com/tatp22/linformer-pytorch
import math
import torch
import torch.nn as nn
from einops import rearrange
class LinformerAttention(nn.Module):
"""
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
"""
def __init__(self, seq_len, k=256, share_kv=False,
softmax_temp=None, attention_dropout=0.0, device=None, dtype=None):
super().__init__()
self.seq_len = seq_len
self.share_kv = share_kv
self.proj_k = nn.Parameter(torch.empty(seq_len, k, device=device, dtype=dtype))
if not share_kv:
self.proj_v = nn.Parameter(torch.empty(seq_len, k, device=device, dtype=dtype))
self.softmax_temp = softmax_temp
self.dropout = nn.Dropout(attention_dropout)
self.reset_parameters()
def reset_parameters(self):
dim = self.proj_k.shape[-1]
# If we're using the random projection interpretation, then we should initialize as
# normal with std 1/sqrt(dim) (not 1/dim as in https://github.com/tatp22/linformer-pytorch/blob/master/linformer_pytorch/linformer_pytorch.py)
std = 1 / math.sqrt(dim)
nn.init.normal_(self.proj_k, mean=0.0, std=std)
if not self.share_kv:
nn.init.normal_(self.proj_v, mean=0.0, std=std)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
if attn_mask is not None:
raise NotImplementedError('Linformer does not support attn_mask')
# Extract some shapes and compute the temperature
B, T, H, E = query.shape
_, S_k, _, D = key.shape
_, S_v, _, D = value.shape
softmax_temp = self.softmax_temp or 1 / math.sqrt(E)
assert S_k <= self.seq_len and S_v <= self.seq_len, f'the sequence length of the key / value must be at most {self.seq_len}'
if key_padding_mask is not None and not key_padding_mask.all_ones:
key = key.masked_fill(rearrange(~key_padding_mask.bool_matrix, 'b s -> b s 1 1'), 0.0)
value = value.masked_fill(rearrange(~key_padding_mask.bool_matrix, 'b s -> b s 1 1'), 0.0)
# Scale the key instead of applying the softmax temperature to the
# dot products
key = torch.einsum('bshd,sk->bkhd', key, self.proj_k[:S_k] * softmax_temp)
value = torch.einsum('bshe,sk->bkhe', value,
self.proj_k[:S_v] if self.share_kv else self.proj_v[:S_v])
# Compute the unnormalized attention and apply the masks
QK = torch.einsum("bthe,bkhe->bhtk", query, key)
# Compute the attention and the weighted average
attn = torch.softmax(QK, dim=-1)
A = self.dropout(attn)
output = torch.einsum("bhtk,bkhd->bthd", A, value)
return output, attn if need_weights else None
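# --- Usage sketch (added for illustration; not part of the original Linformer code) ---
# Inputs follow the (batch, seq_len, num_heads, head_dim) convention of forward() above.
# key_padding_mask is left as None because it expects the repo's mask wrapper (with
# .all_ones / .bool_matrix); the sizes below are illustrative assumptions.
if __name__ == "__main__":
    attn = LinformerAttention(seq_len=1024, k=256)
    q = torch.randn(2, 1024, 8, 64)
    out, _ = attn(q, q, q)                     # keys/values are projected down to length k=256
    print(out.shape)                           # torch.Size([2, 1024, 8, 64])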
| fly-master | src/models/attention/linformer_attention.py |
# This is a copy of https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/matmul.py
# with a one-line fix for the bug https://github.com/openai/triton/issues/266
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
import torch
@triton.jit
def _kernel(
A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc, stride_hc,
stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta
):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
##############
# MAIN API #
##############
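# Added orientation note (not from the upstream Triton source): the three block-sparse matmul
# modes below follow the OpenAI blocksparse naming convention.
#   'sdd': Sparse = Dense @ Dense  -- only the output blocks marked in `layout` are computed.
#   'dsd': Dense  = Sparse @ Dense -- the left operand is block-sparse.
#   'dds': Dense  = Dense @ Sparse -- the right operand is block-sparse.
# `layout` is a (Z, M // block, N // block) tensor of 0s and 1s (Z is typically the number of
# attention heads) marking which block x block tiles of the sparse operand/output are nonzero.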
class _matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
# Given an array sizes representing reduction size for each
# column of a block-mode matrix multiplication,
# performs load-balancing to achieve more smaller reductions
# between `seg_size` elements
@staticmethod
def load_balance(sizes):
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
@staticmethod
def get_locks(size, dev):
if dev not in _matmul.locks or \
size > _matmul.locks[dev].size(0):
_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, device):
# start_width = 128 // block
# [2021-09-23] TD: This seems to produce the wrong shape for certain cases
start_width = 1
layout = layout.type(torch.int32)
superblocks = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2], start_width)
luts, widths, packs = [], [], []
for size, nnz in superblocks:
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs):
# (A * B)^T = (B^T * A^T)
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(f"Size of tensor A along the {_dim_to_name(a_dim)} dim ({a_inner}) must match size "
f"of tensor B along the {_dim_to_name(b_dim)} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.zeros((batch_size, total_width, block, block), dtype=dtype, device=device)
for lut, width, pack in zip(luts, widths, packs):
num_lock = 1
# [2021-09-06] TD: This line is the fix for the bug where the result is wrong if
# block == 16 and the inner dimension is an odd multiple of 16.
# https://github.com/openai/triton/issues/266
TK = 16 if block == 16 and (a_inner // 16) % 2 == 1 else 32
meta = {'TM': block * pack, 'TN': block * pack, 'BLOCK': block, 'TK': TK, 'TZ': 1,
'SDD': True, 'DSD': False, 'DDS': False}
# create output
locks = _matmul.get_locks(2 * width * batch_size * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
for off_width in range(0, width, max_width):
grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta
)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
# DENSE = SPARSE x DENSE #
##########################
# Given a binary layout of 0s and 1s,
# Construct look-up table for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _matmul.load_balance(sizes)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero(as_tuple=False)
else:
nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
@staticmethod
def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,
'SDD': False, 'DSD': False, 'DDS': True}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta
)
return c
@staticmethod
def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS1 = block * spdims[2 if trans_a else 1]
BS0 = b.size(0)
BS1 = b.size(1)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,
'SDD': False, 'DSD': True, 'DDS': False}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta
)
return c
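    # Dispatch table from mode string to kernel launcher. __get__(object) unwraps
    # the staticmethod descriptor into a plain function so it can be stored in,
    # and called from, a regular dict.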
fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
@staticmethod
def forward(
ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs, da_lut, da_num_locks,
da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
):
c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width, c_packs)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_packs = db_packs
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
da, db = None, None
mode = ctx.mode
        # gradients w.r.t. a: d(op(a)) = dc @ op(b)^T, so the (output, lhs, rhs)
        # density pattern permutes from mode = (C, A, B) to (A, C, B)
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _matmul.fn[mode_da](
dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_num_locks, ctx.da_width,
ctx.da_packs
)
        # gradients w.r.t. b: d(op(b)) = op(a)^T @ dc, so the density pattern
        # permutes from (C, A, B) to (B, A, C)
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _matmul.fn[mode_db](
a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_num_locks, ctx.db_width,
ctx.db_packs
)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
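# High-level, user-facing wrapper around _matmul. `mode` is a three-letter string
# giving the density of (C, A, B): 'sdd' = sparse output of two dense inputs,
# 'dsd' = dense output with a sparse A, 'dds' = dense output with a sparse B.
# Look-up tables are built lazily and cached per (dtype, device).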
class matmul:
def make_lut(self, dtype, device):
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_sdd_lut(layout, block, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_b, device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_sdd_lut(layout, block, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_a, device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_sdd_lut(layout, block, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,
da_lut, da_num_locks, da_width, da_packs,
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False):
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.block = block
self.mode = mode
self.trans_a = trans_a
self.trans_b = trans_b
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b, -2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
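            # i.e. the sparse operand is passed as a stack of dense (block, block)
            # tiles, one tile per nonzero entry of the layout.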
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.layout = layout
self.spdims = layout.shape
def __call__(self, a, b):
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# If we don't check for invalid shapes, devices, & dtypes here, they will lead to undefined behavior
# and potential illegal memory accesses
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# execute
c = _matmul.apply(
a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut, c_num_locks, c_width,
c_packs, da_lut, da_num_locks, da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not a.is_cuda:
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
def _dim_to_name(x):
# assert x in (-1, -2)
return "last" if x == -1 else "second to last"
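# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module): build a
# causal block-sparse layout and compute block-sparse scores with the `matmul`
# wrapper above. The shapes, block size and layout below are assumptions chosen
# for demonstration; running this requires a CUDA device with triton installed.
if __name__ == '__main__':
    if torch.cuda.is_available():
        block = 16
        H, M, N, K = 2, 64, 64, 64
        # Binary layout over (heads, M // block, N // block): keep only the lower
        # triangle of blocks, i.e. a causal pattern.
        layout = torch.tril(torch.ones(M // block, N // block, dtype=torch.long))
        layout = layout.unsqueeze(0).repeat(H, 1, 1)
        # SDD: sparse C = dense A @ dense B^T; only the blocks marked in `layout`
        # are computed, stored as one (block, block) tile per nonzero entry.
        sdd = matmul(layout, block, 'sdd', trans_a=False, trans_b=True)
        a = torch.randn(1, H, M, K, dtype=torch.float16, device='cuda')
        b = torch.randn(1, H, N, K, dtype=torch.float16, device='cuda')
        c = sdd(a, b)
        print('block-sparse output shape:', tuple(c.shape))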
| fly-master | src/models/attention/blocksparse_matmul.py |