python_code | repo_name | file_path
---|---|---|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import json
import logging
import os
import time
from pathlib import Path
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import (
download_cached_file,
get_rank,
get_world_size,
is_main_process,
main_process,
)
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.datasets.datasets.dataloader_utils import (
IterLoader,
MultiIterLoader,
PrefetchLoader,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.dataset import ChainDataset
@registry.register_runner("runner_base")
class RunnerBase:
"""
A runner class to train and evaluate a model given a task and datasets.
The runner uses PyTorch DistributedDataParallel by default. Future releases
will support other distributed frameworks.
"""
def __init__(self, cfg, task, model, datasets, job_id):
self.config = cfg
self.job_id = job_id
self.task = task
self.datasets = datasets
self._model = model
self._wrapped_model = None
self._device = None
self._optimizer = None
self._scaler = None
self._dataloaders = None
self._lr_sched = None
self.start_epoch = 0
# self.setup_seeds()
self.setup_output_dir()
@property
def device(self):
if self._device is None:
self._device = torch.device(self.config.run_cfg.device)
return self._device
@property
def use_distributed(self):
return self.config.run_cfg.distributed
@property
def model(self):
"""
A property to get the DDP-wrapped model on the device.
"""
# move model to device
if self._model.device != self.device:
self._model = self._model.to(self.device)
# distributed training wrapper
if self.use_distributed:
if self._wrapped_model is None:
self._wrapped_model = DDP(
self._model, device_ids=[self.config.run_cfg.gpu]
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
# TODO make optimizer class and configurations
if self._optimizer is None:
num_parameters = 0
p_wd, p_non_wd = [], []
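# group trainable parameters: tensors with ndim < 2, or whose name contains
# "bias", "ln" or "bn", are exempt from weight decay; all others receive the
# configured weight_decay in the AdamW optimizer below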
for n, p in self.model.named_parameters():
if not p.requires_grad:
continue # frozen weights
if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n:
p_non_wd.append(p)
else:
p_wd.append(p)
num_parameters += p.data.nelement()
logging.info("number of trainable parameters: %d" % num_parameters)
optim_params = [
{
"params": p_wd,
"weight_decay": float(self.config.run_cfg.weight_decay),
},
{"params": p_non_wd, "weight_decay": 0},
]
beta2 = self.config.run_cfg.get("beta2", 0.999)
self._optimizer = torch.optim.AdamW(
optim_params,
lr=float(self.config.run_cfg.init_lr),
weight_decay=float(self.config.run_cfg.weight_decay),
betas=(0.9, beta2),
)
return self._optimizer
@property
def scaler(self):
amp = self.config.run_cfg.get("amp", False)
if amp:
if self._scaler is None:
self._scaler = torch.cuda.amp.GradScaler()
return self._scaler
@property
def lr_scheduler(self):
"""
A property to lazily create the learning rate scheduler, only when first needed.
"""
if self._lr_sched is None:
lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
# max_epoch = self.config.run_cfg.max_epoch
max_epoch = self.max_epoch
# min_lr = self.config.run_cfg.min_lr
min_lr = self.min_lr
# init_lr = self.config.run_cfg.init_lr
init_lr = self.init_lr
# optional parameters
decay_rate = self.config.run_cfg.get("lr_decay_rate", None)
warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1)
warmup_steps = self.config.run_cfg.get("warmup_steps", 0)
self._lr_sched = lr_sched_cls(
optimizer=self.optimizer,
max_epoch=max_epoch,
min_lr=min_lr,
init_lr=init_lr,
decay_rate=decay_rate,
warmup_start_lr=warmup_start_lr,
warmup_steps=warmup_steps,
)
return self._lr_sched
@property
def dataloaders(self) -> dict:
"""
A property to lazily create dataloaders by split, only when first needed.
If train_dataset_ratios is not provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. The training set becomes a tuple
(ConcatDataset, ChainDataset); both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratios is provided, create a MultiIterLoader to sample
each dataset by the given ratios during training.
Multiple datasets for validation and test are currently not supported.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
if hasattr(self.datasets[split_name], "__len__"):
# a single map-style dataset
num_records = len(self.datasets[split_name])
else:
# a single wds.DataPipeline
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
@property
def cuda_enabled(self):
return self.device.type == "cuda"
@property
def max_epoch(self):
return int(self.config.run_cfg.max_epoch)
@property
def log_freq(self):
log_freq = self.config.run_cfg.get("log_freq", 50)
return int(log_freq)
@property
def init_lr(self):
return float(self.config.run_cfg.init_lr)
@property
def min_lr(self):
return float(self.config.run_cfg.min_lr)
@property
def accum_grad_iters(self):
return int(self.config.run_cfg.get("accum_grad_iters", 1))
@property
def valid_splits(self):
valid_splits = self.config.run_cfg.get("valid_splits", [])
if len(valid_splits) == 0:
logging.info("No validation splits found.")
return valid_splits
@property
def test_splits(self):
test_splits = self.config.run_cfg.get("test_splits", [])
return test_splits
@property
def train_splits(self):
train_splits = self.config.run_cfg.get("train_splits", [])
if len(train_splits) == 0:
logging.info("Empty train splits.")
return train_splits
@property
def evaluate_only(self):
"""
Set to True to skip training.
"""
return self.config.run_cfg.evaluate
@property
def use_dist_eval_sampler(self):
return self.config.run_cfg.get("use_dist_eval_sampler", True)
@property
def resume_ckpt_path(self):
return self.config.run_cfg.get("resume_ckpt_path", None)
@property
def train_loader(self):
train_dataloader = self.dataloaders["train"]
return train_dataloader
def setup_output_dir(self):
lib_root = Path(registry.get_path("library_root"))
output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id
result_dir = output_dir / "result"
output_dir.mkdir(parents=True, exist_ok=True)
result_dir.mkdir(parents=True, exist_ok=True)
registry.register_path("result_dir", str(result_dir))
registry.register_path("output_dir", str(output_dir))
self.result_dir = result_dir
self.output_dir = output_dir
def train(self):
start_time = time.time()
best_agg_metric = 0
best_epoch = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for cur_epoch in range(self.start_epoch, self.max_epoch):
# training phase
if not self.evaluate_only:
logging.info("Start training")
train_stats = self.train_epoch(cur_epoch)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_epoch, best_agg_metric = cur_epoch, agg_metrics
self._save_checkpoint(cur_epoch, is_best=True)
val_log.update({"best_epoch": best_epoch})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each epoch.
if not self.evaluate_only:
self._save_checkpoint(cur_epoch, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch
self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def evaluate(self, cur_epoch="best", skip_reload=False):
test_logs = dict()
if len(self.test_splits) > 0:
for split_name in self.test_splits:
test_logs[split_name] = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload
)
return test_logs
def train_epoch(self, epoch):
# train
self.model.train()
return self.task.train_epoch(
epoch=epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@torch.no_grad()
def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
"""
Evaluate the model on a given split.
Args:
split_name (str): name of the split to evaluate on.
cur_epoch (int): current epoch.
skip_reload (bool): whether to skip reloading the best checkpoint.
During training, we will reload the best checkpoint for validation.
During testing, we will use the provided weights and skip reloading the best checkpoint.
"""
data_loader = self.dataloaders.get(split_name, None)
assert data_loader, "data_loader for split {} is None.".format(split_name)
# TODO In validation, you need to compute loss as well as metrics
# TODO consider moving to model.before_evaluation()
model = self.unwrap_dist_model(self.model)
if not skip_reload and cur_epoch == "best":
model = self._reload_best_model(model)
model.eval()
self.task.before_evaluation(
model=model,
dataset=self.datasets[split_name],
)
results = self.task.evaluation(model, data_loader)
if results is not None:
return self.task.after_evaluation(
val_result=results,
split_name=split_name,
epoch=cur_epoch,
)
def unwrap_dist_model(self, model):
if self.use_distributed:
return model.module
else:
return model
def create_loaders(
self,
datasets,
num_workers,
batch_sizes,
is_trains,
collate_fns,
dataset_ratios=None,
):
"""
Create dataloaders for training and validation.
"""
def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
# create a single dataloader for each split
if isinstance(dataset, ChainDataset) or isinstance(
dataset, wds.DataPipeline
):
# wds.WebDataset instances are chained together
# webdataset.DataPipeline has its own sampler and collate_fn
loader = iter(
DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
)
)
else:
# map-style datasets are concatenated together
# setup distributed sampler
if self.use_distributed:
sampler = DistributedSampler(
dataset,
shuffle=is_train,
num_replicas=get_world_size(),
rank=get_rank(),
)
if not self.use_dist_eval_sampler:
# e.g. retrieval evaluation
sampler = sampler if is_train else None
else:
sampler = None
loader = DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
sampler=sampler,
shuffle=sampler is None and is_train,
collate_fn=collate_fn,
drop_last=True if is_train else False,
)
loader = PrefetchLoader(loader)
if is_train:
loader = IterLoader(loader, use_distributed=self.use_distributed)
return loader
loaders = []
for dataset, bsz, is_train, collate_fn in zip(
datasets, batch_sizes, is_trains, collate_fns
):
if isinstance(dataset, list) or isinstance(dataset, tuple):
loader = MultiIterLoader(
loaders=[
_create_loader(d, num_workers, bsz, is_train, collate_fn[i])
for i, d in enumerate(dataset)
],
ratios=dataset_ratios,
)
else:
loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
loaders.append(loader)
return loaders
@main_process
def _save_checkpoint(self, cur_epoch, is_best=False):
"""
Save the checkpoint at the current epoch.
"""
model_no_ddp = self.unwrap_dist_model(self.model)
param_grad_dic = {
k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()
}
state_dict = model_no_ddp.state_dict()
for k in list(state_dict.keys()):
if k in param_grad_dic.keys() and not param_grad_dic[k]:
# delete parameters that do not require gradient
del state_dict[k]
save_obj = {
"model": state_dict,
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"epoch": cur_epoch,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_epoch),
)
logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to))
torch.save(save_obj, save_to)
def _reload_best_model(self, model):
"""
Load the best checkpoint for evaluation.
"""
checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth")
logging.info("Loading checkpoint from {}.".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location="cpu")
try:
model.load_state_dict(checkpoint["model"])
except RuntimeError as e:
logging.warning(
"""
Key mismatch when loading checkpoint. This is expected if only part of the model is saved.
Trying to load the model with strict=False.
"""
)
model.load_state_dict(checkpoint["model"], strict=False)
return model
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_epoch = checkpoint["epoch"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@main_process
def log_stats(self, stats, split_name):
if isinstance(stats, dict):
log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}}
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(log_stats) + "\n")
elif isinstance(stats, list):
pass
@main_process
def log_config(self):
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
| EXA-1-master | exa/libraries/LAVIS/lavis/runners/runner_base.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import download_cached_file, is_main_process, main_process
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.runners.runner_base import RunnerBase
from torch.utils.data.dataset import ChainDataset
@registry.register_runner("runner_iter")
class RunnerIter(RunnerBase):
"""
Run training based on the number of iterations. This is common when
the training dataset size is large. Under the hood, the logic is similar to
epoch-based training, treating every #iters_per_inner_epoch steps as an
inner epoch.
In iter-based runner, after every #iters_per_inner_epoch steps, we
1) do a validation epoch;
2) schedule the learning rate;
3) save the checkpoint.
We refer to every #iters_per_inner_epoch steps as an inner epoch.
"""
def __init__(self, cfg, task, model, datasets, job_id):
super().__init__(cfg, task, model, datasets, job_id)
self.start_iters = 0
self.max_iters = int(self.config.run_cfg.get("max_iters", -1))
assert self.max_iters > 0, "max_iters must be greater than 0."
self.iters_per_inner_epoch = int(
self.config.run_cfg.get("iters_per_inner_epoch", -1)
)
assert (
self.iters_per_inner_epoch > 0
), "iters_per_inner_epoch must be greater than 0."
@property
def max_epoch(self):
return int(self.max_iters / self.iters_per_inner_epoch)
@property
def cur_epoch(self):
try:
return self.train_loader.epoch
except AttributeError:
# pipeline data (e.g. LAION) is streamed and has no concept of epoch
return 0
def _progress(self, cur_iters):
return "{}_iters={}".format(self.cur_epoch, cur_iters)
def train(self):
start_time = time.time()
best_agg_metric = 0
best_iters = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for start_iters in range(
self.start_iters, self.max_iters, self.iters_per_inner_epoch
):
end_iters = start_iters + self.iters_per_inner_epoch
# training phase
if not self.evaluate_only:
logging.info(
"Start training, max_iters={}, in total {} inner epochs.".format(
self.max_iters, int(self.max_iters / self.iters_per_inner_epoch)
)
)
train_stats = self.train_iters(self.cur_epoch, start_iters)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=self._progress(end_iters)
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to lazily create dataloaders by split, only when first needed.
If train_dataset_ratios is not provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. The training set becomes a tuple
(ConcatDataset, ChainDataset); both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratios is provided, create a MultiIterLoader to sample
each dataset by the given ratios during training.
Multiple datasets for validation and test are currently not supported.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
else:
# create multi-loader with the provided ratios, without concatenating or chaining
missing_keys = [k for k in dataset_ratios if k not in self.datasets]
if len(missing_keys) > 0:
raise ValueError(
"Datasets with the following split names are not found: {}".format(
missing_keys
)
)
unexpected_keys = [k for k in self.datasets if k not in dataset_ratios]
if len(unexpected_keys) > 0:
raise ValueError(
"Datasets with the following split names are not expected: {}".format(
unexpected_keys
)
)
dataset_ratios = [float(dataset_ratios[k]) for k in self.datasets]
self.datasets = reorg_datasets_by_split(self.datasets)
# to keep the same structure as return value of concat_datasets
self.datasets = {
k: v[0] if len(v) == 1 else v for k, v in self.datasets.items()
}
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
try:
# a single map-style dataset
num_records = len(self.datasets[split_name])
except TypeError:
# a single wds.DataPipeline or ChainDataset
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
| EXA-1-master | exa/libraries/LAVIS/lavis/runners/runner_iter.py |
from setuptools import setup, find_packages
setup(
name = 'x-transformers',
packages = find_packages(exclude=['examples']),
version = '1.11.0',
license='MIT',
description = 'X-Transformers - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/x-transformers',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'torch>=1.6',
'einops>=0.6.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/libraries/x-transformers/setup.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
from x_transformers.autoregressive_wrapper import top_p, top_k, eval_decorator
# helper functions
def exists(val):
return val is not None
def divisible_by(numer, denom):
return (numer % denom) == 0
# xl autoregressive wrapper class
class XLAutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
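# rough intuition: sequences longer than net.max_seq_len are handled segment by
# segment. forward() splits inputs/targets into max_seq_len chunks and threads the
# recurrent `mems` through them; generate() first replays the leading segments to
# build up memories, then samples token by token from the current segment.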
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
mems = None,
**kwargs
):
device, max_seq_len = start_tokens.device, self.max_seq_len
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
*all_leading_tokens, _ = start_tokens.split(max_seq_len, dim = -1)
# catch the memory up to the current segment
for leading_tokens in all_leading_tokens:
_, mems = self.net(
leading_tokens,
mems = mems,
return_mems = True,
**kwargs
)
# now start sampling from the current segment
curr_pos = len(all_leading_tokens) * max_seq_len
curr_mems = mems
out = start_tokens
for _ in range(seq_len):
curr_segment_len = out.shape[-1]
is_last_segment_tokens = divisible_by(curr_segment_len, max_seq_len)
x = out[:, curr_pos:]
logits, mems = self.net(
x,
mems = curr_mems,
return_mems = True,
**kwargs
)
logits = logits[:, -1]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
if is_last_segment_tokens:
curr_pos = curr_segment_len
curr_mems = mems
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(
self,
x,
mems = None,
**kwargs
):
ignore_index, max_seq_len = self.ignore_index, self.max_seq_len
x, labels = x[:, :-1], x[:, 1:]
seq_len = x.shape[1]
# prepare chunks
split_x = x.split(max_seq_len, dim = -1)
split_labels = labels.split(max_seq_len, dim = -1)
loss_weights = tuple(map(lambda t: t.shape[-1] / seq_len, split_x))
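# weighting each chunk's mean cross entropy by chunk_len / seq_len makes total_loss
# the cross entropy averaged over the whole sequence (the last chunk may be shorter),
# up to how ignore_index positions are counted within each chunk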
# go through each chunk and derive weighted losses
total_loss = 0.
for chunk, chunk_labels, loss_weight in zip(split_x, split_labels, loss_weights):
logits, mems = self.net(
chunk,
mems = mems,
return_mems = True,
**kwargs
)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
chunk_labels,
ignore_index = ignore_index
)
total_loss = total_loss + loss * loss_weight
return total_loss
| EXA-1-master | exa/libraries/x-transformers/x_transformers/xl_autoregressive_wrapper.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
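# note on the shared `thres` convention: top_k below keeps ceil((1 - thres) * vocab)
# logits, i.e. thres = 0.9 keeps the top 10%; top_p above removes tokens once the
# cumulative probability exceeds (1 - thres), so roughly the top 10% of probability
# mass survives at the default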
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
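# minimal usage sketch (hedged; `net` is any token model exposing .max_seq_len,
# e.g. a TransformerWrapper from this package):
#   wrapper = AutoregressiveWrapper(net)
#   loss = wrapper(token_ids)   # token_ids: (batch, seq) LongTensor; forward returns the LM loss
#   loss.backward()
#   sampled = wrapper.generate(prompt_ids, seq_len = 256, temperature = 1.)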
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
device = start_tokens.device
num_dims = start_tokens.ndim
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
return loss
| EXA-1-master | exa/libraries/x-transformers/x_transformers/autoregressive_wrapper.py |
import math
from random import random
import torch
from torch import nn, einsum
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from collections import namedtuple
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
# constants
DEFAULT_DIM_HEAD = 64
Intermediates = namedtuple('Intermediates', [
'qk_similarities',
'pre_softmax_attn',
'post_softmax_attn'
])
LayerIntermediates = namedtuple('Intermediates', [
'hiddens',
'attn_intermediates'
])
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
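# (keeps max(1, int(keep_prob * n)) randomly chosen positions per example, preferring
# unmasked positions; both the sequence and its mask are gathered, so whole tokens
# are dropped rather than individual features)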
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
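# bucketing follows T5: half the buckets cover exact small offsets, the other half
# cover larger offsets on a log scale up to max_distance; in the non-causal case the
# bucket space is additionally split between past and future positions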
def forward(self, qk_dots):
i, j, device = *qk_dots.shape[-2:], qk_dots.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return qk_dots + (bias * self.scale)
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
def forward(self, qk_dots):
n, device, dtype = qk_dots.shape[-1], qk_dots.device, qk_dots.dtype
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device, dtype = dtype)
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return qk_dots + bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, **kwargs):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
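# for a power-of-two head count n, the slopes are the geometric sequence
# 2^(-8/n), 2^(-16/n), ... as in the ALiBi paper; otherwise the slopes for the
# closest power of two are used, extended with every other slope from the doubled count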
def forward(self, qk_dots):
h, i, j, device = *qk_dots.shape[-3:], qk_dots.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return qk_dots + self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent=False)
return qk_dots + self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads):
super().__init__(heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, qk_dots):
h, i, j, device = *qk_dots.shape[-3:], qk_dots.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
if exists(self.bias) and self.bias.shape[-1] >= j:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent=False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return qk_dots + bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512
):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
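# rotary embedding rotates paired query/key features (here the first and second halves
# of the rotary dims, via rotate_half) by a position-dependent angle; the optional
# `scale` implements xpos length extrapolation and is applied with opposite exponents
# to queries and keys in Attention.forward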
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
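# ShiftTokens splits the feature dimension into len(shifts) segments and shifts each
# segment along the sequence axis by its offset (padding with zeros) before calling
# the wrapped fn, cheaply mixing information from neighbouring tokens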
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(self, dim_in, dim_out, activation):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate)
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
) if not glu else GLU(dim, inner_dim, activation)
self.ff = nn.Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False # https://arxiv.org/abs/2208.06061
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# dropout
self.dropout = nn.Dropout(dropout)
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# talking heads
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# attention softmax function
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, talking_heads, head_scale, scale, device, has_context = *x.shape, self.heads, self.talking_heads, self.head_scale, self.scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = default(context_mask, mask)
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
kv_einsum_eq = 'b h j d' if not self.one_kv_head else 'b j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
mask_value = max_neg_value(dots)
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(rel_pos):
dots = rel_pos(dots)
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
dots = dots.masked_fill(~input_mask, mask_value)
del input_mask
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
dots = dots.masked_fill(~attn_mask, mask_value)
if exists(self.max_attend_past):
i, j = dots.shape[-2:]
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
dots = dots.masked_fill(max_attend_past_mask, mask_value)
del max_attend_past_mask
if self.causal:
i, j = dots.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
dots = dots.masked_fill(causal_mask, mask_value)
del causal_mask
if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
top, _ = dots.topk(self.sparse_topk, dim = -1)
vk = rearrange(top[..., -1], '... -> ... 1')
sparse_topk_mask = dots < vk
dots = dots.masked_fill(sparse_topk_mask, mask_value)
del sparse_topk_mask
dtype = dots.dtype
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.dropout(attn)
if talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
if exists(r):
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
out = out * r + out
if head_scale:
out = out * self.head_scale_params
out = rearrange(out, 'b h n d -> b n (h d)')
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_xpos_scale_base = 512,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
self.rel_pos = None
if rel_pos_bias:
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than or equal to the total number of heads'
alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned else AlibiPositionalBias
self.rel_pos = alibi_pos_klass(heads = alibi_num_heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
self.cross_attend = cross_attend
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
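# (editor's note) layers are described by a tuple of layer types: 'a' is self attention, 'c' is
# cross attention and 'f' is a feedforward block; the default block composed below is repeated
# `depth` times unless custom_layers, par_ratio or sandwich_coef override the ordering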
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient must be positive and no greater than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm and not is_last_layer else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
residual = x
pre_branch_norm, post_branch_norm, post_main_norm = norm
if exists(pre_branch_norm):
x = pre_branch_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
dropout = 0.,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else None  # None (not Identity) so the embedding early-return in forward can trigger when no classifier head is requested
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
x = self.norm(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1. # GLM-130B and CogView used a reduced embedding gradient (0.1) successfully
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
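# (editor's note) with tie_embedding, logits above are computed as a similarity against the token
# embedding matrix (t @ token_emb.weight.t()) instead of a separate output projection, tying the
# input and output embeddings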
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates  # intermediates are only produced when the attention layers are asked for hiddens
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
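# (editor's note) numerically the expression above equals x, but only the fraction
# emb_frac_gradient of the gradient flows back into the token / positional embeddings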
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after prepending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
x = self.norm(x)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
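# --- editor's addition: minimal usage sketch (not part of the original module) ---
# illustrates how TransformerWrapper combines a token embedding, positional embedding and an
# AttentionLayers stack into a GPT-style language model. all hyperparameters below are arbitrary
# example values, not defaults from the library
def _example_transformer_wrapper_usage():
    model = TransformerWrapper(
        num_tokens = 256,                                   # vocabulary size
        max_seq_len = 128,                                  # length for the absolute positional embedding
        attn_layers = Decoder(dim = 64, depth = 2, heads = 4)
    )
    token_ids = torch.randint(0, 256, (2, 128))             # (batch, seq) of token ids
    logits = model(token_ids)                               # (2, 128, 256) logits over the vocabulary
    return logits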
class ContinuousTransformerWrapper(nn.Module):
def __init__(
self,
*,
max_seq_len,
attn_layers,
dim_in = None,
dim_out = None,
emb_dim = None,
post_emb_norm = False,
emb_dropout = 0.,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
self.max_seq_len = max_seq_len
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
def forward(
self,
x,
return_embeddings = False,
return_intermediates = False,
mask = None,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
**kwargs
):
x = self.project_in(x)
x = x + self.pos_emb(x, pos = pos)
x = self.post_emb_norm(x)
# whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
_, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
x = self.emb_dropout(x)
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
x = self.norm(x)
out = self.project_out(x) if not return_embeddings else x
if return_intermediates:
return out, intermediates
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
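# --- editor's addition: minimal usage sketch (not part of the original module) ---
# ContinuousTransformerWrapper operates on real-valued feature sequences rather than token ids:
# project_in maps dim_in -> dim, the attention layers run as usual and project_out maps
# dim -> dim_out. sizes below are arbitrary example values
def _example_continuous_wrapper_usage():
    model = ContinuousTransformerWrapper(
        max_seq_len = 64,
        dim_in = 32,
        dim_out = 32,
        attn_layers = Encoder(dim = 64, depth = 2, heads = 4)
    )
    features = torch.randn(2, 64, 32)    # (batch, seq, dim_in) continuous inputs
    out = model(features)                # (2, 64, 32) after project_out
    return out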
class XTransformer(nn.Module):
def __init__(
self,
*,
dim,
tie_token_emb = False,
ignore_index = -100,
pad_value = 0,
deepnorm = False,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
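# (editor's note) keyword arguments prefixed with 'enc_' / 'dec_' are stripped of their prefix and
# routed to the encoder / decoder TransformerWrapper respectively, e.g. enc_depth = 3 becomes
# depth = 3 for the encoder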
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
if deepnorm:
enc_kwargs['scale_residual'] = True
dec_kwargs['scale_residual'] = True
enc_depth = enc_kwargs['depth']
dec_depth = dec_kwargs['depth']
enc_kwargs['scale_residual_constant'] = 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625
dec_kwargs['scale_residual_constant'] = (3 * dec_depth) ** 0.25
self.encoder = TransformerWrapper(
**enc_transformer_kwargs,
attn_layers = Encoder(dim = dim, **enc_kwargs)
)
self.decoder = TransformerWrapper(
**dec_transformer_kwargs,
attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
)
if deepnorm:
deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
if tie_token_emb:
self.decoder.token_emb = self.encoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
@torch.no_grad()
def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
if exists(src_prepend_embeds) and exists(mask):
mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
if self.training and self.cross_attn_tokens_dropout > 0:
enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
out = self.decoder(tgt, context = enc, context_mask = mask)
return out
| EXA-1-master | exa/libraries/x-transformers/x_transformers/x_transformers.py |
from x_transformers.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, TransformerWrapper, ViTransformerWrapper, ContinuousTransformerWrapper
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
from x_transformers.nonautoregressive_wrapper import NonAutoregressiveWrapper
from x_transformers.continuous_autoregressive_wrapper import ContinuousAutoregressiveWrapper
from x_transformers.xl_autoregressive_wrapper import XLAutoregressiveWrapper
| EXA-1-master | exa/libraries/x-transformers/x_transformers/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
def exists(val):
return val is not None
class ContinuousAutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, **kwargs):
device = start_tokens.device
was_training = self.net.training
num_dims = len(start_tokens.shape)
assert num_dims >= 2, 'number of dimensions of your start tokens must be greater or equal to 2'
if num_dims == 2:
start_tokens = start_tokens[None, :]
b, t, _, device = *start_tokens.shape, start_tokens.device
self.net.eval()
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
last = self.net(x, **kwargs)[:, -1:]
out = torch.cat((out, last), dim = -2)
out = out[:, t:]
if num_dims == 2:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, **kwargs):
inp, target = x[:, :-1], x[:, 1:]
mask = kwargs.get('mask', None)
if exists(mask) and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs['mask'] = mask
out = self.net(inp, **kwargs)
loss = F.mse_loss(out, target, reduction = 'none')
if exists(mask):
loss = loss[mask]
return loss.mean()
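# --- editor's addition: minimal usage sketch (not part of the original package) ---
# shows training and generation with ContinuousAutoregressiveWrapper. the wrapped network and all
# sizes are arbitrary example values; the import is done lazily inside the function to avoid a
# circular import at module load time
def _example_continuous_ar_usage():
    from x_transformers import ContinuousTransformerWrapper, Decoder

    net = ContinuousTransformerWrapper(
        max_seq_len = 64,
        dim_in = 16,
        dim_out = 16,
        attn_layers = Decoder(dim = 64, depth = 2, heads = 4)
    )
    wrapper = ContinuousAutoregressiveWrapper(net)

    seq = torch.randn(2, 64, 16)              # (batch, seq, dim) training sequences
    loss = wrapper(seq)                       # scalar MSE over one-step-ahead prediction
    loss.backward()

    prime = torch.randn(2, 8, 16)             # short seed sequence
    generated = wrapper.generate(prime, 16)   # (2, 16, 16) newly generated frames
    return generated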
| EXA-1-master | exa/libraries/x-transformers/x_transformers/continuous_autoregressive_wrapper.py |
import math
from random import random
from contextlib import nullcontext
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat, pack, unpack
from x_transformers.x_transformers import TransformerWrapper
from typing import Optional
# constants
Losses = namedtuple('Losses', ['loss', 'generator_loss', 'critic_loss'])
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# sampling helpers
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = logits.topk(k, dim = -1)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(2, ind, val)
return probs
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
# prob helpers
def sample_prob(prob):
return random() < prob
def coin_flip():
return sample_prob(0.5)
# tensor helpers
def get_mask_subset_prob(mask, prob, min_mask = 0):
batch, seq, device = *mask.shape, mask.device
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
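# (editor's note) the helper above picks, per row, roughly `prob` of the True positions in `mask`
# (at least `min_mask` of them): random logits are ranked over the valid positions only and the
# lowest-ranked ones are selected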
# schedules
def linear_schedule(t):
return 1 - t
def cosine_schedule(t):
""" https://arxiv.org/abs/2202.04200 """
return torch.cos(t * math.pi / 2)
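# (editor's note) both schedules map a normalized time t in [0, 1] to the fraction of the
# sequence that stays masked: linear gives 1 - t (0.5 at t = 0.5), while cosine keeps more masked
# early on (cos(pi/4) ~ 0.71 at t = 0.5) and unmasks quickly towards the end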
# self token critic
# inspired by Nijkamp et al. - https://aclanthology.org/2021.naacl-main.409/
class SelfCritic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
dim = net.attn_layers.dim
self.to_logits = nn.Linear(dim, 1)
def forward(self, x):
embed = self.net(x, return_embeddings = True)
return self.to_logits(embed)
class NonAutoregressiveWrapper(nn.Module):
"""
https://arxiv.org/abs/1904.09324
https://arxiv.org/abs/2202.04200
"""
def __init__(
self,
net,
*,
mask_id,
steps = 18,
self_cond = False,
self_cond_train_prob = 0.75,
no_replace_prob = 0.15, # which percentage of the tokens masked will stay the same, done in original MLM paper
random_token_prob = 0.1, # which percentage of tokens to be replaced with random token, done in original MLM paper
schedule = 'linear',
can_mask_prev_unmasked = False, # when unmasking, whether it can remask previously unmasked
token_critic: Optional[TransformerWrapper] = None,
self_token_critic = False,
critic_loss_weight = 1.
):
super().__init__()
assert not (self_token_critic and exists(token_critic))
self.net = net
dim = net.emb_dim
self.dim = dim
self.num_tokens = net.num_tokens
self.mask_id = mask_id
# afaict, maskgit paper did not do this
# but may help for self conditioning, as used successfully in original BERT
self.no_replace_prob = no_replace_prob
self.random_token_prob = random_token_prob
self.max_seq_len = net.max_seq_len
self.steps = steps
if callable(schedule):
self.schedule_fn = schedule
elif schedule == 'linear':
self.schedule_fn = linear_schedule
elif schedule == 'cosine':
self.schedule_fn = cosine_schedule
else:
raise ValueError(f'invalid schedule {schedule}')
self.can_mask_prev_unmasked = can_mask_prev_unmasked
# self conditioning
self.self_cond = self_cond
if self_cond:
self.null_embed = nn.Parameter(torch.randn(dim))
self.to_self_cond = nn.Linear(dim, dim, bias = False) if self_cond else None
self.self_cond_train_prob = self_cond_train_prob
# token critic
self.token_critic = token_critic
if self_token_critic:
self.token_critic = SelfCritic(net)
self.critic_loss_weight = critic_loss_weight
@torch.no_grad()
def generate(
self,
batch_size = None,
start_temperature = 1.,
filter_thres = 0.7,
noise_level_scale = 1.,
**kwargs
):
sample_one = not exists(batch_size)
batch_size = default(batch_size, 1)
device = next(self.net.parameters()).device
was_training = self.training
self.eval()
times = torch.linspace(0., 1., self.steps + 1)
# sequence starts off as all masked
shape = (batch_size, self.max_seq_len)
seq = torch.full(shape, self.mask_id, device = device)
mask = torch.full(shape, True, device = device)
# slowly demask
all_mask_num_tokens = (self.schedule_fn(times[1:]) * self.max_seq_len).long()
# self conditioning
has_self_cond = self.self_cond
last_embed = self.null_embed if has_self_cond else None
for mask_num_tokens, steps_until_x0 in zip(all_mask_num_tokens.tolist(), reversed(range(self.steps))):
self_cond = self.to_self_cond(last_embed) if has_self_cond else None
logits, embeds = self.net(
seq,
sum_embeds = self_cond,
return_logits_and_embeddings = True,
**kwargs
)
if has_self_cond:
last_embed = embeds
if exists(filter_thres):
logits = top_k(logits, filter_thres)
annealing_scale = steps_until_x0 / self.steps
temperature = start_temperature * annealing_scale
probs = (logits / max(temperature, 1e-3)).softmax(dim = -1)
sampled_ids = gumbel_sample(logits, temperature = max(temperature, 1e-3))
seq = torch.where(mask, sampled_ids, seq)
if exists(self.token_critic):
scores = self.token_critic(seq)
scores = rearrange(scores, 'b n 1 -> b n')
scores = scores + noise_level_scale * gumbel_noise(scores) * annealing_scale
else:
scores = 1 - logits.softmax(dim = -1)
scores = scores.gather(2, rearrange(sampled_ids, 'b n -> b n 1'))
scores = rearrange(scores, 'b n 1 -> b n')
if mask_num_tokens == 0:
pass
if not self.can_mask_prev_unmasked:
scores = scores.masked_fill(~mask, -torch.finfo(scores.dtype).max)
mask_indices = scores.topk(mask_num_tokens, dim = -1).indices
mask = torch.zeros_like(scores, dtype = torch.bool).scatter(1, mask_indices, True)
seq = seq.masked_fill(mask, self.mask_id)
self.train(was_training)
if sample_one:
seq = rearrange(seq, '1 n -> n')
return seq
def forward(
self,
x,
only_train_generator = False,
only_train_critic = False,
generator_sample_temperature = None,
**kwargs
):
b, n, device = *x.shape, x.device
assert n == self.max_seq_len
orig_seq = x.clone()
rand_times = torch.empty(b, device = device).uniform_(0, 1)
batched_randperm = torch.rand((b, n), device = device).argsort(dim = -1).float()
rand_probs = self.schedule_fn(rand_times)
num_tokens_mask = (rand_probs * n).clamp(min = 1.)
mask = batched_randperm < rearrange(num_tokens_mask, 'b -> b 1')
# to ensure all tokens produce embeddings, instead of just the ones with [mask] input, as done in seminal BERT MLM paper
# potentially needed for self-conditioning (on embedding) to work well
replace_mask_id_mask = mask.clone()
frac_seq_left = 1.
if self.no_replace_prob > 0. and coin_flip():
frac_seq_left -= self.no_replace_prob
no_replace_prob_mask = get_mask_subset_prob(mask, self.no_replace_prob)
replace_mask_id_mask &= ~no_replace_prob_mask
if self.random_token_prob > 0. and coin_flip():
random_token_prob_mask = get_mask_subset_prob(replace_mask_id_mask, self.random_token_prob * frac_seq_left)
random_tokens = torch.randint(0, self.num_tokens, (b, n), device = device)
x = torch.where(random_token_prob_mask, random_tokens, x)
replace_mask_id_mask &= ~random_token_prob_mask
masked = torch.where(replace_mask_id_mask, self.mask_id, x)
# self conditioning
if self.self_cond:
self_cond = self.null_embed
if sample_prob(self.self_cond_train_prob):
with torch.no_grad():
self_cond = self.net(masked, return_embeddings = True, **kwargs).detach()
kwargs.update(sum_embeds = self.to_self_cond(self_cond))
# logits
context = torch.no_grad if only_train_critic else nullcontext
with context():
logits = self.net(masked, **kwargs)
# cross entropy loss
loss = F.cross_entropy(
logits[mask],
orig_seq[mask]
)
if not exists(self.token_critic) or only_train_generator:
return Losses(loss, loss, None)
sampled_ids = gumbel_sample(logits, temperature = default(generator_sample_temperature, random()))
generated = torch.where(mask, sampled_ids, orig_seq)
critic_logits = self.token_critic(generated)
critic_labels = (sampled_ids != orig_seq).float()
critic_loss = F.binary_cross_entropy_with_logits(
rearrange(critic_logits, '... 1 -> ...'),
critic_labels
)
# determine losses to be returned based on what researcher wants to train
if only_train_critic:
total_loss = critic_loss
loss = None
else:
total_loss = loss + critic_loss * self.critic_loss_weight
return Losses(total_loss, loss, critic_loss)
| EXA-1-master | exa/libraries/x-transformers/x_transformers/nonautoregressive_wrapper.py |
import tqdm
import torch
import torch.optim as optim
from x_transformers import XTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool().cuda()
yield (src, tgt, src_mask)
# instantiate model
model = XTransformer(
dim = 512,
tie_token_emb = True,
return_tgt_loss = True,
enc_num_tokens=NUM_TOKENS,
enc_depth = 3,
enc_heads = 8,
enc_max_seq_len = ENC_SEQ_LEN,
dec_num_tokens = NUM_TOKENS,
dec_depth = 3,
dec_heads = 8,
dec_max_seq_len = DEC_SEQ_LEN
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
src, tgt, src_mask = next(cycle())
loss = model(src, tgt, mask=src_mask)
loss.backward()
print(f'{i}: {loss.item()}')
optim.step()
optim.zero_grad()
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
src, _, src_mask = next(cycle())
src, src_mask = src[:1], src_mask[:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
sample = model.generate(src, start_tokens, ENC_SEQ_LEN, mask = src_mask)
incorrects = (src != sample).sum()  # direct count of mismatched positions; .abs() on the boolean comparison is unnecessary
print(f"input: ", src)
print(f"predicted output: ", sample)
print(f"incorrects: {incorrects}")
| EXA-1-master | exa/libraries/x-transformers/examples/toy_tasks/enc_dec_copy.py |
from x_transformers import (
TransformerWrapper,
Encoder,
NonAutoregressiveWrapper
)
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e8)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 250
SEQ_LEN = 256
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
model = TransformerWrapper(
num_tokens = 256 + 1,
logits_dim = 256,
max_seq_len = SEQ_LEN,
attn_layers = Encoder(
dim = 512,
depth = 8,
heads = 8,
dynamic_pos_bias = True
)
)
model = NonAutoregressiveWrapper(
model,
steps = 18,
schedule = 'cosine',
mask_id = 256, # mask id is last token, which is why num_tokens above has a +1 (special token)
self_token_critic = True
)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader)).loss
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
val_data = next(val_loader)
loss = model(val_data).loss
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
sample = model.generate()
output_str = decode_tokens(sample)
print(output_str)
| EXA-1-master | exa/libraries/x-transformers/examples/enwik8_simple/train_nar.py |
from x_transformers import TransformerWrapper, Decoder
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = TransformerWrapper(
num_tokens = 256,
max_seq_len = SEQ_LEN,
attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| EXA-1-master | exa/libraries/x-transformers/examples/enwik8_simple/train.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from network import define_tsnet
from kd_losses import *
parser = argparse.ArgumentParser(description='train boundary supporting sample (bss)')
# various path
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--s_init', type=str, required=True, help='initial parameters of student model')
parser.add_argument('--t_model', type=str, required=True, help='path name of teacher model')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='The size of batch')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset choosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--t_name', type=str, required=True, help='name of teacher') # resnet20/resnet110
parser.add_argument('--s_name', type=str, required=True, help='name of student') # resnet20/resnet110
# hyperparameter
parser.add_argument('--lambda_kd', type=float, default=2.0, help='trade-off parameter for kd loss')
parser.add_argument('--T', type=float, default=3.0, help='temperature for bss')
parser.add_argument('--attack_size', type=int, default=32, help='num of samples for bss attack')
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
snet = define_tsnet(name=args.s_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.s_init)
load_pretrained_model(snet, checkpoint['net'])
logging.info('Student: %s', snet)
logging.info('Student param size = %fMB', count_parameters_in_MB(snet))
tnet = define_tsnet(name=args.t_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.t_model)
load_pretrained_model(tnet, checkpoint['net'])
tnet.eval()
for param in tnet.parameters():
param.requires_grad = False
logging.info('Teacher: %s', tnet)
logging.info('Teacher param size = %fMB', count_parameters_in_MB(tnet))
logging.info('-----------------------------------------------')
# initialize optimizer
optimizer = torch.optim.SGD(snet.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# define attacker
attacker = BSSAttacker(step_alpha=0.3, num_steps=10, eps=1e-4)
# define loss functions
criterionKD = BSS(args.T)
if args.cuda:
criterionCls = torch.nn.CrossEntropyLoss().cuda()
else:
criterionCls = torch.nn.CrossEntropyLoss()
# define transforms
if args.data_name == 'cifar10':
dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# warp nets and criterions for train and test
nets = {'snet':snet, 'tnet':tnet}
criterions = {'criterionCls':criterionCls, 'criterionKD':criterionKD}
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizer, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, nets, optimizer, criterions, attacker, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top1, test_top5 = test(test_loader, nets, criterions, epoch)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
is_best = False
if test_top1 > best_top1:
best_top1 = test_top1
best_top5 = test_top5
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'snet': snet.state_dict(),
'tnet': tnet.state_dict(),
'prec@1': test_top1,
'prec@5': test_top5,
}, is_best, args.save_root)
def train(train_loader, nets, optimizer, criterions, attacker, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
# disable the KD term for the first 10 epochs, then decay lambda_kd linearly (reaching 0 at roughly 80% of training)
lambda_kd = 0 if epoch <= 10 else max(args.lambda_kd * (1 - 5 / 4 * (epoch-1) / args.epochs), 0)
snet.train()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
_, _, _, _, _, out_s = snet(img)
_, _, _, _, _, out_t = tnet(img)
cls_loss = criterionCls(out_s, target)
kd_loss = None
if lambda_kd > 0:
condition1 = target == out_s.sort(dim=1, descending=True)[1][:,0]
condition2 = target == out_t.sort(dim=1, descending=True)[1][:,0]
attack_flag = condition1 & condition2
if attack_flag.sum():
# base sample selection
attack_idx = attack_flag.nonzero().view(-1)
if attack_idx.shape[0] > args.attack_size:
diff = (F.softmax(out_t[attack_idx,:], 1) - F.softmax(out_s[attack_idx,:], 1)) ** 2
score = diff.sum(dim=1) - diff.gather(1, target[attack_idx].unsqueeze(1)).squeeze()
attack_idx = attack_idx[score.sort(descending=True)[1][:args.attack_size]]
# attack class selection
attack_class = out_t.sort(dim=1, descending=True)[1][:,1][attack_idx]
class_score, class_idx = F.softmax(out_t, 1)[attack_idx, :].sort(dim=1, descending=True)
class_score = class_score[:, 1:]
class_idx = class_idx[:, 1:]
rand_size = attack_idx.shape[0]
rand_seed = torch.rand([rand_size]).cuda() if args.cuda else torch.rand([rand_size])
rand_seed = class_score.sum(dim=1) * rand_seed
prob = class_score.cumsum(dim=1)
for k in range(attack_idx.shape[0]):
for c in range(prob.shape[1]):
if (prob[k,c] >= rand_seed[k]).cpu().numpy():
attack_class[k] = class_idx[k,c]
break
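# (editor's note) the two nested loops above do per-sample inverse-CDF sampling of the attack
# class from the teacher's softmax over the non-top-1 classes. a vectorized sketch
# (assumption, not in the original) could be:
#   cols = torch.multinomial(class_score, 1).squeeze(1)
#   attack_class = class_idx.gather(1, cols.unsqueeze(1)).squeeze(1)
# torch.multinomial normalizes non-negative weights internally, so the unnormalized class_score
# rows can be used directly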
# forward adversarial samples
attacked_img = attacker.attack(tnet,
img[attack_idx, ...],
target[attack_idx],
attack_class)
_, _, _, _, _, attacked_out_s = snet(attacked_img)
_, _, _, _, _, attacked_out_t = tnet(attacked_img)
kd_loss = criterionKD(attacked_out_s, attacked_out_t) * lambda_kd
if kd_loss is None:
kd_loss = torch.zeros(1).cuda() if args.cuda else torch.zeros(1)
loss = cls_loss + kd_loss
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'KD:{kd_losses.val:.4f}({kd_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, kd_losses=kd_losses, top1=top1, top5=top5))
logging.info(log_str)
def test(test_loader, nets, criterions, epoch):
cls_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
criterionCls = criterions['criterionCls']
snet.eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
_, _, _, _, _, out_s = snet(img)
cls_loss = criterionCls(out_s, target)
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [cls_losses.avg, top1.avg, top5.avg]
logging.info('Cls: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
return top1.avg, top5.avg
def adjust_lr(optimizer, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('Epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main() | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_bss.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from network import define_tsnet
from kd_losses import *
parser = argparse.ArgumentParser(description='train kd')
# various path
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--s_init', type=str, required=True, help='initial parameters of student model')
parser.add_argument('--t_model', type=str, required=True, help='path name of teacher model')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='The size of batch')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset choosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--t_name', type=str, required=True, help='name of teacher') # resnet20/resnet110
parser.add_argument('--s_name', type=str, required=True, help='name of student') # resnet20/resnet110
# hyperparameter
parser.add_argument('--kd_mode', type=str, required=True, help='mode of kd, which can be:'
'logits/st/at/fitnet/nst/pkt/fsp/rkd/ab/'
'sp/sobolev/cc/lwm/irg/vid/ofd/afd')
parser.add_argument('--lambda_kd', type=float, default=1.0, help='trade-off parameter for kd loss')
parser.add_argument('--T', type=float, default=4.0, help='temperature for ST')
parser.add_argument('--p', type=float, default=2.0, help='power for AT')
parser.add_argument('--w_dist', type=float, default=25.0, help='weight for RKD distance')
parser.add_argument('--w_angle', type=float, default=50.0, help='weight for RKD angle')
parser.add_argument('--m', type=float, default=2.0, help='margin for AB')
parser.add_argument('--gamma', type=float, default=0.4, help='gamma in Gaussian RBF for CC')
parser.add_argument('--P_order', type=int, default=2, help='P-order Taylor series of Gaussian RBF for CC')
parser.add_argument('--w_irg_vert', type=float, default=0.1, help='weight for IRG vertex')
parser.add_argument('--w_irg_edge', type=float, default=5.0, help='weight for IRG edge')
parser.add_argument('--w_irg_tran', type=float, default=5.0, help='weight for IRG transformation')
parser.add_argument('--sf', type=float, default=1.0, help='scale factor for VID, i.e. mid_channels = sf * out_channels')
parser.add_argument('--init_var', type=float, default=5.0, help='initial variance for VID')
parser.add_argument('--att_f', type=float, default=1.0, help='attention factor of mid_channels for AFD')
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
snet = define_tsnet(name=args.s_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.s_init)
load_pretrained_model(snet, checkpoint['net'])
logging.info('Student: %s', snet)
logging.info('Student param size = %fMB', count_parameters_in_MB(snet))
tnet = define_tsnet(name=args.t_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.t_model)
load_pretrained_model(tnet, checkpoint['net'])
tnet.eval()
for param in tnet.parameters():
param.requires_grad = False
logging.info('Teacher: %s', tnet)
logging.info('Teacher param size = %fMB', count_parameters_in_MB(tnet))
logging.info('-----------------------------------------------')
# define loss functions
if args.kd_mode == 'logits':
criterionKD = Logits()
elif args.kd_mode == 'st':
criterionKD = SoftTarget(args.T)
elif args.kd_mode == 'at':
criterionKD = AT(args.p)
elif args.kd_mode == 'fitnet':
criterionKD = Hint()
elif args.kd_mode == 'nst':
criterionKD = NST()
elif args.kd_mode == 'pkt':
criterionKD = PKTCosSim()
elif args.kd_mode == 'fsp':
criterionKD = FSP()
elif args.kd_mode == 'rkd':
criterionKD = RKD(args.w_dist, args.w_angle)
elif args.kd_mode == 'ab':
criterionKD = AB(args.m)
elif args.kd_mode == 'sp':
criterionKD = SP()
elif args.kd_mode == 'sobolev':
criterionKD = Sobolev()
elif args.kd_mode == 'cc':
criterionKD = CC(args.gamma, args.P_order)
elif args.kd_mode == 'lwm':
criterionKD = LwM()
elif args.kd_mode == 'irg':
criterionKD = IRG(args.w_irg_vert, args.w_irg_edge, args.w_irg_tran)
elif args.kd_mode == 'vid':
s_channels = snet.module.get_channel_num()[1:4]
t_channels = tnet.module.get_channel_num()[1:4]
criterionKD = []
for s_c, t_c in zip(s_channels, t_channels):
criterionKD.append(VID(s_c, int(args.sf*t_c), t_c, args.init_var))
criterionKD = [c.cuda() for c in criterionKD] if args.cuda else criterionKD
criterionKD = [None] + criterionKD # None is a placeholder
elif args.kd_mode == 'ofd':
s_channels = snet.module.get_channel_num()[1:4]
t_channels = tnet.module.get_channel_num()[1:4]
criterionKD = []
for s_c, t_c in zip(s_channels, t_channels):
criterionKD.append(OFD(s_c, t_c).cuda() if args.cuda else OFD(s_c, t_c))
criterionKD = [None] + criterionKD # None is a placeholder
elif args.kd_mode == 'afd':
# t_channels is the same as s_channels
s_channels = snet.module.get_channel_num()[1:4]
t_channels = tnet.module.get_channel_num()[1:4]
criterionKD = []
for t_c in t_channels:
criterionKD.append(AFD(t_c, args.att_f).cuda() if args.cuda else AFD(t_c, args.att_f))
criterionKD = [None] + criterionKD # None is a placeholder
# # t_chws is same with s_chws
# s_chws = snet.module.get_chw_num()[1:4]
# t_chws = tnet.module.get_chw_num()[1:4]
# criterionKD = []
# for t_chw in t_chws:
# criterionKD.append(AFD(t_chw).cuda() if args.cuda else AFD(t_chw))
# criterionKD = [None] + criterionKD # None is a placeholder
else:
raise Exception('Invalid kd mode...')
if args.cuda:
criterionCls = torch.nn.CrossEntropyLoss().cuda()
else:
criterionCls = torch.nn.CrossEntropyLoss()
# initialize optimizer
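# (editor's note) for 'vid', 'ofd' and 'afd' the distillation criteria themselves contain
# trainable layers, so their parameters are chained into the same SGD optimizer as the student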
if args.kd_mode in ['vid', 'ofd', 'afd']:
optimizer = torch.optim.SGD(chain(snet.parameters(),
*[c.parameters() for c in criterionKD[1:]]),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
else:
optimizer = torch.optim.SGD(snet.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# define transforms
if args.data_name == 'cifar10':
dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# warp nets and criterions for train and test
nets = {'snet':snet, 'tnet':tnet}
criterions = {'criterionCls':criterionCls, 'criterionKD':criterionKD}
# first initialize the student net (stage one for FSP/AB)
if args.kd_mode in ['fsp', 'ab']:
logging.info('The first stage, student initialization......')
train_init(train_loader, nets, optimizer, criterions, 50)
args.lambda_kd = 0.0
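# With lambda_kd zeroed, the second stage below effectively trains the student with the
# cross-entropy loss only; FSP/AB are used purely for the initialization stage above.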
logging.info('The second stage, softmax training......')
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizer, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, nets, optimizer, criterions, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top1, test_top5 = test(test_loader, nets, criterions, epoch)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
is_best = False
if test_top1 > best_top1:
best_top1 = test_top1
best_top5 = test_top5
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'snet': snet.state_dict(),
'tnet': tnet.state_dict(),
'prec@1': test_top1,
'prec@5': test_top5,
}, is_best, args.save_root)
def train_init(train_loader, nets, optimizer, criterions, total_epoch):
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.train()
for epoch in range(1, total_epoch+1):
adjust_lr_init(optimizer, epoch)
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
epoch_start_time = time.time()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
stem_s, rb1_s, rb2_s, rb3_s, feat_s, out_s = snet(img)
stem_t, rb1_t, rb2_t, rb3_t, feat_t, out_t = tnet(img)
cls_loss = criterionCls(out_s, target) * 0.0
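# The classification loss is multiplied by zero on purpose: only the KD term drives this
# initialization stage, while the cls value is still tracked so the logging stays uniform.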
if args.kd_mode in ['fsp']:
kd_loss = (criterionKD(stem_s[1], rb1_s[1], stem_t[1].detach(), rb1_t[1].detach()) +
criterionKD(rb1_s[1], rb2_s[1], rb1_t[1].detach(), rb2_t[1].detach()) +
criterionKD(rb2_s[1], rb3_s[1], rb2_t[1].detach(), rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['ab']:
kd_loss = (criterionKD(rb1_s[0], rb1_t[0].detach()) +
criterionKD(rb2_s[0], rb2_t[0].detach()) +
criterionKD(rb3_s[0], rb3_t[0].detach())) / 3.0 * args.lambda_kd
else:
raise Exception('Invalid kd mode...')
loss = cls_loss + kd_loss
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'KD:{kd_losses.val:.4f}({kd_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, kd_losses=kd_losses, top1=top1, top5=top5))
logging.info(log_str)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
def train(train_loader, nets, optimizer, criterions, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.train()
if args.kd_mode in ['vid', 'ofd']:
for i in range(1,4):
criterionKD[i].train()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.kd_mode in ['sobolev', 'lwm']:
img.requires_grad = True
stem_s, rb1_s, rb2_s, rb3_s, feat_s, out_s = snet(img)
stem_t, rb1_t, rb2_t, rb3_t, feat_t, out_t = tnet(img)
cls_loss = criterionCls(out_s, target)
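# KD loss dispatch. Each rb*_ output is a (pre-activation, post-activation) pair: index 0 is the
# feature before the final ReLU (used by AB/OFD), index 1 is the activated feature (used by most
# feature-based losses); feat_* is the pooled embedding and out_* the logits.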
if args.kd_mode in ['logits', 'st']:
kd_loss = criterionKD(out_s, out_t.detach()) * args.lambda_kd
elif args.kd_mode in ['fitnet', 'nst']:
kd_loss = criterionKD(rb3_s[1], rb3_t[1].detach()) * args.lambda_kd
elif args.kd_mode in ['at', 'sp']:
kd_loss = (criterionKD(rb1_s[1], rb1_t[1].detach()) +
criterionKD(rb2_s[1], rb2_t[1].detach()) +
criterionKD(rb3_s[1], rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['pkt', 'rkd', 'cc']:
kd_loss = criterionKD(feat_s, feat_t.detach()) * args.lambda_kd
elif args.kd_mode in ['fsp']:
kd_loss = (criterionKD(stem_s[1], rb1_s[1], stem_t[1].detach(), rb1_t[1].detach()) +
criterionKD(rb1_s[1], rb2_s[1], rb1_t[1].detach(), rb2_t[1].detach()) +
criterionKD(rb2_s[1], rb3_s[1], rb2_t[1].detach(), rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['ab']:
kd_loss = (criterionKD(rb1_s[0], rb1_t[0].detach()) +
criterionKD(rb2_s[0], rb2_t[0].detach()) +
criterionKD(rb3_s[0], rb3_t[0].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['sobolev']:
kd_loss = criterionKD(out_s, out_t, img, target) * args.lambda_kd
elif args.kd_mode in ['lwm']:
kd_loss = criterionKD(out_s, rb2_s[1], out_t, rb2_t[1], target) * args.lambda_kd
elif args.kd_mode in ['irg']:
kd_loss = criterionKD([rb2_s[1], rb3_s[1], feat_s, out_s],
[rb2_t[1].detach(),
rb3_t[1].detach(),
feat_t.detach(),
out_t.detach()]) * args.lambda_kd
elif args.kd_mode in ['vid', 'afd']:
kd_loss = (criterionKD[1](rb1_s[1], rb1_t[1].detach()) +
criterionKD[2](rb2_s[1], rb2_t[1].detach()) +
criterionKD[3](rb3_s[1], rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['ofd']:
kd_loss = (criterionKD[1](rb1_s[0], rb1_t[0].detach()) +
criterionKD[2](rb2_s[0], rb2_t[0].detach()) +
criterionKD[3](rb3_s[0], rb3_t[0].detach())) / 3.0 * args.lambda_kd
else:
raise Exception('Invalid kd mode...')
loss = cls_loss + kd_loss
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'KD:{kd_losses.val:.4f}({kd_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, kd_losses=kd_losses, top1=top1, top5=top5))
logging.info(log_str)
def test(test_loader, nets, criterions, epoch):
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.eval()
if args.kd_mode in ['vid', 'ofd']:
for i in range(1,4):
criterionKD[i].eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.kd_mode in ['sobolev', 'lwm']:
img.requires_grad = True
stem_s, rb1_s, rb2_s, rb3_s, feat_s, out_s = snet(img)
stem_t, rb1_t, rb2_t, rb3_t, feat_t, out_t = tnet(img)
else:
with torch.no_grad():
stem_s, rb1_s, rb2_s, rb3_s, feat_s, out_s = snet(img)
stem_t, rb1_t, rb2_t, rb3_t, feat_t, out_t = tnet(img)
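# Sobolev and LwM need gradients through the inputs/activations to form their losses, so the
# forward passes are not wrapped in no_grad for these two modes even at test time.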
cls_loss = criterionCls(out_s, target)
if args.kd_mode in ['logits', 'st']:
kd_loss = criterionKD(out_s, out_t.detach()) * args.lambda_kd
elif args.kd_mode in ['fitnet', 'nst']:
kd_loss = criterionKD(rb3_s[1], rb3_t[1].detach()) * args.lambda_kd
elif args.kd_mode in ['at', 'sp']:
kd_loss = (criterionKD(rb1_s[1], rb1_t[1].detach()) +
criterionKD(rb2_s[1], rb2_t[1].detach()) +
criterionKD(rb3_s[1], rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['pkt', 'rkd', 'cc']:
kd_loss = criterionKD(feat_s, feat_t.detach()) * args.lambda_kd
elif args.kd_mode in ['fsp']:
kd_loss = (criterionKD(stem_s[1], rb1_s[1], stem_t[1].detach(), rb1_t[1].detach()) +
criterionKD(rb1_s[1], rb2_s[1], rb1_t[1].detach(), rb2_t[1].detach()) +
criterionKD(rb2_s[1], rb3_s[1], rb2_t[1].detach(), rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['ab']:
kd_loss = (criterionKD(rb1_s[0], rb1_t[0].detach()) +
criterionKD(rb2_s[0], rb2_t[0].detach()) +
criterionKD(rb3_s[0], rb3_t[0].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['sobolev']:
kd_loss = criterionKD(out_s, out_t, img, target) * args.lambda_kd
elif args.kd_mode in ['lwm']:
kd_loss = criterionKD(out_s, rb2_s[1], out_t, rb2_t[1], target) * args.lambda_kd
elif args.kd_mode in ['irg']:
kd_loss = criterionKD([rb2_s[1], rb3_s[1], feat_s, out_s],
[rb2_t[1].detach(),
rb3_t[1].detach(),
feat_t.detach(),
out_t.detach()]) * args.lambda_kd
elif args.kd_mode in ['vid', 'afd']:
kd_loss = (criterionKD[1](rb1_s[1], rb1_t[1].detach()) +
criterionKD[2](rb2_s[1], rb2_t[1].detach()) +
criterionKD[3](rb3_s[1], rb3_t[1].detach())) / 3.0 * args.lambda_kd
elif args.kd_mode in ['ofd']:
kd_loss = (criterionKD[1](rb1_s[0], rb1_t[0].detach()) +
criterionKD[2](rb2_s[0], rb2_t[0].detach()) +
criterionKD[3](rb3_s[0], rb3_t[0].detach())) / 3.0 * args.lambda_kd
else:
raise Exception('Invalid kd mode...')
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [cls_losses.avg, kd_losses.avg, top1.avg, top5.avg]
logging.info('Cls: {:.4f}, KD: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
return top1.avg, top5.avg
def adjust_lr_init(optimizer, epoch):
scale = 0.1
lr_list = [args.lr*scale] * 30
lr_list += [args.lr*scale*scale] * 10
lr_list += [args.lr*scale*scale*scale] * 10
lr = lr_list[epoch-1]
logging.info('Epoch: {} lr: {:.4f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_lr(optimizer, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('Epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main() | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_kd.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from network import define_tsnet, define_paraphraser, define_translator
from kd_losses import *
parser = argparse.ArgumentParser(description='factor transfer')
# various paths
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--s_init', type=str, required=True, help='initial parameters of student model')
parser.add_argument('--t_model', type=str, required=True, help='path name of teacher model')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset chosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--t_name', type=str, required=True, help='name of teacher') # resnet20/resnet110
parser.add_argument('--s_name', type=str, required=True, help='name of student') # resnet20/resnet110
# hyperparameter
parser.add_argument('--lambda_kd', type=float, default=200.0)
parser.add_argument('--k', type=float, default=0.5)
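# Example invocation (illustrative only; the checkpoint paths and note tag below are placeholders):
#   python train_ft.py --data_name cifar10 --num_class 10 --t_name resnet110 --s_name resnet20 \
#       --t_model <path/to/teacher_best.pth.tar> --s_init <path/to/student_init.pth.tar> --note ft-example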
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
snet = define_tsnet(name=args.s_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.s_init)
load_pretrained_model(snet, checkpoint['net'])
logging.info('Student: %s', snet)
logging.info('Student param size = %fMB', count_parameters_in_MB(snet))
tnet = define_tsnet(name=args.t_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.t_model)
load_pretrained_model(tnet, checkpoint['net'])
tnet.eval()
for param in tnet.parameters():
param.requires_grad = False
logging.info('Teacher: %s', tnet)
logging.info('Teacher param size = %fMB', count_parameters_in_MB(tnet))
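# Factor Transfer setup: the paraphraser is an autoencoder over the teacher's rb3 features whose
# bottleneck of int(k * teacher channels) is the "factor"; the translator maps the student's rb3
# features into the same factor space so the two can be matched by the FT loss.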
use_bn = (args.data_name == 'cifar10')
in_channels_t = tnet.module.get_channel_num()[3]
in_channels_s = snet.module.get_channel_num()[3]
paraphraser = define_paraphraser(in_channels_t, args.k, use_bn, args.cuda)
logging.info('Paraphraser: %s', paraphraser)
logging.info('Paraphraser param size = %fMB', count_parameters_in_MB(paraphraser))
translator = define_translator(in_channels_s, in_channels_t, args.k, use_bn, args.cuda)
logging.info('Translator: %s', translator)
logging.info('Translator param size = %fMB', count_parameters_in_MB(translator))
logging.info('-----------------------------------------------')
# initialize optimizer
optimizer_para = torch.optim.SGD(paraphraser.parameters(),
lr = args.lr * 0.1,
momentum = args.momentum,
weight_decay = args.weight_decay)
optimizer = torch.optim.SGD(chain(snet.parameters(),translator.parameters()),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# define loss functions
criterionKD = FT()
if args.cuda:
criterionCls = torch.nn.CrossEntropyLoss().cuda()
criterionPara = torch.nn.MSELoss().cuda()
else:
criterionCls = torch.nn.CrossEntropyLoss()
criterionPara = torch.nn.MSELoss()
# define transforms
if args.data_name == 'cifar10':
dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# wrap nets and criterions for train and test
nets = {'snet':snet, 'tnet':tnet, 'paraphraser':paraphraser, 'translator':translator}
criterions = {'criterionCls':criterionCls, 'criterionKD':criterionKD}
# first training the paraphraser
logging.info('The first stage, training the paraphraser......')
train_para(train_loader, nets, optimizer_para, criterionPara, 30)
paraphraser.eval()
for param in paraphraser.parameters():
param.requires_grad = False
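# Stage two: with the paraphraser frozen, the student and translator are trained jointly with
# cross-entropy plus the FT loss between translated student factors and detached teacher factors.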
logging.info('The second stage, training the student network......')
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizer, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, nets, optimizer, criterions, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top1, test_top5 = test(test_loader, nets, criterions)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
is_best = False
if test_top1 > best_top1:
best_top1 = test_top1
best_top5 = test_top5
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'snet': snet.state_dict(),
'tnet': tnet.state_dict(),
'prec@1': test_top1,
'prec@5': test_top5,
}, is_best, args.save_root)
def train_para(train_loader, nets, optimizer_para, criterionPara, total_epoch):
tnet = nets['tnet']
paraphraser = nets['paraphraser']
paraphraser.train()
for epoch in range(1, total_epoch+1):
batch_time = AverageMeter()
data_time = AverageMeter()
para_losses = AverageMeter()
epoch_start_time = time.time()
end = time.time()
for i, (img, _) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda()
_, _, _, rb3_t, _, _ = tnet(img)
_, rb3_t_rec = paraphraser(rb3_t[1].detach())
para_loss = criterionPara(rb3_t_rec, rb3_t[1].detach())
para_losses.update(para_loss.item(), img.size(0))
optimizer_para.zero_grad()
para_loss.backward()
optimizer_para.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Para:{para_losses.val:.4f}({para_losses.avg:.4f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
para_losses=para_losses))
logging.info(log_str)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
def train(train_loader, nets, optimizer, criterions, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
paraphraser = nets['paraphraser']
translator = nets['translator']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.train()
translator.train()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
_, _, _, rb3_s, _, out_s = snet(img)
_, _, _, rb3_t, _, _ = tnet(img)
factor_s = translator(rb3_s[1])
factor_t, _ = paraphraser(rb3_t[1])
cls_loss = criterionCls(out_s, target)
kd_loss = criterionKD(factor_s, factor_t.detach()) * args.lambda_kd
loss = cls_loss + kd_loss
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'FT:{kd_losses.val:.4f}({kd_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, kd_losses=kd_losses, top1=top1, top5=top5))
logging.info(log_str)
def test(test_loader, nets, criterions):
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
paraphraser = nets['paraphraser']
translator = nets['translator']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.eval()
translator.eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
_, _, _, rb3_s, _, out_s = snet(img)
_, _, _, rb3_t, _, _ = tnet(img)
factor_s = translator(rb3_s[1])
factor_t, _ = paraphraser(rb3_t[1])
cls_loss = criterionCls(out_s, target)
kd_loss = criterionKD(factor_s, factor_t.detach()) * args.lambda_kd
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [cls_losses.avg, kd_losses.avg, top1.avg, top5.avg]
logging.info('Cls: {:.4f}, KD: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
return top1.avg, top5.avg
def adjust_lr(optimizer, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main() | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_ft.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from network import define_tsnet
from kd_losses import *
parser = argparse.ArgumentParser(description='deep mutual learning (only two nets)')
# various paths
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--net1_init', type=str, required=True, help='initial parameters of net1')
parser.add_argument('--net2_init', type=str, required=True, help='initial parameters of net2')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset chosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--net1_name', type=str, required=True, help='name of net1') # resnet20/resnet110
parser.add_argument('--net2_name', type=str, required=True, help='name of net2') # resnet20/resnet110
# hyperparameter lambda
parser.add_argument('--lambda_kd', type=float, default=1.0)
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
net1 = define_tsnet(name=args.net1_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.net1_init)
load_pretrained_model(net1, checkpoint['net'])
logging.info('Net1: %s', net1)
logging.info('Net1 param size = %fMB', count_parameters_in_MB(net1))
net2 = define_tsnet(name=args.net2_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.net2_init)
load_pretrained_model(net2, checkpoint['net'])
logging.info('Net2: %s', net2)
logging.info('Net2 param size = %fMB', count_parameters_in_MB(net2))
logging.info('-----------------------------------------------')
# initialize optimizer
optimizer1 = torch.optim.SGD(net1.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
optimizer2 = torch.optim.SGD(net2.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# define loss functions
criterionKD = DML()
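# Deep mutual learning: the two peer networks are trained simultaneously, each minimizing
# cross-entropy plus a KD term toward the other network's detached predictions (see train() below).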
if args.cuda:
criterionCls = torch.nn.CrossEntropyLoss().cuda()
else:
criterionCls = torch.nn.CrossEntropyLoss()
# define transforms
if args.data_name == 'cifar10':
dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# wrap nets and criterions for train and test
nets = {'net1':net1, 'net2':net2}
criterions = {'criterionCls':criterionCls, 'criterionKD':criterionKD}
optimizers = {'optimizer1':optimizer1, 'optimizer2':optimizer2}
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizers, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, nets, optimizers, criterions, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top11, test_top15, test_top21, test_top25 = test(test_loader, nets, criterions)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
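# Checkpointing tracks the better of the two peers: is_best is set whenever either network
# improves on the best top-1 accuracy seen so far.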
is_best = False
if max(test_top11, test_top21) > best_top1:
best_top1 = max(test_top11, test_top21)
best_top5 = max(test_top15, test_top25)
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'net1': net1.state_dict(),
'net2': net2.state_dict(),
'prec1@1': test_top11,
'prec1@5': test_top15,
'prec2@1': test_top21,
'prec2@5': test_top25,
}, is_best, args.save_root)
def train(train_loader, nets, optimizers, criterions, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls1_losses = AverageMeter()
kd1_losses = AverageMeter()
cls2_losses = AverageMeter()
kd2_losses = AverageMeter()
top11 = AverageMeter()
top15 = AverageMeter()
top21 = AverageMeter()
top25 = AverageMeter()
net1 = nets['net1']
net2 = nets['net2']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
optimizer1 = optimizers['optimizer1']
optimizer2 = optimizers['optimizer2']
net1.train()
net2.train()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
_, _, _, _, _, out1 = net1(img)
_, _, _, _, _, out2 = net2(img)
# for net1
cls1_loss = criterionCls(out1, target)
kd1_loss = criterionKD(out1, out2.detach()) * args.lambda_kd
net1_loss = cls1_loss + kd1_loss
prec11, prec15 = accuracy(out1, target, topk=(1,5))
cls1_losses.update(cls1_loss.item(), img.size(0))
kd1_losses.update(kd1_loss.item(), img.size(0))
top11.update(prec11.item(), img.size(0))
top15.update(prec15.item(), img.size(0))
# for net2
cls2_loss = criterionCls(out2, target)
kd2_loss = criterionKD(out2, out1.detach()) * args.lambda_kd
net2_loss = cls2_loss + kd2_loss
prec21, prec25 = accuracy(out2, target, topk=(1,5))
cls2_losses.update(cls2_loss.item(), img.size(0))
kd2_losses.update(kd2_loss.item(), img.size(0))
top21.update(prec21.item(), img.size(0))
top25.update(prec25.item(), img.size(0))
# update net1 & net2
optimizer1.zero_grad()
net1_loss.backward()
optimizer1.step()
optimizer2.zero_grad()
net2_loss.backward()
optimizer2.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls1:{cls1_losses.val:.4f}({cls1_losses.avg:.4f}) '
'KD1:{kd1_losses.val:.4f}({kd1_losses.avg:.4f}) '
'Cls2:{cls2_losses.val:.4f}({cls2_losses.avg:.4f}) '
'KD2:{kd2_losses.val:.4f}({kd2_losses.avg:.4f}) '
'prec1@1:{top11.val:.2f}({top11.avg:.2f}) '
'prec1@5:{top15.val:.2f}({top15.avg:.2f}) '
'prec2@1:{top21.val:.2f}({top21.avg:.2f}) '
'prec2@5:{top25.val:.2f}({top25.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls1_losses=cls1_losses, kd1_losses=kd1_losses, top11=top11, top15=top15,
cls2_losses=cls2_losses, kd2_losses=kd2_losses, top21=top21, top25=top25))
logging.info(log_str)
def test(test_loader, nets, criterions):
cls1_losses = AverageMeter()
kd1_losses = AverageMeter()
cls2_losses = AverageMeter()
kd2_losses = AverageMeter()
top11 = AverageMeter()
top15 = AverageMeter()
top21 = AverageMeter()
top25 = AverageMeter()
net1 = nets['net1']
net2 = nets['net2']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
net1.eval()
net2.eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
_, _, _, _, _, out1 = net1(img)
_, _, _, _, _, out2 = net2(img)
# for net1
cls1_loss = criterionCls(out1, target)
kd1_loss = criterionKD(out1, out2.detach()) * args.lambda_kd
prec11, prec15 = accuracy(out1, target, topk=(1,5))
cls1_losses.update(cls1_loss.item(), img.size(0))
kd1_losses.update(kd1_loss.item(), img.size(0))
top11.update(prec11.item(), img.size(0))
top15.update(prec15.item(), img.size(0))
# for net2
cls2_loss = criterionCls(out2, target)
kd2_loss = criterionKD(out2, out1.detach()) * args.lambda_kd
prec21, prec25 = accuracy(out2, target, topk=(1,5))
cls2_losses.update(cls2_loss.item(), img.size(0))
kd2_losses.update(kd2_loss.item(), img.size(0))
top21.update(prec21.item(), img.size(0))
top25.update(prec25.item(), img.size(0))
f_l = [cls1_losses.avg, kd1_losses.avg, top11.avg, top15.avg]
f_l += [cls2_losses.avg, kd2_losses.avg, top21.avg, top25.avg]
logging.info('Cls1: {:.4f}, KD1: {:.4f}, Prec1@1: {:.2f}, Prec1@5: {:.2f}, '
'Cls2: {:.4f}, KD2: {:.4f}, Prec2@1: {:.2f}, Prec2@5: {:.2f}'.format(*f_l))
return top11.avg, top15.avg, top21.avg, top25.avg
def adjust_lr(optimizers, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizers['optimizer1'].param_groups:
param_group['lr'] = lr
for param_group in optimizers['optimizer2'].param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main() | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_dml.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from dataset import CIFAR10IdxSample, CIFAR100IdxSample
from network import define_tsnet
from kd_losses import CRD
parser = argparse.ArgumentParser(description='contrastive representation distillation')
# various paths
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--s_init', type=str, required=True, help='initial parameters of student model')
parser.add_argument('--t_model', type=str, required=True, help='path name of teacher model')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset chosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--t_name', type=str, required=True, help='name of teacher') # resnet20/resnet110
parser.add_argument('--s_name', type=str, required=True, help='name of student') # resnet20/resnet110
# hyperparameter
parser.add_argument('--lambda_kd', type=float, default=0.2, help='trade-off parameter for kd loss')
parser.add_argument('--feat_dim', type=int, default=128, help='dimension of the projection space')
parser.add_argument('--nce_n', type=int, default=16384, help='number of negatives paired with each positive')
parser.add_argument('--nce_t', type=float, default=0.1, help='temperature parameter')
parser.add_argument('--nce_mom', type=float, default=0.5, help='momentum for non-parametric updates')
parser.add_argument('--mode', type=str, default='exact', choices=['exact', 'relax'])
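# 'exact' uses the sample's own index as the positive of the contrastive loss; 'relax' draws the
# positive from another image of the same class (see CIFAR10IdxSample/CIFAR100IdxSample in dataset.py).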
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
snet = define_tsnet(name=args.s_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.s_init)
load_pretrained_model(snet, checkpoint['net'])
logging.info('Student: %s', snet)
logging.info('Student param size = %fMB', count_parameters_in_MB(snet))
tnet = define_tsnet(name=args.t_name, num_class=args.num_class, cuda=args.cuda)
checkpoint = torch.load(args.t_model)
load_pretrained_model(tnet, checkpoint['net'])
tnet.eval()
for param in tnet.parameters():
param.requires_grad = False
logging.info('Teacher: %s', tnet)
logging.info('Teacher param size = %fMB', count_parameters_in_MB(tnet))
logging.info('-----------------------------------------------')
# define transforms
if args.data_name == 'cifar10':
train_dataset = CIFAR10IdxSample
test_dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
train_dataset = CIFAR100IdxSample
test_dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
train_dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True,
n = args.nce_n,
mode = args.mode),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# define loss functions
s_dim = snet.module.get_channel_num()[-2]
t_dim = tnet.module.get_channel_num()[-2]
if args.cuda:
criterionCls = torch.nn.CrossEntropyLoss().cuda()
criterionKD = CRD(s_dim, t_dim, args.feat_dim, args.nce_n,
args.nce_t, args.nce_mom, len(train_loader.dataset)).cuda()
else:
criterionCls = torch.nn.CrossEntropyLoss()
criterionKD = CRD(s_dim, t_dim, args.feat_dim, args.nce_n,
args.nce_t, args.nce_mom, len(train_loader.dataset))
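# CRD keeps learnable projection heads (embed_s/embed_t) and non-parametric memory over the
# training set (hence len(train_loader.dataset) is passed); the heads are trained jointly with
# the student via the chained optimizer below.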
# initialize optimizer
optimizer = torch.optim.SGD(chain(snet.parameters(),
criterionKD.embed_t.parameters(),
criterionKD.embed_s.parameters()),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# wrap nets and criterions for train and test
nets = {'snet':snet, 'tnet':tnet}
criterions = {'criterionCls':criterionCls, 'criterionKD':criterionKD}
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizer, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, nets, optimizer, criterions, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top1, test_top5 = test(test_loader, nets, criterions, epoch)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
is_best = False
if test_top1 > best_top1:
best_top1 = test_top1
best_top5 = test_top5
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'snet': snet.state_dict(),
'tnet': tnet.state_dict(),
'prec@1': test_top1,
'prec@5': test_top5,
}, is_best, args.save_root)
def train(train_loader, nets, optimizer, criterions, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
kd_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionKD = criterions['criterionKD']
snet.train()
criterionKD.embed_s.train()
criterionKD.embed_t.train()
end = time.time()
for i, (img, target, idx, sample_idx) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
idx = idx.cuda(non_blocking=True)
sample_idx = sample_idx.cuda(non_blocking=True)
_, _, _, _, feat_s, out_s = snet(img)
_, _, _, _, feat_t, out_t = tnet(img)
cls_loss = criterionCls(out_s, target)
kd_loss = criterionKD(feat_s, feat_t, idx, sample_idx) * args.lambda_kd
loss = cls_loss + kd_loss
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
kd_losses.update(kd_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'KD:{kd_losses.val:.4f}({kd_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, kd_losses=kd_losses, top1=top1, top5=top5))
logging.info(log_str)
def test(test_loader, nets, criterions, epoch):
cls_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
criterionCls = criterions['criterionCls']
snet.eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
_, _, _, _, _, out_s = snet(img)
cls_loss = criterionCls(out_s, target)
prec1, prec5 = accuracy(out_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [cls_losses.avg, top1.avg, top5.avg]
logging.info('Cls: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
return top1.avg, top5.avg
def adjust_lr(optimizer, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('Epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_crd.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import numpy as np
from PIL import Image
import torchvision.datasets as dst
'''
Modified from https://github.com/HobbitLong/RepDistiller/blob/master/dataset/cifar100.py
'''
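# Each __getitem__ returns (img, target, index, sample_idx), where sample_idx stacks one positive
# index (the sample itself in 'exact' mode, or a random same-class index in 'relax' mode) followed
# by n negative indices drawn from the other classes, as required by the CRD loss.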
class CIFAR10IdxSample(dst.CIFAR10):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=False, n=4096, mode='exact', percent=1.0):
super().__init__(root=root, train=train, download=download,
transform=transform, target_transform=target_transform)
self.n = n
self.mode = mode
num_classes = 10
num_samples = len(self.data)
labels = self.targets
self.cls_positive = [[] for _ in range(num_classes)]
for i in range(num_samples):
self.cls_positive[labels[i]].append(i)
self.cls_negative = [[] for _ in range(num_classes)]
for i in range(num_classes):
for j in range(num_classes):
if j == i:
continue
self.cls_negative[i].extend(self.cls_positive[j])
self.cls_positive = [np.asarray(self.cls_positive[i]) for i in range(num_classes)]
self.cls_negative = [np.asarray(self.cls_negative[i]) for i in range(num_classes)]
if 0 < percent < 1:
num = int(len(self.cls_negative[0]) * percent)
self.cls_negative = [np.random.permutation(self.cls_negative[i])[0:num]
for i in range(num_classes)]
self.cls_positive = np.asarray(self.cls_positive)
self.cls_negative = np.asarray(self.cls_negative)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.mode == 'exact':
pos_idx = index
elif self.mode == 'relax':
pos_idx = np.random.choice(self.cls_positive[target], 1)[0]
else:
raise NotImplementedError(self.mode)
replace = self.n > len(self.cls_negative[target])
neg_idx = np.random.choice(self.cls_negative[target], self.n, replace=replace)
sample_idx = np.hstack((np.asarray([pos_idx]), neg_idx))
return img, target, index, sample_idx
class CIFAR100IdxSample(dst.CIFAR100):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=False, n=4096, mode='exact', percent=1.0):
super().__init__(root=root, train=train, download=download,
transform=transform, target_transform=target_transform)
self.n = n
self.mode = mode
num_classes = 100
num_samples = len(self.data)
labels = self.targets
self.cls_positive = [[] for _ in range(num_classes)]
for i in range(num_samples):
self.cls_positive[labels[i]].append(i)
self.cls_negative = [[] for _ in range(num_classes)]
for i in range(num_classes):
for j in range(num_classes):
if j == i:
continue
self.cls_negative[i].extend(self.cls_positive[j])
self.cls_positive = [np.asarray(self.cls_positive[i]) for i in range(num_classes)]
self.cls_negative = [np.asarray(self.cls_negative[i]) for i in range(num_classes)]
if 0 < percent < 1:
num = int(len(self.cls_negative[0]) * percent)
self.cls_negative = [np.random.permutation(self.cls_negative[i])[0:num]
for i in range(num_classes)]
self.cls_positive = np.asarray(self.cls_positive)
self.cls_negative = np.asarray(self.cls_negative)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.mode == 'exact':
pos_idx = index
elif self.mode == 'relax':
pos_idx = np.random.choice(self.cls_positive[target], 1)[0]
else:
raise NotImplementedError(self.mode)
replace = self.n > len(self.cls_negative[target])
neg_idx = np.random.choice(self.cls_negative[target], self.n, replace=replace)
sample_idx = np.hstack((np.asarray([pos_idx]), neg_idx))
return img, target, index, sample_idx
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/dataset.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import shutil
import numpy as np
import torch
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def count_parameters_in_MB(model):
return sum(np.prod(v.size()) for name, v in model.named_parameters())/1e6
def create_exp_dir(path):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
def load_pretrained_model(model, pretrained_dict):
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
def transform_time(s):
m, s = divmod(int(s), 60)
h, m = divmod(m, 60)
return h,m,s
def save_checkpoint(state, is_best, save_root):
save_path = os.path.join(save_root, 'checkpoint.pth.tar')
torch.save(state, save_path)
if is_best:
best_save_path = os.path.join(save_root, 'model_best.pth.tar')
shutil.copyfile(save_path, best_save_path)
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
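# correct has shape (maxk, batch_size); entry [j, i] is True iff the j-th ranked prediction for
# sample i equals its target, so summing the first k rows counts the top-k hits.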
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/utils.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
def define_tsnet(name, num_class, cuda=True):
if name == 'resnet20':
net = resnet20(num_class=num_class)
elif name == 'resnet110':
net = resnet110(num_class=num_class)
else:
raise Exception('model name does not exist.')
if cuda:
net = torch.nn.DataParallel(net).cuda()
else:
net = torch.nn.DataParallel(net)
return net
class resblock(nn.Module):
def __init__(self, in_channels, out_channels, return_before_act):
super(resblock, self).__init__()
self.return_before_act = return_before_act
self.downsample = (in_channels != out_channels)
if self.downsample:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False)
self.ds = nn.Sequential(*[
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(out_channels)
])
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.ds = None
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, x):
residual = x
pout = self.conv1(x) # pout: pre out before activation
pout = self.bn1(pout)
pout = self.relu(pout)
pout = self.conv2(pout)
pout = self.bn2(pout)
if self.downsample:
residual = self.ds(x)
pout += residual
out = self.relu(pout)
if not self.return_before_act:
return out
else:
return pout, out
class resnet20(nn.Module):
def __init__(self, num_class):
super(resnet20, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU()
self.res1 = self.make_layer(resblock, 3, 16, 16)
self.res2 = self.make_layer(resblock, 3, 16, 32)
self.res3 = self.make_layer(resblock, 3, 32, 64)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64, num_class)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.num_class = num_class
def make_layer(self, block, num, in_channels, out_channels): # num must be >= 2
layers = [block(in_channels, out_channels, False)]
for i in range(num-2):
layers.append(block(out_channels, out_channels, False))
layers.append(block(out_channels, out_channels, True))
return nn.Sequential(*layers)
def forward(self, x):
pstem = self.conv1(x) # pstem: pre stem before activation
pstem = self.bn1(pstem)
stem = self.relu(pstem)
stem = (pstem, stem)
rb1 = self.res1(stem[1])
rb2 = self.res2(rb1[1])
rb3 = self.res3(rb2[1])
feat = self.avgpool(rb3[1])
feat = feat.view(feat.size(0), -1)
out = self.fc(feat)
return stem, rb1, rb2, rb3, feat, out
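# The six outputs above (stem, rb1-rb3, pooled feature, logits) are indexed directly by the
# training scripts; the helpers below expose their channel counts and (C, H, W) shapes so the
# KD modules can be sized without hard-coding them.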
def get_channel_num(self):
return [16, 16, 32, 64, 64, self.num_class]
def get_chw_num(self):
return [(16, 32, 32),
(16, 32, 32),
(32, 16, 16),
(64, 8 , 8 ),
(64,),
(self.num_class,)]
class resnet110(nn.Module):
def __init__(self, num_class):
super(resnet110, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU()
self.res1 = self.make_layer(resblock, 18, 16, 16)
self.res2 = self.make_layer(resblock, 18, 16, 32)
self.res3 = self.make_layer(resblock, 18, 32, 64)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64, num_class)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.num_class = num_class
def make_layer(self, block, num, in_channels, out_channels): # num must be >= 2
layers = [block(in_channels, out_channels, False)]
for i in range(num-2):
layers.append(block(out_channels, out_channels, False))
layers.append(block(out_channels, out_channels, True))
return nn.Sequential(*layers)
def forward(self, x):
pstem = self.conv1(x) # pstem: pre stem before activation
pstem = self.bn1(pstem)
stem = self.relu(pstem)
stem = (pstem, stem)
rb1 = self.res1(stem[1])
rb2 = self.res2(rb1[1])
rb3 = self.res3(rb2[1])
feat = self.avgpool(rb3[1])
feat = feat.view(feat.size(0), -1)
out = self.fc(feat)
return stem, rb1, rb2, rb3, feat, out
def get_channel_num(self):
return [16, 16, 32, 64, 64, self.num_class]
def get_chw_num(self):
return [(16, 32, 32),
(16, 32, 32),
(32, 16, 16),
(64, 8 , 8 ),
(64,),
(self.num_class,)]
def define_paraphraser(in_channels_t, k, use_bn, cuda=True):
net = paraphraser(in_channels_t, k, use_bn)
if cuda:
net = torch.nn.DataParallel(net).cuda()
else:
net = torch.nn.DataParallel(net)
return net
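# The paraphraser below is a convolutional autoencoder over the teacher's last-stage features:
# its encoder bottleneck of int(k * in_channels_t) channels is the "factor" used by Factor
# Transfer, and the decoder is only needed while training it to reconstruct those features.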
class paraphraser(nn.Module):
def __init__(self, in_channels_t, k, use_bn=True):
super(paraphraser, self).__init__()
factor_channels = int(in_channels_t*k)
self.encoder = nn.Sequential(*[
nn.Conv2d(in_channels_t, in_channels_t, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(in_channels_t) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(in_channels_t, factor_channels, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(factor_channels) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(factor_channels, factor_channels, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(factor_channels) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
])
self.decoder = nn.Sequential(*[
nn.ConvTranspose2d(factor_channels, factor_channels, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(factor_channels) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.ConvTranspose2d(factor_channels, in_channels_t, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(in_channels_t) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.ConvTranspose2d(in_channels_t, in_channels_t, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(in_channels_t) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
z = self.encoder(x)
out = self.decoder(z)
return z, out
def define_translator(in_channels_s, in_channels_t, k, use_bn=True, cuda=True):
net = translator(in_channels_s, in_channels_t, k, use_bn)
if cuda:
net = torch.nn.DataParallel(net).cuda()
else:
net = torch.nn.DataParallel(net)
return net
class translator(nn.Module):
def __init__(self, in_channels_s, in_channels_t, k, use_bn=True):
super(translator, self).__init__()
factor_channels = int(in_channels_t*k)
self.encoder = nn.Sequential(*[
nn.Conv2d(in_channels_s, in_channels_s, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(in_channels_s) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(in_channels_s, factor_channels, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(factor_channels) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(factor_channels, factor_channels, 3, 1, 1, bias=bool(1-use_bn)),
nn.BatchNorm2d(factor_channels) if use_bn else nn.Sequential(),
nn.LeakyReLU(0.1, inplace=True),
])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
z = self.encoder(x)
return z
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/network.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import logging
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
from utils import AverageMeter, accuracy, transform_time
from utils import load_pretrained_model, save_checkpoint
from utils import create_exp_dir, count_parameters_in_MB
from network import define_tsnet
parser = argparse.ArgumentParser(description='train base net')
# various paths
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
# training hyperparameters
parser.add_argument('--print_freq', type=int, default=50, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
parser.add_argument('--cuda', type=int, default=1)
# others
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# net and dataset chosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset') # cifar10/cifar100
parser.add_argument('--net_name', type=str, required=True, help='name of basenet') # resnet20/resnet110
args, unparsed = parser.parse_known_args()
args.save_root = os.path.join(args.save_root, args.note)
create_exp_dir(args.save_root)
log_format = '%(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
fh = logging.FileHandler(os.path.join(args.save_root, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
logging.info("args = %s", args)
logging.info("unparsed_args = %s", unparsed)
logging.info('----------- Network Initialization --------------')
net = define_tsnet(name=args.net_name, num_class=args.num_class, cuda=args.cuda)
logging.info('%s', net)
logging.info("param size = %fMB", count_parameters_in_MB(net))
logging.info('-----------------------------------------------')
# save initial parameters
logging.info('Saving initial parameters......')
save_path = os.path.join(args.save_root, 'initial_r{}.pth.tar'.format(args.net_name[6:]))
torch.save({
'epoch': 0,
'net': net.state_dict(),
'prec@1': 0.0,
'prec@5': 0.0,
}, save_path)
# initialize optimizer
optimizer = torch.optim.SGD(net.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = True)
# define loss functions
if args.cuda:
criterion = torch.nn.CrossEntropyLoss().cuda()
else:
criterion = torch.nn.CrossEntropyLoss()
# define transforms
if args.data_name == 'cifar10':
dataset = dst.CIFAR10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2470, 0.2435, 0.2616)
elif args.data_name == 'cifar100':
dataset = dst.CIFAR100
mean = (0.5071, 0.4865, 0.4409)
std = (0.2673, 0.2564, 0.2762)
else:
raise Exception('Invalid dataset name...')
train_transform = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
test_transform = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean,std=std)
])
# define data loader
train_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = train_transform,
train = True,
download = True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
dataset(root = args.img_root,
transform = test_transform,
train = False,
download = True),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
best_top1 = 0
best_top5 = 0
for epoch in range(1, args.epochs+1):
adjust_lr(optimizer, epoch)
# train one epoch
epoch_start_time = time.time()
train(train_loader, net, optimizer, criterion, epoch)
# evaluate on testing set
logging.info('Testing the models......')
test_top1, test_top5 = test(test_loader, net, criterion)
epoch_duration = time.time() - epoch_start_time
logging.info('Epoch time: {}s'.format(int(epoch_duration)))
# save model
is_best = False
if test_top1 > best_top1:
best_top1 = test_top1
best_top5 = test_top5
is_best = True
logging.info('Saving models......')
save_checkpoint({
'epoch': epoch,
'net': net.state_dict(),
'prec@1': test_top1,
'prec@5': test_top5,
}, is_best, args.save_root)
def train(train_loader, net, optimizer, criterion, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
net.train()
end = time.time()
for i, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
_, _, _, _, _, out = net(img)
loss = criterion(out, target)
prec1, prec5 = accuracy(out, target, topk=(1,5))
losses.update(loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
log_str = ('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'loss:{losses.val:.4f}({losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
losses=losses, top1=top1, top5=top5))
logging.info(log_str)
def test(test_loader, net, criterion):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
net.eval()
end = time.time()
for i, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
_, _, _, _, _, out = net(img)
loss = criterion(out, target)
prec1, prec5 = accuracy(out, target, topk=(1,5))
losses.update(loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [losses.avg, top1.avg, top5.avg]
logging.info('Loss: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
return top1.avg, top5.avg
def adjust_lr(optimizer, epoch):
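	# Step schedule (assumes the default 200-epoch run): keep the base lr for epochs
	# 1-100, then multiply it by 0.1 at epoch 101 and again at epoch 151.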
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
logging.info('Epoch: {} lr: {:.3f}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
main() | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/train_base.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
'''
CC with P-order Taylor Expansion of Gaussian RBF kernel
'''
class CC(nn.Module):
'''
Correlation Congruence for Knowledge Distillation
http://openaccess.thecvf.com/content_ICCV_2019/papers/
Peng_Correlation_Congruence_for_Knowledge_Distillation_ICCV_2019_paper.pdf
'''
def __init__(self, gamma, P_order):
super(CC, self).__init__()
self.gamma = gamma
self.P_order = P_order
def forward(self, feat_s, feat_t):
corr_mat_s = self.get_correlation_matrix(feat_s)
corr_mat_t = self.get_correlation_matrix(feat_t)
loss = F.mse_loss(corr_mat_s, corr_mat_t)
return loss
def get_correlation_matrix(self, feat):
feat = F.normalize(feat, p=2, dim=-1)
sim_mat = torch.matmul(feat, feat.t())
corr_mat = torch.zeros_like(sim_mat)
for p in range(self.P_order+1):
corr_mat += math.exp(-2*self.gamma) * (2*self.gamma)**p / \
math.factorial(p) * torch.pow(sim_mat, p)
return corr_mat
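# --- Example usage (a minimal sketch, not part of the original module) ---
# Feeds random student/teacher embeddings of shape (batch, dim) through the CC
# loss; the batch size, feature dimension and gamma/P_order values are
# illustrative assumptions, not values from the original training scripts.
if __name__ == '__main__':
    criterion = CC(gamma=0.4, P_order=2)
    feat_s = torch.randn(8, 128)  # student embeddings
    feat_t = torch.randn(8, 128)  # teacher embeddings
    print('CC loss:', criterion(feat_s, feat_t).item())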
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/cc.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def conv1x1(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1,
padding=0, bias=False)
'''
Modified from https://github.com/HobbitLong/RepDistiller/blob/master/distiller_zoo/VID.py
'''
class VID(nn.Module):
'''
Variational Information Distillation for Knowledge Transfer
https://zpascal.net/cvpr2019/Ahn_Variational_Information_Distillation_for_Knowledge_Transfer_CVPR_2019_paper.pdf
'''
def __init__(self, in_channels, mid_channels, out_channels, init_var, eps=1e-6):
super(VID, self).__init__()
self.eps = eps
self.regressor = nn.Sequential(*[
conv1x1(in_channels, mid_channels),
# nn.BatchNorm2d(mid_channels),
nn.ReLU(),
conv1x1(mid_channels, mid_channels),
# nn.BatchNorm2d(mid_channels),
nn.ReLU(),
conv1x1(mid_channels, out_channels),
])
self.alpha = nn.Parameter(
np.log(np.exp(init_var-eps)-1.0) * torch.ones(out_channels)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
def forward(self, fm_s, fm_t):
pred_mean = self.regressor(fm_s)
pred_var = torch.log(1.0+torch.exp(self.alpha)) + self.eps
pred_var = pred_var.view(1, -1, 1, 1)
neg_log_prob = 0.5 * (torch.log(pred_var) + (pred_mean-fm_t)**2 / pred_var)
loss = torch.mean(neg_log_prob)
return loss
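# --- Example usage (a minimal sketch, not part of the original module) ---
# The regressor maps the student map to the teacher's channel count, so
# in_channels must match fm_s and out_channels must match fm_t; every size and
# the init_var value below are illustrative assumptions.
if __name__ == '__main__':
    criterion = VID(in_channels=16, mid_channels=32, out_channels=32, init_var=5.0)
    fm_s = torch.randn(4, 16, 8, 8)  # student feature map
    fm_t = torch.randn(4, 32, 8, 8)  # teacher feature map
    print('VID loss:', criterion(fm_s, fm_t).item())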
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/vid.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class AB(nn.Module):
'''
Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons
https://arxiv.org/pdf/1811.03233.pdf
'''
def __init__(self, margin):
super(AB, self).__init__()
self.margin = margin
def forward(self, fm_s, fm_t):
		# fm_s and fm_t are feature maps taken before the activation
loss = ((fm_s + self.margin).pow(2) * ((fm_s > -self.margin) & (fm_t <= 0)).float() +
(fm_s - self.margin).pow(2) * ((fm_s <= self.margin) & (fm_t > 0)).float())
loss = loss.mean()
return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/ab.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
'''
In the original paper, AFD is one of the components of AFDS.
AFDS: Attention Feature Distillation and Selection
AFD: Attention Feature Distillation
AFS: Attention Feature Selection
We found the original attention implementation to be unstable, so we replace it with an SE block.
'''
class AFD(nn.Module):
'''
Pay Attention to Features, Transfer Learn Faster CNNs
https://openreview.net/pdf?id=ryxyCeHtPB
'''
def __init__(self, in_channels, att_f):
super(AFD, self).__init__()
mid_channels = int(in_channels * att_f)
self.attention = nn.Sequential(*[
nn.Conv2d(in_channels, mid_channels, 1, 1, 0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, in_channels, 1, 1, 0, bias=True)
])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, fm_s, fm_t, eps=1e-6):
fm_t_pooled = F.adaptive_avg_pool2d(fm_t, 1)
rho = self.attention(fm_t_pooled)
# rho = F.softmax(rho.squeeze(), dim=-1)
rho = torch.sigmoid(rho.squeeze())
rho = rho / torch.sum(rho, dim=1, keepdim=True)
fm_s_norm = torch.norm(fm_s, dim=(2,3), keepdim=True)
fm_s = torch.div(fm_s, fm_s_norm+eps)
fm_t_norm = torch.norm(fm_t, dim=(2,3), keepdim=True)
fm_t = torch.div(fm_t, fm_t_norm+eps)
loss = rho * torch.pow(fm_s-fm_t, 2).mean(dim=(2,3))
loss = loss.sum(1).mean(0)
return loss
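# --- Example usage (a minimal sketch, not part of the original module) ---
# The SE-style attention is computed from the teacher map, so both maps must
# share the channel count given as in_channels; sizes and att_f are
# illustrative assumptions.
if __name__ == '__main__':
    criterion = AFD(in_channels=64, att_f=0.25)
    fm_s = torch.randn(4, 64, 8, 8)  # student feature map
    fm_t = torch.randn(4, 64, 8, 8)  # teacher feature map
    print('AFD loss:', criterion(fm_s, fm_t).item())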
# class AFD(nn.Module):
# '''
# Pay Attention to Features, Transfer Learn Faster CNNs
# https://openreview.net/pdf?id=ryxyCeHtPB
# '''
# def __init__(self, chw):
# super(AFD, self).__init__()
# c, h, w = chw
# self.weight1 = nn.Parameter(math.sqrt(2.0) / math.sqrt(h*w) * torch.randn(h, h*w))
# self.bias1 = nn.Parameter(torch.zeros(h))
# self.weight2 = nn.Parameter(math.sqrt(2.0) / math.sqrt(h) * torch.randn(h))
# self.bias2 = nn.Parameter(torch.zeros(c))
# def forward(self, fm_s, fm_t, eps=1e-6):
# b, c, h, w = fm_t.size()
# fm_t_flatten = fm_t.view(fm_t.size(0), fm_t.size(1), -1)
# weight1 = torch.stack([self.weight1.t()]*b, dim=0)
# bias1 = self.bias1.unsqueeze(0).unsqueeze(1)
# rho = F.relu(torch.bmm(fm_t_flatten, weight1) + bias1)
# weight2 = self.weight2.view(-1, 1)
# bias2 = self.bias2.unsqueeze(0)
# rho = torch.mm(rho.view(-1, rho.size(2)), weight2).view(b,c) + bias2
# # rho = F.softmax(rho, dim=-1)
# rho = torch.sigmoid(rho)
# rho = rho / torch.sum(rho, dim=1, keepdim=True)
# # print(rho)
# fm_s_norm = torch.norm(fm_s, dim=(2,3), keepdim=True)
# fm_s = torch.div(fm_s, fm_s_norm+eps)
# fm_t_norm = torch.norm(fm_t, dim=(2,3), keepdim=True)
# fm_t = torch.div(fm_t, fm_t_norm+eps)
# loss = rho * torch.pow(fm_s-fm_t, 2).mean(dim=(2,3))
# loss = loss.sum(1).mean(0)
# return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/afd.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftTarget(nn.Module):
'''
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
'''
def __init__(self, T):
super(SoftTarget, self).__init__()
self.T = T
def forward(self, out_s, out_t):
loss = F.kl_div(F.log_softmax(out_s/self.T, dim=1),
F.softmax(out_t/self.T, dim=1),
reduction='batchmean') * self.T * self.T
return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/st.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class IRG(nn.Module):
'''
Knowledge Distillation via Instance Relationship Graph
http://openaccess.thecvf.com/content_CVPR_2019/papers/
Liu_Knowledge_Distillation_via_Instance_Relationship_Graph_CVPR_2019_paper.pdf
	The official code is written in Caffe
https://github.com/yufanLIU/IRG
'''
def __init__(self, w_irg_vert, w_irg_edge, w_irg_tran):
super(IRG, self).__init__()
self.w_irg_vert = w_irg_vert
self.w_irg_edge = w_irg_edge
self.w_irg_tran = w_irg_tran
def forward(self, irg_s, irg_t):
fm_s1, fm_s2, feat_s, out_s = irg_s
fm_t1, fm_t2, feat_t, out_t = irg_t
loss_irg_vert = F.mse_loss(out_s, out_t)
irg_edge_feat_s = self.euclidean_dist_feat(feat_s, squared=True)
irg_edge_feat_t = self.euclidean_dist_feat(feat_t, squared=True)
irg_edge_fm_s1 = self.euclidean_dist_fm(fm_s1, squared=True)
irg_edge_fm_t1 = self.euclidean_dist_fm(fm_t1, squared=True)
irg_edge_fm_s2 = self.euclidean_dist_fm(fm_s2, squared=True)
irg_edge_fm_t2 = self.euclidean_dist_fm(fm_t2, squared=True)
loss_irg_edge = (F.mse_loss(irg_edge_feat_s, irg_edge_feat_t) +
F.mse_loss(irg_edge_fm_s1, irg_edge_fm_t1 ) +
F.mse_loss(irg_edge_fm_s2, irg_edge_fm_t2 )) / 3.0
irg_tran_s = self.euclidean_dist_fms(fm_s1, fm_s2, squared=True)
irg_tran_t = self.euclidean_dist_fms(fm_t1, fm_t2, squared=True)
loss_irg_tran = F.mse_loss(irg_tran_s, irg_tran_t)
# print(self.w_irg_vert * loss_irg_vert)
# print(self.w_irg_edge * loss_irg_edge)
# print(self.w_irg_tran * loss_irg_tran)
# print()
loss = (self.w_irg_vert * loss_irg_vert +
self.w_irg_edge * loss_irg_edge +
self.w_irg_tran * loss_irg_tran)
return loss
def euclidean_dist_fms(self, fm1, fm2, squared=False, eps=1e-12):
'''
Calculating the IRG Transformation, where fm1 precedes fm2 in the network.
'''
if fm1.size(2) > fm2.size(2):
fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))
if fm1.size(1) < fm2.size(1):
fm2 = (fm2[:,0::2,:,:] + fm2[:,1::2,:,:]) / 2.0
fm1 = fm1.view(fm1.size(0), -1)
fm2 = fm2.view(fm2.size(0), -1)
fms_dist = torch.sum(torch.pow(fm1-fm2, 2), dim=-1).clamp(min=eps)
if not squared:
fms_dist = fms_dist.sqrt()
fms_dist = fms_dist / fms_dist.max()
return fms_dist
def euclidean_dist_fm(self, fm, squared=False, eps=1e-12):
'''
Calculating the IRG edge of feature map.
'''
fm = fm.view(fm.size(0), -1)
fm_square = fm.pow(2).sum(dim=1)
fm_prod = torch.mm(fm, fm.t())
fm_dist = (fm_square.unsqueeze(0) + fm_square.unsqueeze(1) - 2 * fm_prod).clamp(min=eps)
if not squared:
fm_dist = fm_dist.sqrt()
fm_dist = fm_dist.clone()
fm_dist[range(len(fm)), range(len(fm))] = 0
fm_dist = fm_dist / fm_dist.max()
return fm_dist
def euclidean_dist_feat(self, feat, squared=False, eps=1e-12):
'''
Calculating the IRG edge of feat.
'''
feat_square = feat.pow(2).sum(dim=1)
feat_prod = torch.mm(feat, feat.t())
feat_dist = (feat_square.unsqueeze(0) + feat_square.unsqueeze(1) - 2 * feat_prod).clamp(min=eps)
if not squared:
feat_dist = feat_dist.sqrt()
feat_dist = feat_dist.clone()
feat_dist[range(len(feat)), range(len(feat))] = 0
feat_dist = feat_dist / feat_dist.max()
return feat_dist | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/irg.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
# '''
# NST with Polynomial Kernel, where d=2 and c=0
# It can be treated as matching the Gram matrices of two vectorized feature maps.
# '''
# class NST(nn.Module):
# def __init__(self):
# super(NST, self).__init__()
# def forward(self, fm_s, fm_t):
# fm_s = fm_s.view(fm_s.size(0), fm_s.size(1), -1)
# fm_s = F.normalize(fm_s, dim=2)
# fm_t = fm_t.view(fm_t.size(0), fm_t.size(1), -1)
# fm_t = F.normalize(fm_t, dim=2)
# gram_s = self.gram_matrix(fm_s)
# gram_t = self.gram_matrix(fm_t)
# loss = F.mse_loss(gram_s, gram_t)
# return loss
# def gram_matrix(self, fm):
# return torch.bmm(fm, fm.transpose(1,2))
'''
NST with Polynomial Kernel, where d=2 and c=0
'''
class NST(nn.Module):
'''
Like What You Like: Knowledge Distill via Neuron Selectivity Transfer
https://arxiv.org/pdf/1707.01219.pdf
'''
def __init__(self):
super(NST, self).__init__()
def forward(self, fm_s, fm_t):
fm_s = fm_s.view(fm_s.size(0), fm_s.size(1), -1)
fm_s = F.normalize(fm_s, dim=2)
fm_t = fm_t.view(fm_t.size(0), fm_t.size(1), -1)
fm_t = F.normalize(fm_t, dim=2)
loss = self.poly_kernel(fm_t, fm_t).mean() \
+ self.poly_kernel(fm_s, fm_s).mean() \
- 2 * self.poly_kernel(fm_s, fm_t).mean()
return loss
def poly_kernel(self, fm1, fm2):
fm1 = fm1.unsqueeze(1)
fm2 = fm2.unsqueeze(2)
out = (fm1 * fm2).sum(-1).pow(2)
return out
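# --- Example usage (a minimal sketch, not part of the original module) ---
# NST matches channel-wise activation patterns via an MMD with a polynomial
# kernel, so the spatial size must agree while the channel counts may differ;
# all sizes below are illustrative assumptions.
if __name__ == '__main__':
    criterion = NST()
    fm_s = torch.randn(4, 32, 8, 8)  # student feature map
    fm_t = torch.randn(4, 64, 8, 8)  # teacher feature map
    print('NST loss:', criterion(fm_s, fm_t).item())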
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/nst.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class FT(nn.Module):
'''
	Paraphrasing Complex Network: Network Compression via Factor Transfer
http://papers.nips.cc/paper/7541-paraphrasing-complex-network-network-compression-via-factor-transfer.pdf
'''
def __init__(self):
super(FT, self).__init__()
def forward(self, factor_s, factor_t):
loss = F.l1_loss(self.normalize(factor_s), self.normalize(factor_t))
return loss
def normalize(self, factor):
norm_factor = F.normalize(factor.view(factor.size(0),-1))
return norm_factor
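# --- Example usage (a minimal sketch, not part of the original module) ---
# In the full pipeline the teacher factor comes from a trained paraphraser and
# the student factor from a translator (see network.py); random tensors of a
# matching shape are used here purely for illustration.
if __name__ == '__main__':
    criterion = FT()
    factor_s = torch.randn(4, 32, 8, 8)  # translator output for the student
    factor_t = torch.randn(4, 32, 8, 8)  # paraphraser factor for the teacher
    print('FT loss:', criterion(factor_s, factor_t).item())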
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/ft.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class SP(nn.Module):
'''
Similarity-Preserving Knowledge Distillation
https://arxiv.org/pdf/1907.09682.pdf
'''
def __init__(self):
super(SP, self).__init__()
def forward(self, fm_s, fm_t):
fm_s = fm_s.view(fm_s.size(0), -1)
G_s = torch.mm(fm_s, fm_s.t())
norm_G_s = F.normalize(G_s, p=2, dim=1)
fm_t = fm_t.view(fm_t.size(0), -1)
G_t = torch.mm(fm_t, fm_t.t())
norm_G_t = F.normalize(G_t, p=2, dim=1)
loss = F.mse_loss(norm_G_s, norm_G_t)
return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/sp.py |
from .logits import Logits
from .st import SoftTarget
from .at import AT
from .fitnet import Hint
from .nst import NST
from .pkt import PKTCosSim
from .fsp import FSP
from .ft import FT
from .dml import DML
from .kdsvd import KDSVD
from .rkd import RKD
from .ab import AB
from .sp import SP
from .sobolev import Sobolev
from .bss import BSS, BSSAttacker
from .cc import CC
from .lwm import LwM
from .irg import IRG
from .vid import VID
from .ofd import OFD
from .afd import AFD
from .crd import CRD | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/__init__.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
AT with sum of absolute values with power p
'''
class AT(nn.Module):
'''
Paying More Attention to Attention: Improving the Performance of Convolutional
	Neural Networks via Attention Transfer
https://arxiv.org/pdf/1612.03928.pdf
'''
def __init__(self, p):
super(AT, self).__init__()
self.p = p
def forward(self, fm_s, fm_t):
loss = F.mse_loss(self.attention_map(fm_s), self.attention_map(fm_t))
return loss
def attention_map(self, fm, eps=1e-6):
am = torch.pow(torch.abs(fm), self.p)
am = torch.sum(am, dim=1, keepdim=True)
norm = torch.norm(am, dim=(2,3), keepdim=True)
am = torch.div(am, norm+eps)
return am | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/at.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
From https://github.com/lenscloth/RKD/blob/master/metric/loss.py
'''
class RKD(nn.Module):
'''
Relational Knowledge Distillation
https://arxiv.org/pdf/1904.05068.pdf
'''
def __init__(self, w_dist, w_angle):
super(RKD, self).__init__()
self.w_dist = w_dist
self.w_angle = w_angle
def forward(self, feat_s, feat_t):
loss = self.w_dist * self.rkd_dist(feat_s, feat_t) + \
self.w_angle * self.rkd_angle(feat_s, feat_t)
return loss
def rkd_dist(self, feat_s, feat_t):
feat_t_dist = self.pdist(feat_t, squared=False)
mean_feat_t_dist = feat_t_dist[feat_t_dist>0].mean()
feat_t_dist = feat_t_dist / mean_feat_t_dist
feat_s_dist = self.pdist(feat_s, squared=False)
mean_feat_s_dist = feat_s_dist[feat_s_dist>0].mean()
feat_s_dist = feat_s_dist / mean_feat_s_dist
loss = F.smooth_l1_loss(feat_s_dist, feat_t_dist)
return loss
def rkd_angle(self, feat_s, feat_t):
# N x C --> N x N x C
feat_t_vd = (feat_t.unsqueeze(0) - feat_t.unsqueeze(1))
norm_feat_t_vd = F.normalize(feat_t_vd, p=2, dim=2)
feat_t_angle = torch.bmm(norm_feat_t_vd, norm_feat_t_vd.transpose(1, 2)).view(-1)
feat_s_vd = (feat_s.unsqueeze(0) - feat_s.unsqueeze(1))
norm_feat_s_vd = F.normalize(feat_s_vd, p=2, dim=2)
feat_s_angle = torch.bmm(norm_feat_s_vd, norm_feat_s_vd.transpose(1, 2)).view(-1)
loss = F.smooth_l1_loss(feat_s_angle, feat_t_angle)
return loss
def pdist(self, feat, squared=False, eps=1e-12):
feat_square = feat.pow(2).sum(dim=1)
feat_prod = torch.mm(feat, feat.t())
feat_dist = (feat_square.unsqueeze(0) + feat_square.unsqueeze(1) - 2 * feat_prod).clamp(min=eps)
if not squared:
feat_dist = feat_dist.sqrt()
feat_dist = feat_dist.clone()
feat_dist[range(len(feat)), range(len(feat))] = 0
return feat_dist
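# --- Example usage (a minimal sketch, not part of the original module) ---
# RKD operates on penultimate-layer embeddings of shape (batch, dim); the
# distance/angle weights and sizes below are illustrative assumptions.
if __name__ == '__main__':
    criterion = RKD(w_dist=25.0, w_angle=50.0)
    feat_s = torch.randn(8, 128)  # student embeddings
    feat_t = torch.randn(8, 128)  # teacher embeddings
    print('RKD loss:', criterion(feat_s, feat_t).item())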
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/rkd.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
'''
LwM is originally an incremental learning method with
classification/distillation/attention distillation losses.
Here, LwM is only defined as the Grad-CAM based attention distillation.
'''
class LwM(nn.Module):
'''
Learning without Memorizing
https://arxiv.org/pdf/1811.08051.pdf
'''
def __init__(self):
super(LwM, self).__init__()
def forward(self, out_s, fm_s, out_t, fm_t, target):
target_out_t = torch.gather(out_t, 1, target.view(-1, 1))
grad_fm_t = grad(outputs=target_out_t, inputs=fm_t,
grad_outputs=torch.ones_like(target_out_t),
create_graph=True, retain_graph=True, only_inputs=True)[0]
weights_t = F.adaptive_avg_pool2d(grad_fm_t, 1)
cam_t = torch.sum(torch.mul(weights_t, grad_fm_t), dim=1, keepdim=True)
cam_t = F.relu(cam_t)
cam_t = cam_t.view(cam_t.size(0), -1)
norm_cam_t = F.normalize(cam_t, p=2, dim=1)
target_out_s = torch.gather(out_s, 1, target.view(-1, 1))
grad_fm_s = grad(outputs=target_out_s, inputs=fm_s,
grad_outputs=torch.ones_like(target_out_s),
create_graph=True, retain_graph=True, only_inputs=True)[0]
weights_s = F.adaptive_avg_pool2d(grad_fm_s, 1)
cam_s = torch.sum(torch.mul(weights_s, grad_fm_s), dim=1, keepdim=True)
cam_s = F.relu(cam_s)
cam_s = cam_s.view(cam_s.size(0), -1)
norm_cam_s = F.normalize(cam_s, p=2, dim=1)
loss = F.l1_loss(norm_cam_s, norm_cam_t.detach())
return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/lwm.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.gradcheck import zero_gradients
'''
Modified from https://github.com/bhheo/BSS_distillation
'''
def reduce_sum(x, keepdim=True):
for d in reversed(range(1, x.dim())):
x = x.sum(d, keepdim=keepdim)
return x
def l2_norm(x, keepdim=True):
norm = reduce_sum(x*x, keepdim=keepdim)
return norm.sqrt()
class BSS(nn.Module):
'''
Knowledge Distillation with Adversarial Samples Supporting Decision Boundary
https://arxiv.org/pdf/1805.05532.pdf
'''
def __init__(self, T):
super(BSS, self).__init__()
self.T = T
def forward(self, attacked_out_s, attacked_out_t):
loss = F.kl_div(F.log_softmax(attacked_out_s/self.T, dim=1),
F.softmax(attacked_out_t/self.T, dim=1),
reduction='batchmean') #* self.T * self.T
return loss
class BSSAttacker():
def __init__(self, step_alpha, num_steps, eps=1e-4):
self.step_alpha = step_alpha
self.num_steps = num_steps
self.eps = eps
def attack(self, model, img, target, attack_class):
img = img.detach().requires_grad_(True)
step = 0
while step < self.num_steps:
zero_gradients(img)
_, _, _, _, _, output = model(img)
score = F.softmax(output, dim=1)
score_target = score.gather(1, target.unsqueeze(1))
score_attack_class = score.gather(1, attack_class.unsqueeze(1))
loss = (score_attack_class - score_target).sum()
loss.backward()
step_alpha = self.step_alpha * (target == output.max(1)[1]).float()
step_alpha = step_alpha.unsqueeze(1).unsqueeze(1).unsqueeze(1)
if step_alpha.sum() == 0:
break
pert = (score_target - score_attack_class).unsqueeze(1).unsqueeze(1)
norm_pert = step_alpha * (pert + self.eps) * img.grad / l2_norm(img.grad)
step_adv = img + norm_pert
step_adv = torch.clamp(step_adv, -2.5, 2.5)
img.data = step_adv.data
step += 1
return img
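# --- Example usage (a minimal sketch, not part of the original module) ---
# Only the BSS distillation term is shown; BSSAttacker additionally needs a
# network that returns the logits as the last element of a 6-tuple, as in
# network.py. Logit shapes and the temperature below are illustrative.
if __name__ == '__main__':
    criterion = BSS(T=4.0)
    attacked_out_s = torch.randn(8, 10)  # student logits on adversarial samples
    attacked_out_t = torch.randn(8, 10)  # teacher logits on the same samples
    print('BSS loss:', criterion(attacked_out_s, attacked_out_t).item())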
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/bss.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class Logits(nn.Module):
'''
Do Deep Nets Really Need to be Deep?
http://papers.nips.cc/paper/5484-do-deep-nets-really-need-to-be-deep.pdf
'''
def __init__(self):
super(Logits, self).__init__()
def forward(self, out_s, out_t):
loss = F.mse_loss(out_s, out_t)
return loss
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/logits.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
Adapted from https://github.com/passalis/probabilistic_kt/blob/master/nn/pkt.py
'''
class PKTCosSim(nn.Module):
'''
Learning Deep Representations with Probabilistic Knowledge Transfer
http://openaccess.thecvf.com/content_ECCV_2018/papers/Nikolaos_Passalis_Learning_Deep_Representations_ECCV_2018_paper.pdf
'''
def __init__(self):
super(PKTCosSim, self).__init__()
def forward(self, feat_s, feat_t, eps=1e-6):
# Normalize each vector by its norm
feat_s_norm = torch.sqrt(torch.sum(feat_s ** 2, dim=1, keepdim=True))
feat_s = feat_s / (feat_s_norm + eps)
feat_s[feat_s != feat_s] = 0
feat_t_norm = torch.sqrt(torch.sum(feat_t ** 2, dim=1, keepdim=True))
feat_t = feat_t / (feat_t_norm + eps)
feat_t[feat_t != feat_t] = 0
# Calculate the cosine similarity
feat_s_cos_sim = torch.mm(feat_s, feat_s.transpose(0, 1))
feat_t_cos_sim = torch.mm(feat_t, feat_t.transpose(0, 1))
# Scale cosine similarity to [0,1]
feat_s_cos_sim = (feat_s_cos_sim + 1.0) / 2.0
feat_t_cos_sim = (feat_t_cos_sim + 1.0) / 2.0
# Transform them into probabilities
feat_s_cond_prob = feat_s_cos_sim / torch.sum(feat_s_cos_sim, dim=1, keepdim=True)
feat_t_cond_prob = feat_t_cos_sim / torch.sum(feat_t_cos_sim, dim=1, keepdim=True)
# Calculate the KL-divergence
loss = torch.mean(feat_t_cond_prob * torch.log((feat_t_cond_prob + eps) / (feat_s_cond_prob + eps)))
return loss
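# --- Example usage (a minimal sketch, not part of the original module) ---
# PKT compares the pairwise cosine-similarity distributions of student and
# teacher embeddings of shape (batch, dim); the embedding dimensions may
# differ. Sizes below are illustrative assumptions.
if __name__ == '__main__':
    criterion = PKTCosSim()
    feat_s = torch.randn(8, 64)   # student embeddings
    feat_t = torch.randn(8, 128)  # teacher embeddings
    print('PKT loss:', criterion(feat_s, feat_t).item())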
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/pkt.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class FSP(nn.Module):
'''
A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning
http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf
'''
def __init__(self):
super(FSP, self).__init__()
def forward(self, fm_s1, fm_s2, fm_t1, fm_t2):
loss = F.mse_loss(self.fsp_matrix(fm_s1,fm_s2), self.fsp_matrix(fm_t1,fm_t2))
return loss
def fsp_matrix(self, fm1, fm2):
if fm1.size(2) > fm2.size(2):
fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))
fm1 = fm1.view(fm1.size(0), fm1.size(1), -1)
fm2 = fm2.view(fm2.size(0), fm2.size(1), -1).transpose(1,2)
fsp = torch.bmm(fm1, fm2) / fm1.size(2)
return fsp
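# --- Example usage (a minimal sketch, not part of the original module) ---
# FSP needs two feature maps per network taken around a stage; when the spatial
# sizes differ the earlier map is average-pooled down inside fsp_matrix. All
# sizes below are illustrative assumptions.
if __name__ == '__main__':
    criterion = FSP()
    fm_s1, fm_s2 = torch.randn(4, 16, 32, 32), torch.randn(4, 32, 16, 16)
    fm_t1, fm_t2 = torch.randn(4, 16, 32, 32), torch.randn(4, 32, 16, 16)
    print('FSP loss:', criterion(fm_s1, fm_s2, fm_t1, fm_t2).item())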
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/fsp.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
class Sobolev(nn.Module):
'''
Sobolev Training for Neural Networks
https://arxiv.org/pdf/1706.04859.pdf
Knowledge Transfer with Jacobian Matching
http://de.arxiv.org/pdf/1803.00443
'''
def __init__(self):
super(Sobolev, self).__init__()
def forward(self, out_s, out_t, img, target):
target_out_s = torch.gather(out_s, 1, target.view(-1, 1))
grad_s = grad(outputs=target_out_s, inputs=img,
grad_outputs=torch.ones_like(target_out_s),
create_graph=True, retain_graph=True, only_inputs=True)[0]
norm_grad_s = F.normalize(grad_s.view(grad_s.size(0), -1), p=2, dim=1)
target_out_t = torch.gather(out_t, 1, target.view(-1, 1))
grad_t = grad(outputs=target_out_t, inputs=img,
grad_outputs=torch.ones_like(target_out_t),
create_graph=True, retain_graph=True, only_inputs=True)[0]
norm_grad_t = F.normalize(grad_t.view(grad_t.size(0), -1), p=2, dim=1)
loss = F.mse_loss(norm_grad_s, norm_grad_t.detach())
return loss
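# --- Example usage (a minimal sketch, not part of the original module) ---
# Jacobian matching needs logits that are differentiable w.r.t. the input, so
# tiny throw-away linear models stand in for the student/teacher here; every
# shape and model below is an illustrative assumption.
if __name__ == '__main__':
    img = torch.randn(4, 3, 32, 32, requires_grad=True)
    target = torch.randint(0, 10, (4,))
    student = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    teacher = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    criterion = Sobolev()
    print('Sobolev loss:', criterion(student(img), teacher(img), img, target).item())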
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/sobolev.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class Hint(nn.Module):
'''
FitNets: Hints for Thin Deep Nets
https://arxiv.org/pdf/1412.6550.pdf
'''
def __init__(self):
super(Hint, self).__init__()
def forward(self, fm_s, fm_t):
loss = F.mse_loss(fm_s, fm_t)
return loss | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/fitnet.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
DML with only two networks
'''
class DML(nn.Module):
'''
Deep Mutual Learning
https://zpascal.net/cvpr2018/Zhang_Deep_Mutual_Learning_CVPR_2018_paper.pdf
'''
def __init__(self):
super(DML, self).__init__()
def forward(self, out1, out2):
loss = F.kl_div(F.log_softmax(out1, dim=1),
F.softmax(out2, dim=1),
reduction='batchmean')
return loss
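# --- Example usage (a minimal sketch, not part of the original module) ---
# In deep mutual learning each network treats the other's (detached) logits as
# a soft target, so the loss is applied once per network; shapes are illustrative.
if __name__ == '__main__':
    criterion = DML()
    out1 = torch.randn(8, 10)  # logits of network 1
    out2 = torch.randn(8, 10)  # logits of network 2
    loss1 = criterion(out1, out2.detach())  # term for updating network 1
    loss2 = criterion(out2, out1.detach())  # term for updating network 2
    print(loss1.item(), loss2.item())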
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/dml.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
'''
Modified from https://github.com/clovaai/overhaul-distillation/blob/master/CIFAR-100/distiller.py
'''
class OFD(nn.Module):
'''
A Comprehensive Overhaul of Feature Distillation
http://openaccess.thecvf.com/content_ICCV_2019/papers/
Heo_A_Comprehensive_Overhaul_of_Feature_Distillation_ICCV_2019_paper.pdf
'''
def __init__(self, in_channels, out_channels):
super(OFD, self).__init__()
self.connector = nn.Sequential(*[
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_channels)
])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, fm_s, fm_t):
margin = self.get_margin(fm_t)
fm_t = torch.max(fm_t, margin)
fm_s = self.connector(fm_s)
mask = 1.0 - ((fm_s <= fm_t) & (fm_t <= 0.0)).float()
loss = torch.mean((fm_s - fm_t)**2 * mask)
return loss
def get_margin(self, fm, eps=1e-6):
mask = (fm < 0.0).float()
masked_fm = fm * mask
margin = masked_fm.sum(dim=(0,2,3), keepdim=True) / (mask.sum(dim=(0,2,3), keepdim=True)+eps)
return margin | EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/ofd.py |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
'''
Modified from https://github.com/HobbitLong/RepDistiller/tree/master/crd
'''
class CRD(nn.Module):
'''
Contrastive Representation Distillation
https://openreview.net/pdf?id=SkgpBJrtvS
includes two symmetric parts:
	(a) using the teacher as anchor, choose the positive and negatives from the student side
	(b) using the student as anchor, choose the positive and negatives from the teacher side
Args:
s_dim: the dimension of student's feature
t_dim: the dimension of teacher's feature
feat_dim: the dimension of the projection space
nce_n: number of negatives paired with each positive
nce_t: the temperature
nce_mom: the momentum for updating the memory buffer
n_data: the number of samples in the training set, which is the M in Eq.(19)
'''
def __init__(self, s_dim, t_dim, feat_dim, nce_n, nce_t, nce_mom, n_data):
super(CRD, self).__init__()
self.embed_s = Embed(s_dim, feat_dim)
self.embed_t = Embed(t_dim, feat_dim)
self.contrast = ContrastMemory(feat_dim, n_data, nce_n, nce_t, nce_mom)
self.criterion_s = ContrastLoss(n_data)
self.criterion_t = ContrastLoss(n_data)
def forward(self, feat_s, feat_t, idx, sample_idx):
feat_s = self.embed_s(feat_s)
feat_t = self.embed_t(feat_t)
out_s, out_t = self.contrast(feat_s, feat_t, idx, sample_idx)
loss_s = self.criterion_s(out_s)
loss_t = self.criterion_t(out_t)
loss = loss_s + loss_t
return loss
class Embed(nn.Module):
def __init__(self, in_dim, out_dim):
super(Embed, self).__init__()
self.linear = nn.Linear(in_dim, out_dim)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.linear(x)
x = F.normalize(x, p=2, dim=1)
return x
class ContrastLoss(nn.Module):
'''
contrastive loss, corresponding to Eq.(18)
'''
def __init__(self, n_data, eps=1e-7):
super(ContrastLoss, self).__init__()
self.n_data = n_data
self.eps = eps
def forward(self, x):
bs = x.size(0)
N = x.size(1) - 1
M = float(self.n_data)
# loss for positive pair
pos_pair = x.select(1, 0)
log_pos = torch.div(pos_pair, pos_pair.add(N / M + self.eps)).log_()
# loss for negative pair
neg_pair = x.narrow(1, 1, N)
log_neg = torch.div(neg_pair.clone().fill_(N / M), neg_pair.add(N / M + self.eps)).log_()
loss = -(log_pos.sum() + log_neg.sum()) / bs
return loss
class ContrastMemory(nn.Module):
def __init__(self, feat_dim, n_data, nce_n, nce_t, nce_mom):
super(ContrastMemory, self).__init__()
self.N = nce_n
self.T = nce_t
self.momentum = nce_mom
self.Z_t = None
self.Z_s = None
stdv = 1. / math.sqrt(feat_dim / 3.)
self.register_buffer('memory_t', torch.rand(n_data, feat_dim).mul_(2 * stdv).add_(-stdv))
self.register_buffer('memory_s', torch.rand(n_data, feat_dim).mul_(2 * stdv).add_(-stdv))
def forward(self, feat_s, feat_t, idx, sample_idx):
bs = feat_s.size(0)
feat_dim = self.memory_s.size(1)
n_data = self.memory_s.size(0)
# using teacher as anchor
weight_s = torch.index_select(self.memory_s, 0, sample_idx.view(-1)).detach()
weight_s = weight_s.view(bs, self.N + 1, feat_dim)
out_t = torch.bmm(weight_s, feat_t.view(bs, feat_dim, 1))
out_t = torch.exp(torch.div(out_t, self.T)).squeeze().contiguous()
# using student as anchor
weight_t = torch.index_select(self.memory_t, 0, sample_idx.view(-1)).detach()
weight_t = weight_t.view(bs, self.N + 1, feat_dim)
out_s = torch.bmm(weight_t, feat_s.view(bs, feat_dim, 1))
out_s = torch.exp(torch.div(out_s, self.T)).squeeze().contiguous()
# set Z if haven't been set yet
if self.Z_t is None:
self.Z_t = (out_t.mean() * n_data).detach().item()
if self.Z_s is None:
self.Z_s = (out_s.mean() * n_data).detach().item()
out_t = torch.div(out_t, self.Z_t)
out_s = torch.div(out_s, self.Z_s)
# update memory
with torch.no_grad():
pos_mem_t = torch.index_select(self.memory_t, 0, idx.view(-1))
pos_mem_t.mul_(self.momentum)
pos_mem_t.add_(torch.mul(feat_t, 1 - self.momentum))
pos_mem_t = F.normalize(pos_mem_t, p=2, dim=1)
self.memory_t.index_copy_(0, idx, pos_mem_t)
pos_mem_s = torch.index_select(self.memory_s, 0, idx.view(-1))
pos_mem_s.mul_(self.momentum)
pos_mem_s.add_(torch.mul(feat_s, 1 - self.momentum))
pos_mem_s = F.normalize(pos_mem_s, p=2, dim=1)
self.memory_s.index_copy_(0, idx, pos_mem_s)
return out_s, out_t
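# --- Example usage (a minimal sketch, not part of the original module) ---
# CRD keeps a memory bank over the whole training set, so it also needs the
# dataset indices of the batch (idx) and, per sample, the index of the positive
# followed by nce_n negatives (sample_idx). Every size and hyper-parameter
# below is an illustrative assumption.
if __name__ == '__main__':
    n_data, nce_n = 1000, 16
    criterion = CRD(s_dim=64, t_dim=128, feat_dim=128,
                    nce_n=nce_n, nce_t=0.07, nce_mom=0.5, n_data=n_data)
    feat_s = torch.randn(8, 64)   # student features
    feat_t = torch.randn(8, 128)  # teacher features
    idx = torch.randint(0, n_data, (8,))
    sample_idx = torch.randint(0, n_data, (8, nce_n + 1))
    sample_idx[:, 0] = idx  # first column holds the positive index
    print('CRD loss:', criterion(feat_s, feat_t, idx, sample_idx).item())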
| EXA-1-master | exa/libraries/Knowledge-Distillation-Zoo/kd_losses/crd.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import distutils.command.clean
import glob
import importlib.util
import json
import os
import platform
import shlex
import shutil
import subprocess
import sys
from pathlib import Path
from typing import List
import setuptools
import torch
from torch.utils.cpp_extension import (
CUDA_HOME,
BuildExtension,
CppExtension,
CUDAExtension,
)
this_dir = os.path.dirname(__file__)
def get_extra_nvcc_flags_for_build_type() -> List[str]:
build_type = os.environ.get("XFORMERS_BUILD_TYPE", "RelWithDebInfo").lower()
if build_type == "relwithdebinfo":
return ["--generate-line-info"]
elif build_type == "release":
return []
else:
raise ValueError(f"Unknown build type: {build_type}")
def fetch_requirements():
with open("requirements.txt") as f:
reqs = f.read().strip().split("\n")
return reqs
def get_local_version_suffix() -> str:
date_suffix = datetime.datetime.now().strftime("%Y%m%d")
git_hash = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], cwd=Path(__file__).parent
).decode("ascii")[:-1]
return f"+{git_hash}.d{date_suffix}"
def write_version_file(version: str):
version_path = os.path.join(this_dir, "xformers", "version.py")
with open(version_path, "w") as f:
f.write("# noqa: C801\n")
f.write(f'__version__ = "{version}"\n')
tag = os.getenv("GIT_TAG")
if tag is not None:
f.write(f'git_tag = "{tag}"\n')
def symlink_package(name: str, path: Path, is_building_wheel: bool) -> None:
cwd = Path(__file__).resolve().parent
path_from = cwd / path
path_to = os.path.join(cwd, *name.split("."))
try:
if os.path.islink(path_to):
os.unlink(path_to)
elif os.path.isdir(path_to):
shutil.rmtree(path_to)
else:
os.remove(path_to)
except FileNotFoundError:
pass
# OSError: [WinError 1314] A required privilege is not held by the client
# Windows requires special permission to symlink. Fallback to copy
# When building wheels for linux 3.7 and 3.8, symlinks are not included
# So we force a copy, see #611
use_symlink = os.name != "nt" and not is_building_wheel
if use_symlink:
os.symlink(src=path_from, dst=path_to)
else:
shutil.copytree(src=path_from, dst=path_to)
def get_cuda_version(cuda_dir) -> int:
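    # Encodes the toolkit version as major * 100 + minor (only the first digit of
    # the minor version is used), e.g. CUDA 11.6 -> 1106; the version comparisons
    # below rely on this encoding.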
nvcc_bin = "nvcc" if cuda_dir is None else cuda_dir + "/bin/nvcc"
raw_output = subprocess.check_output([nvcc_bin, "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = int(release[0])
bare_metal_minor = int(release[1][0])
assert bare_metal_minor < 100
return bare_metal_major * 100 + bare_metal_minor
def get_flash_attention_extensions(cuda_version: int, extra_compile_args):
# Figure out default archs to target
DEFAULT_ARCHS_LIST = ""
if cuda_version > 1100:
DEFAULT_ARCHS_LIST = "7.5;8.0;8.6"
elif cuda_version == 1100:
DEFAULT_ARCHS_LIST = "7.5;8.0"
else:
return []
if os.getenv("XFORMERS_DISABLE_FLASH_ATTN", "0") != "0":
return []
archs_list = os.environ.get("TORCH_CUDA_ARCH_LIST", DEFAULT_ARCHS_LIST)
nvcc_archs_flags = []
for arch in archs_list.replace(" ", ";").split(";"):
assert len(arch) >= 3, f"Invalid sm version: {arch}"
num = 10 * int(arch[0]) + int(arch[2])
# Need at least 7.5
if num < 75:
continue
nvcc_archs_flags.append(f"-gencode=arch=compute_{num},code=sm_{num}")
if arch.endswith("+PTX"):
nvcc_archs_flags.append(f"-gencode=arch=compute_{num},code=compute_{num}")
if not nvcc_archs_flags:
return []
flash_root = os.path.join(this_dir, "third_party", "flash-attention")
if not os.path.exists(flash_root):
raise RuntimeError(
"flashattention submodule not found. Did you forget "
"to run `git submodule update --init --recursive` ?"
)
return [
CUDAExtension(
name="xformers._C_flashattention",
sources=[
os.path.join("third_party", "flash-attention", path)
for path in [
"csrc/flash_attn/fmha_api.cpp",
"csrc/flash_attn/src/fmha_fwd_hdim32.cu",
"csrc/flash_attn/src/fmha_fwd_hdim64.cu",
"csrc/flash_attn/src/fmha_fwd_hdim128.cu",
"csrc/flash_attn/src/fmha_bwd_hdim32.cu",
"csrc/flash_attn/src/fmha_bwd_hdim64.cu",
"csrc/flash_attn/src/fmha_bwd_hdim128.cu",
"csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
"csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
]
],
extra_compile_args={
**extra_compile_args,
"nvcc": extra_compile_args.get("nvcc", [])
+ [
"-O3",
"-std=c++17",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
]
+ nvcc_archs_flags
+ get_extra_nvcc_flags_for_build_type(),
},
include_dirs=[
p.absolute()
for p in [
Path(flash_root) / "csrc" / "flash_attn",
Path(flash_root) / "csrc" / "flash_attn" / "src",
Path(this_dir) / "third_party" / "cutlass" / "include",
]
],
)
]
def get_extensions():
extensions_dir = os.path.join("xformers", "csrc")
sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"), recursive=True)
source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu"), recursive=True)
sputnik_dir = os.path.join(this_dir, "third_party", "sputnik")
cutlass_dir = os.path.join(this_dir, "third_party", "cutlass", "include")
cutlass_examples_dir = os.path.join(this_dir, "third_party", "cutlass", "examples")
if not os.path.exists(cutlass_dir):
raise RuntimeError(
f"CUTLASS submodule not found at {cutlass_dir}. "
"Did you forget to run "
"`git submodule update --init --recursive` ?"
)
extension = CppExtension
define_macros = []
extra_compile_args = {"cxx": ["-O3"]}
if sys.platform == "win32":
define_macros += [("xformers_EXPORTS", None)]
extra_compile_args["cxx"].extend(["/MP", "/Zc:lambda", "/Zc:preprocessor"])
elif "OpenMP not found" not in torch.__config__.parallel_info():
extra_compile_args["cxx"].append("-fopenmp")
include_dirs = [extensions_dir]
ext_modules = []
cuda_version = None
if (
(torch.cuda.is_available() and ((CUDA_HOME is not None)))
or os.getenv("FORCE_CUDA", "0") == "1"
or os.getenv("TORCH_CUDA_ARCH_LIST", "") != ""
):
extension = CUDAExtension
sources += source_cuda
include_dirs += [sputnik_dir, cutlass_dir, cutlass_examples_dir]
nvcc_flags = [
"-DHAS_PYTORCH",
"--use_fast_math",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--extended-lambda",
"-D_ENABLE_EXTENDED_ALIGNED_STORAGE",
# Workaround for a regression with nvcc > 11.6
# See https://github.com/facebookresearch/xformers/issues/712
"--ptxas-options=-O2",
"--ptxas-options=-allow-expensive-optimizations=true",
] + get_extra_nvcc_flags_for_build_type()
if os.getenv("XFORMERS_ENABLE_DEBUG_ASSERTIONS", "0") != "1":
nvcc_flags.append("-DNDEBUG")
nvcc_flags += shlex.split(os.getenv("NVCC_FLAGS", ""))
cuda_version = get_cuda_version(CUDA_HOME)
if cuda_version >= 1102:
nvcc_flags += [
"--threads",
"4",
"--ptxas-options=-v",
]
if sys.platform == "win32":
nvcc_flags += [
"-std=c++17",
"-Xcompiler",
"/Zc:lambda",
"-Xcompiler",
"/Zc:preprocessor",
]
extra_compile_args["nvcc"] = nvcc_flags
ext_modules += get_flash_attention_extensions(
cuda_version=cuda_version, extra_compile_args=extra_compile_args
)
ext_modules.append(
extension(
"xformers._C",
sorted(sources),
include_dirs=[os.path.abspath(p) for p in include_dirs],
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
)
return ext_modules, {
"version": {
"cuda": cuda_version,
"torch": torch.__version__,
"python": platform.python_version(),
},
"env": {
k: os.environ.get(k)
for k in [
"TORCH_CUDA_ARCH_LIST",
"XFORMERS_BUILD_TYPE",
"XFORMERS_ENABLE_DEBUG_ASSERTIONS",
"NVCC_FLAGS",
"XFORMERS_PACKAGE_FROM",
]
},
}
class clean(distutils.command.clean.clean): # type: ignore
def run(self):
if os.path.exists(".gitignore"):
with open(".gitignore", "r") as f:
ignores = f.read()
for wildcard in filter(None, ignores.split("\n")):
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
class BuildExtensionWithMetadata(BuildExtension):
def __init__(self, *args, **kwargs) -> None:
self.xformers_build_metadata = kwargs.pop("xformers_build_metadata")
self.pkg_name = "xformers"
self.metadata_json = "cpp_lib.json"
super().__init__(*args, **kwargs)
@staticmethod
def _join_cuda_home(*paths) -> str:
"""
Hackfix to support custom `nvcc` binary (eg ccache)
TODO: Remove once we use PT 2.1.0 (https://github.com/pytorch/pytorch/pull/96987)
"""
if paths == ("bin", "nvcc") and "PYTORCH_NVCC" in os.environ:
return os.environ["PYTORCH_NVCC"]
if CUDA_HOME is None:
raise EnvironmentError(
"CUDA_HOME environment variable is not set. "
"Please set it to your CUDA install root."
)
return os.path.join(CUDA_HOME, *paths)
def build_extensions(self) -> None:
torch.utils.cpp_extension._join_cuda_home = (
BuildExtensionWithMetadata._join_cuda_home
)
super().build_extensions()
with open(
os.path.join(self.build_lib, self.pkg_name, self.metadata_json), "w+"
) as fp:
json.dump(self.xformers_build_metadata, fp)
def copy_extensions_to_source(self):
"""
Used for `pip install -e .`
Copies everything we built back into the source repo
"""
build_py = self.get_finalized_command("build_py")
package_dir = build_py.get_package_dir(self.pkg_name)
inplace_file = os.path.join(package_dir, self.metadata_json)
regular_file = os.path.join(self.build_lib, self.pkg_name, self.metadata_json)
self.copy_file(regular_file, inplace_file, level=self.verbose)
super().copy_extensions_to_source()
if __name__ == "__main__":
try:
# when installing as a source distribution, the version module should exist
# Let's import it manually to not trigger the load of the C++
# library - which does not exist yet, and creates a WARNING
spec = importlib.util.spec_from_file_location(
"xformers_version", os.path.join(this_dir, "xformers", "version.py")
)
if spec is None or spec.loader is None:
raise FileNotFoundError()
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
version = module.__version__
except FileNotFoundError:
if os.getenv("BUILD_VERSION"): # In CI
version = os.getenv("BUILD_VERSION", "0.0.0")
else:
version_txt = os.path.join(this_dir, "version.txt")
with open(version_txt) as f:
version = f.readline().strip()
version += get_local_version_suffix()
write_version_file(version)
is_building_wheel = "bdist_wheel" in sys.argv
# Embed a fixed version of flash_attn
# NOTE: The correct way to do this would be to use the `package_dir`
# parameter in `setuptools.setup`, but this does not work when
# developing in editable mode
# See: https://github.com/pypa/pip/issues/3160 (closed, but not fixed)
symlink_package(
"xformers._flash_attn",
Path("third_party") / "flash-attention" / "flash_attn",
is_building_wheel,
)
extensions, extensions_metadata = get_extensions()
setuptools.setup(
name="xformers",
description="XFormers: A collection of composable Transformer building blocks.",
version=version,
install_requires=fetch_requirements(),
packages=setuptools.find_packages(
exclude=("tests*", "benchmarks*", "experimental*")
),
ext_modules=extensions,
cmdclass={
"build_ext": BuildExtensionWithMetadata.with_options(
no_python_abi_suffix=True, xformers_build_metadata=extensions_metadata
),
"clean": clean,
},
url="https://facebookresearch.github.io/xformers/",
python_requires=">=3.7",
author="Facebook AI Research",
author_email="[email protected]",
        long_description="XFormers: A collection of composable Transformer building blocks. "
        + "XFormers aims at being able to reproduce most architectures in the Transformer-family SOTA, "
        + "defined as compatible and combined building blocks as opposed to monolithic models",
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: OS Independent",
],
zip_safe=False,
)
| EXA-1-master | exa/libraries/xformers/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple, TypeVar
import torch
import torch.nn as nn
from pyre_extensions import TypeVarTuple, Unpack
from torch import Tensor
from typing_extensions import Literal as L
Ts = TypeVarTuple("Ts")
N = TypeVar("N", bound=int)
# flake8: noqa
"""
Tensor shape signatures can get complicated and hard to debug. We are basically
writing code at the level of types.
It's helpful to have type-level unit tests for the stubs.
Take care to add both a positive and a negative test for your stub. That way,
even if someone changes the stub to return a bad type like `Any`, we will still
be warned by an unused-ignore error. Otherwise, `y: Tensor[int, L[2], L[3]] =
foo(x)` would silently pass because `Any` is compatible with any type.
Use `pyre --output=json | pyre-upgrade` to add the `pyre-fixme` comment for you.
"""
def test_sin() -> None:
x: Tensor[int, L[2], L[3]]
same_shape_as_x: Tensor[int, L[2], L[3]]
not_same_shape_as_x: Tensor[int, L[2], L[99]]
y: Tensor[int, L[2], L[3]] = torch.sin(x)
# pyre-fixme[9]: y2 has type `Tensor[int, typing_extensions.Literal[2],
# typing_extensions.Literal[4]]`; used as `Tensor[int,
# typing_extensions.Literal[2], typing_extensions.Literal[3]]`.
y2: Tensor[int, L[2], L[4]] = torch.sin(x)
y3: Tensor[int, L[2], L[3]] = torch.sin(x, out=same_shape_as_x)
# pyre-fixme[6]: Expected `Tensor[Variable[torch.DType], *torch.Ts]` for 2nd
# param but got `Tensor[int, int, int]`.
# pyre-fixme[9]: y4 has type `Tensor[int, typing_extensions.Literal[2],
# typing_extensions.Literal[4]]`; used as `Tensor[int,
# typing_extensions.Literal[2], typing_extensions.Literal[3]]`.
y4: Tensor[int, L[2], L[4]] = torch.sin(x, out=not_same_shape_as_x)
y5: Tensor[int, L[2], L[3]] = torch.sin(x, out=None)
def test_unsqueeze() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze(0)
y_torch_function: Tensor[int, L[1], L[2], L[3]] = torch.unsqueeze(x, 0)
y2: Tensor[int, L[2], L[1], L[3]] = x.unsqueeze(1)
y3: Tensor[int, L[2], L[3], L[1]] = x.unsqueeze(-1)
# pyre-fixme[9]: y4 has type `Tensor[int, typing_extensions.Literal[99]]`; used
# as `Tensor[int, typing_extensions.Literal[1], typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y4: Tensor[int, L[99]] = x.unsqueeze(0)
empty: Tensor[int]
y5: Tensor[int, L[1]] = empty.unsqueeze(0)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 1st param but got
# `typing_extensions.Literal[1]`.
y6: Tensor[int, L[1]] = empty.unsqueeze(1)
y7: Tensor[int, L[2], L[3], L[1]] = x.unsqueeze(2)
def test_unsqueeze_() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze_(0)
y_error: Tensor[int, L[1], L[2], L[3]] = x.unsqueeze_(0)
# pyre-ignore[9]: `unsqueeze_` is an in-place shape-transforming function. But Pyre cannot
# update a variable's shape type.
z: Tensor[int, L[1], L[2], L[3]] = x
def test_squeeze_() -> None:
x: Tensor[int, L[1], L[2], L[3]]
out: Tensor
y: Tensor[int, L[2], L[3]] = x.squeeze_(out=out)
# pyre-ignore[9]: Expected error.
y_error: Tensor[int, L[2], L[99]] = x.squeeze_()
y2: Tensor[int, L[2], L[3]] = x.squeeze_().squeeze_()
x2: Tensor[int, L[2], L[3], L[1], L[1]]
x3: Tensor[int, L[2], L[3], L[1]]
y3: Tensor[int, L[2], L[3]] = x2.squeeze_()
y4: Tensor[int, L[2], L[3]] = x3.squeeze_()
y5: Tensor[int, L[2], L[3]] = x.squeeze_(0)
y6: Tensor[int, L[2], L[3], L[1]] = x2.squeeze_(-1)
def test_squeeze() -> None:
x: Tensor[int, L[1], L[2], L[3]]
out: Tensor
y: Tensor[int, L[2], L[3]] = x.squeeze(out=out)
# pyre-ignore[9]: Expected error.
y_error: Tensor[int, L[2], L[99]] = x.squeeze()
y2: Tensor[int, L[2], L[3]] = x.squeeze().squeeze()
x2: Tensor[int, L[2], L[3], L[1], L[1]]
x3: Tensor[int, L[2], L[3], L[1]]
y3: Tensor[int, L[2], L[3]] = x2.squeeze()
y4: Tensor[int, L[2], L[3]] = x3.squeeze()
y5: Tensor[int, L[2], L[3]] = x.squeeze(0)
y6: Tensor[int, L[2], L[3], L[1]] = x2.squeeze(-1)
def test_repeat() -> None:
x: Tensor[int, L[2], L[3]]
y: Tensor[int, L[8], L[15]] = x.repeat(4, 5)
# pyre-fixme[9]
y2: Tensor[int, L[8], L[16]] = x.repeat(4, 5)
# TODO(T96315150): This is passing by coincidence right now.
y3: Tensor[int, L[4], L[10], L[18]] = x.repeat(4, 5, 6)
# pyre-ignore[9]: Doesn't error as expected because we have limited overloads.
y3_error: Tensor[int, L[4], L[10], L[99]] = x.repeat(4, 5, 6)
# pyre-ignore[9, 19]
not_yet_supported: Tensor[int, L[4], L[5], L[12], L[21]] = x.repeat(4, 5, 6, 7)
# Fewer dimensions than the Tensor. Should raise a different error.
x.repeat(2)
one_dimension: Tensor[int, L[2]]
y4: Tensor[int, L[8]] = x.repeat(4)
# pyre-ignore[9]
y4_error: Tensor[int, L[99]] = x.repeat(4)
def test_multiply() -> None:
x: Tensor[torch.int64, L[2], L[3]]
y: Tensor[torch.float32, L[2], L[3]] = x * 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x * 2
y2: Tensor[torch.float32, L[2], L[3]] = 2 * x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 * x
y3: Tensor[torch.float32, L[2], L[3]] = x * 2.0
# pyre-fixme[9]: y3_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[4]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y3_error: Tensor[torch.float32, L[2], L[4]] = x * 2.0
z: Tensor[torch.int64, L[4], L[1], L[1]]
z_bad: Tensor[torch.int64, L[4], L[2], L[99]]
y4: Tensor[torch.int64, L[4], L[2], L[3]] = x * z
# pyre-fixme[2001]: Broadcast error at expression `x.__mul__(z_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[4], typing_extensions.Literal[2],
# typing_extensions.Literal[99]]` cannot be broadcasted together.
x * z_bad
x4: Tensor[torch.float32, L[2], L[3]]
x5: Tensor[torch.float32, L[2], L[3]]
x5_bad: Tensor[torch.float32, L[2], L[99]]
x4 *= x5
x4 *= 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x4.__imul__(x5_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x4 *= x5_bad
def test_floor_division() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x2: Tensor[torch.int64, L[2], L[1]]
y: Tensor[torch.int64, L[2], L[3]] = x // 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x // 2
y2: Tensor[torch.int64, L[2], L[3]] = 2 // x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 // x
y3: Tensor[torch.int64, L[2], L[3]] = x // x2
x3: Tensor[torch.float32, L[2], L[3]]
x4: Tensor[torch.float32, L[2], L[3]]
x4_bad: Tensor[torch.float32, L[2], L[99]]
x3 //= x4
x3 //= 4
y5: Tensor[torch.float32, L[2], L[3]] = x3
# pyre-fixme[2001]: Broadcast error at expression `x3.__ifloordiv__(x4_bad)`;
# types `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x3 //= x4_bad
def test_division() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x2: Tensor[torch.int64, L[2], L[1]]
y: Tensor[torch.float32, L[2], L[3]] = x / 2
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: Tensor[torch.bool, L[2], L[99]] = x / 2
y2: Tensor[torch.float32, L[2], L[3]] = 2 / x
# pyre-fixme[9]: y2_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.bool, L[2], L[99]] = 2 / x
x3: Tensor[torch.float32, L[2], L[3]]
y3: Tensor[torch.float32, L[2], L[3]] = x3 / 2
y4: Tensor[torch.float32, L[2], L[3]] = 2 / x3
y5: Tensor[torch.float32, L[2], L[3]] = x / x2
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 /= x6
x5 /= 4
y6: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__itruediv__(x6_bad)`;
# types `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 /= x6_bad
def test_setitem() -> None:
x: Tensor[torch.int64, L[2], L[3]]
x[0, 0] = 1
def test_arange(n: N) -> None:
y: Tensor[torch.int64, L[5]] = torch.arange(5)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64,
# typing_extensions.Literal[5]]`.
y_error: Tensor[torch.int64, L[99]] = torch.arange(5)
y2: Tensor[torch.int64, L[4]] = torch.arange(1, 5)
y3: Tensor[torch.int64, L[2]] = torch.arange(1, 6, 2)
y_float: Tensor[torch.float32, L[5]] = torch.arange(5, dtype=torch.float32)
y_float2: Tensor[torch.float32, L[2]] = torch.arange(1, 6, 2, dtype=torch.float32)
device: torch.device
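    # With a non-literal bound `n`, the result length stays generic in `N` instead
    # of collapsing to a concrete literal.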
y_generic: Tensor[torch.float32, N] = torch.arange(
0, n, device=device, dtype=torch.float32
)
# pyre-fixme[9]: Expected error.
y_generic_error: Tensor[torch.float32, L[99]] = torch.arange(
0, n, device=device, dtype=torch.float32
)
def test_embedding() -> None:
embedding = nn.Embedding(10, 20)
y: Tensor[torch.float32, L[10], L[20]] = embedding.weight
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[10], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[10],
# typing_extensions.Literal[20]]`.
y_error: Tensor[torch.float32, L[10], L[99]] = embedding.weight
x: Tensor[torch.float32, L[2], L[3], L[4]]
y2: Tensor[torch.float32, L[2], L[3], L[4], L[20]] = embedding(x)
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32, typing_extensions.Liter...
y2_error: Tensor[torch.float32, L[2], L[3], L[4], L[99]] = embedding(x)
weight: Tensor[torch.float32, L[3], L[4]]
embedding2: nn.Embedding[L[3], L[4]] = nn.Embedding.from_pretrained(weight)
# pyre-fixme[9]: embedding2_error has type
# `Embedding[typing_extensions.Literal[3], typing_extensions.Literal[99]]`; used
# as `Embedding[typing_extensions.Literal[3], typing_extensions.Literal[4]]`.
embedding2_error: nn.Embedding[L[3], L[99]] = nn.Embedding.from_pretrained(weight)
y3: Tensor[torch.float32, L[2], L[3], L[4], L[4]] = embedding2(x)
def test_init_normal() -> None:
x: Tensor[torch.float32, L[5], L[10]]
y: Tensor[torch.float32, L[5], L[10]] = nn.init.normal_(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[5], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[5],
# typing_extensions.Literal[10]]`.
y_error: Tensor[torch.float32, L[5], L[99]] = nn.init.normal_(x)
def test_view() -> None:
x: Tensor[torch.float32, L[4], L[4]]
y: Tensor[torch.float32, L[16]] = x.view(16)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[16]]`.
y_error: Tensor[torch.float32, L[99]] = x.view(16)
# Should be an error because 4 * 4 != 99. Don't think this is going to be
# feasible any time soon.
y_error2: Tensor[torch.float32, L[99]] = x.view(99)
y_error3: Tensor[torch.float32, L[2], L[3], L[4], L[5]] = x.view(2, 3, 4, 5)
y2: Tensor[torch.float32, L[2], L[8]] = x.view(-1, 8)
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[8]]`.
y2_error: Tensor[torch.float32, L[2], L[99]] = x.view(-1, 8)
x3: Tensor[torch.float32, L[2], L[3], L[4]]
y3: Tensor[torch.float32, L[24]] = x3.view(-1)
y4: Tensor[torch.float32, L[8], L[3]] = x3.view(-1, 3)
# pyre-fixme[9]: y4_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[8],
# typing_extensions.Literal[3]]`.
y4_error: Tensor[torch.float32, L[99], L[3]] = x3.view(-1, 3)
y5: Tensor[torch.float32, L[2], L[6], L[2]] = x3.view(2, -1, 2)
x4: Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y6: Tensor[torch.float32, L[3], L[5], L[8]] = x4.view(3, 5, -1)
def test_reshape() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[24]] = torch.reshape(x, (-1,))
y2: Tensor[torch.float32, L[8], L[3]] = torch.reshape(x, (-1, 3))
# pyre-fixme[9]: y2_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[8],
# typing_extensions.Literal[3]]`.
y2_error: Tensor[torch.float32, L[99], L[3]] = torch.reshape(x, (-1, 3))
y3: Tensor[torch.float32, L[6], L[2], L[2]] = torch.reshape(x, (-1, 2, 2))
y4: Tensor[torch.float32, L[2], L[6], L[2]] = torch.reshape(x, (2, -1, 2))
y5: Tensor[torch.float32, L[4], L[3], L[2]] = torch.reshape(x, (4, 3, 2))
def test_transpose() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4], L[5], L[6]]
y: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = x.transpose(-2, -1)
y_function: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = torch.transpose(
x, -2, -1
)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: Tensor[torch.float32, L[2], L[4], L[99]] = x.transpose(-2, -1)
y2: Tensor[torch.float32, L[2], L[4], L[3], L[5], L[6]] = x.transpose(1, 2)
y3: Tensor[torch.float32, L[3], L[2], L[4], L[5], L[6]] = x.transpose(0, 1)
y4: Tensor[torch.float32, L[3], L[2], L[4], L[5], L[6]] = x.transpose(1, 0)
y5: Tensor[torch.float32, L[2], L[3], L[4], L[6], L[5]] = x.transpose(-1, -2)
not_yet_supported: Tensor[
torch.float32,
L[3],
L[2],
L[4],
L[5],
L[6]
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[4]`.
] = x.transpose(1, 4)
def test_flatten() -> None:
x: Tensor[torch.float32, L[2], L[3]]
x_large: Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y: Tensor[torch.float32, L[6]] = x.flatten()
y_default: Tensor[torch.float32, L[6]] = torch.flatten(x)
y_large: Tensor[torch.float32, L[120]] = x_large.flatten()
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[6]]`.
y_error: Tensor[torch.float32, L[99]] = x.flatten()
z: Tensor[torch.float32, L[2], L[3], L[4]]
y2: Tensor[torch.float32, L[6], L[4]] = z.flatten(0, 1)
y2_keyword: Tensor[torch.float32, L[6], L[4]] = z.flatten(start_dim=0, end_dim=1)
y3: Tensor[torch.float32, L[2], L[12]] = z.flatten(1, 2)
y3_large: Tensor[torch.float32, L[2], L[12], L[5]] = x_large.flatten(1, 2)
y4: Tensor[torch.float32, L[2], L[3], L[20]] = x_large.flatten(2, 3)
x_6d: Tensor[torch.float32, L[2], L[3], L[4], L[5], L[6], L[7]]
y4_large: Tensor[torch.float32, L[2], L[3], L[20], L[6], L[7]] = x_6d.flatten(2, 3)
# Out of bounds.
# pyre-fixme[9]: y5_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[12]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[6]]`.
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 1st param but got
# `typing_extensions.Literal[99]`.
y5_error: Tensor[torch.float32, L[2], L[12]] = x.flatten(99, 100)
x_0d: Tensor[torch.float32]
y_0d: Tensor[torch.float32, L[1]] = x_0d.flatten()
def test_empty() -> None:
x: Tuple[L[1], L[2], L[3]]
y: Tensor
device: torch.device
result1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(
*x,
device=device,
layout=torch.strided,
requires_grad=True,
out=y,
pin_memory=False,
memory_format=torch.memory_format(),
)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty(*x)
result2: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
result4: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty(x)
result5: torch.Tensor[torch.float32, L[4]] = torch.empty(4)
result6: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty(
x, dtype=torch.int64
)
result7: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty(
*x, dtype=torch.int64
)
def test_empty_like() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
out: Tensor
device: torch.device
y1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.empty_like(
x, device=device, layout=torch.strided, requires_grad=True, out=out
)
# pyre-fixme[9]: Expected error.
y1_error: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.empty_like(
x, device=device, layout=torch.strided, requires_grad=True, out=out
)
y2: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.empty_like(
x,
dtype=torch.int64,
device=device,
layout=torch.strided,
requires_grad=True,
out=out,
)
def test_randn() -> None:
x: Tuple[L[1], L[2], L[3]]
y: Tensor
device: torch.device
result1: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.randn(*x)
result2: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3]] = torch.randn(
*x, device=device, layout=torch.strided, requires_grad=True, out=y
)
result4: torch.Tensor[torch.float32, L[1], L[2], L[3]] = torch.randn(x)
result5: torch.Tensor[torch.float32, L[4]] = torch.randn(4)
result6: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.randn(
x, dtype=torch.int64
)
result7: torch.Tensor[torch.int64, L[1], L[2], L[3]] = torch.randn(
*x, dtype=torch.int64
)
def test_all() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
device: torch.device
y: torch.Tensor[torch.bool, L[1]] = torch.all(x)
# pyre-fixme[9]: bad1 has type `Tensor[torch.bool,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.bool,
# typing_extensions.Literal[1]]`.
y_error: torch.Tensor[torch.bool, L[99]] = torch.all(x)
y2: torch.Tensor[torch.bool, L[2], L[3]] = torch.all(x, dim=0)
y3: torch.Tensor[torch.bool, L[1], L[3]] = torch.all(x, dim=1)
y4: torch.Tensor[torch.bool, L[1]] = x.all()
def test_where() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
good: Tuple[torch.LongTensor[int, int], torch.LongTensor[int, int]] = torch.where(x)
    # pyre-fixme[9]: Expected error.
    bad: Tuple[
torch.LongTensor[int, int], torch.LongTensor[int, int], L[99]
] = torch.where(x)
y: torch.Tensor[torch.float32, L[2], L[1]]
not_broadcastable: torch.Tensor[torch.float32, L[2], L[99]]
good2: torch.Tensor[torch.float32, L[2], L[3]] = torch.where(x > 0, x, y)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
bad2: torch.Tensor[torch.float32, L[2], L[99]] = torch.where(x > 0, x, y)
# pyre-fixme[2001]: Broadcast error at expression `torch.where(x > 0, x,
# not_broadcastable)`; types `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[3]]` and `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[99]]` cannot be broadcasted together.
z = torch.where(x > 0, x, not_broadcastable)
def test_getitem() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[3], L[4]] = x[0]
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[4]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[3],
# typing_extensions.Literal[4]]`.
bad1: torch.Tensor[torch.float32, L[99], L[4]] = x[0]
good2: torch.Tensor[torch.float32, L[1], L[2], L[3], L[4]] = x[None]
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[2], L[3], L[4]] = x[None]
mask: torch.Tensor[torch.bool, L[2], L[3], L[4]]
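    # Boolean-mask indexing selects a runtime-dependent number of elements, so the
    # result is a 1-D tensor whose length is only known as `int`.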
good3: torch.Tensor[torch.float32, int] = x[mask]
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32, int]`.
bad3: torch.Tensor[torch.float32, L[99]] = x[mask]
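    # An integer index with no dedicated overload appears to fall back to `Any`,
    # which is why both of the contradictory annotations below type-check.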
any1: Tuple[int, str, float] = x[2]
any2: Tuple[float, str, int] = x[2]
def test_expand() -> None:
x: torch.Tensor[torch.float32, L[1], L[2], L[3]]
shape: Tuple[L[4], L[1], L[3]]
good1: torch.Tensor[torch.float32, L[4], L[2], L[3]] = x.expand(shape)
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[2], L[3]] = x.expand(shape)
# pyre-fixme[2001]: Broadcast error at expression `x.expand((4, 99, 3))`; types `...
x.expand((4, 99, 3))
good2: torch.Tensor[torch.float32, L[4], L[2], L[3]] = x.expand(4, 1, 3)
def test_to() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.int32, L[2], L[3], L[4]] = x.to(torch.int32)
# pyre-fixme[9]: bad1 has type `Tensor[torch.int32, typing_extensions.Literal[99]...
bad1: torch.Tensor[torch.int32, L[99], L[3], L[4]] = x.to(torch.int32)
device: torch.device
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.to(device)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.to(device)
y: torch.Tensor[torch.int32, L[2], L[3], L[4]]
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = y.to(torch.float32, device)
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = y.to(torch.float32, device)
def test_Linear_to() -> None:
linear: nn.Linear[L[10], L[20]]
device: torch.device
linear.to(dtype=torch.int64, device=device)
def test_Module_eval() -> None:
module: nn.Module
module.eval()
def test_Module_train() -> None:
module: nn.Module
module.train(mode=True)
y: bool = module.training
def test_Linear_bias() -> None:
linear: nn.Linear[L[10], L[20]]
x: nn.Parameter = linear.bias
def test_sum() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y1: torch.Tensor[torch.float32, L[2], L[3]] = x.sum(-1, dtype=None)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[99], L[3]] = x.sum(-1, dtype=None)
y2: torch.Tensor[torch.float32, L[2], L[4]] = x.sum(-2)
y3: torch.Tensor[torch.float32] = x.sum()
y4: torch.Tensor[torch.float32, L[3], L[4]] = x.sum(0)
y5: torch.Tensor[torch.float32, L[2], L[4]] = x.sum(1)
y6: torch.Tensor[torch.float32] = torch.sum(x)
def test_cumsum() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum()
# pyre-fixme[9]: bad1 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad1: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum()
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum(dim=0)
# pyre-fixme[9]: bad2 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum(dim=0)
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.cumsum(dtype=None)
# pyre-fixme[9]: bad3 has type `Tensor[torch.float32, typing_extensions.Literal[9...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.cumsum(dtype=None)
def test_contiguous() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.contiguous()
# pyre-fixme[9]: bad has type `Tensor[torch.float32, typing_extensions.Literal[99...
bad: torch.Tensor[torch.float32, L[99], L[3], L[4]] = x.contiguous()
def test_diff() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[3]] = torch.diff(x)
# pyre-fixme[9]: bad has type `Tensor[torch.float32, typing_extensions.Literal[99...
bad: torch.Tensor[torch.float32, L[99], L[3], L[3]] = torch.diff(x)
good2: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.diff(x, dim=0)
good3: torch.Tensor[torch.float32, L[2], L[2], L[4]] = torch.diff(x, dim=1)
good4: torch.Tensor[torch.float32, L[2], L[3], L[3]] = torch.diff(x, dim=-1)
good5: torch.Tensor[torch.float32, L[2], L[2], L[4]] = torch.diff(x, dim=-2)
good6: torch.Tensor[torch.float32, L[2], L[2], L[4]] = x.diff(dim=-2)
def test_argsort() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(x)
# pyre-fixme[9]: bad1 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad1: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(x)
good2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(x, dim=0)
# pyre-fixme[9]: bad2 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad2: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(x, dim=0)
good3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.argsort(
x, descending=True
)
# pyre-fixme[9]: bad3 has type `LongTensor[torch.float32, typing_extensions.Liter...
bad3: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.argsort(
x, descending=True
)
good4: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.argsort(dim=-1)
def test_functional_pad() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good: torch.Tensor[torch.float32, L[2], L[3], L[5]] = nn.functional.pad(x, (1, 0))
    # pyre-fixme[9]: Expected error.
    bad: torch.Tensor[torch.float32, L[99], L[3], L[5]] = nn.functional.pad(x, (1, 0))
good2: torch.Tensor[torch.float32, L[2], L[10], L[7]] = nn.functional.pad(
x, (1, 2, 3, 4), "constant", value=0.0
)
def test_allclose() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[1]]
not_broadcastable: torch.Tensor[torch.float32, L[3], L[4]]
good: bool = torch.allclose(x, y, atol=0.0, rtol=0.0, equal_nan=True)
# This should complain about non-broadcastable tensors but we don't have a
# way to constrain two parameter types to be broadcastable.
should_error: bool = torch.allclose(x, not_broadcastable)
def test_new_ones() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[8], L[9]] = x.new_ones((8, 9))
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[8], L[99]] = x.new_ones((8, 9))
y2: torch.Tensor[torch.int64, L[8], L[9]] = x.new_ones(
(8, 9), dtype=torch.int64, device="cuda", requires_grad=True
)
def test_ones_like() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
device: torch.device
good: torch.Tensor[torch.int64, L[2], L[3]] = torch.ones_like(
x, dtype=torch.int64, device=device
)
# pyre-fixme[9]: bad has type `Tensor[torch.int64,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.int64, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
bad: torch.Tensor[torch.int64, L[99], L[3]] = torch.ones_like(
x, dtype=torch.int64, device=device
)
bad2: torch.Tensor[torch.float32, L[2], L[3]] = torch.ones_like(
x,
)
def test_sparse_softmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sparse.softmax(x, dim=-1)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[99], L[3]] = torch.sparse.softmax(x, dim=-1)
dtype: torch.int64
y2: torch.Tensor[torch.int64, L[2], L[3]] = torch.sparse.softmax(
x, dim=-1, dtype=dtype
)
def test_eye() -> None:
y: torch.Tensor[torch.int64, L[2], L[3]] = torch.eye(2, 3, dtype=torch.int64)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99], typing_extensions.Literal[3]]`; used as
# `Tensor[torch.int64, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.int64, L[99], L[3]] = torch.eye(2, 3, dtype=torch.int64)
y2: torch.Tensor[torch.float32, L[3], L[3]] = torch.eye(3)
def test_adaptive_average_pool2d() -> None:
model: nn.AdaptiveAvgPool2d[L[5], L[7]] = nn.AdaptiveAvgPool2d((5, 7))
# pyre-fixme[9]: model_error has type
# `AdaptiveAvgPool2d[typing_extensions.Literal[5],
# typing_extensions.Literal[99]]`; used as
# `AdaptiveAvgPool2d[typing_extensions.Literal[5], typing_extensions.Literal[7]]`.
model_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d((5, 7))
model2: nn.AdaptiveAvgPool2d[L[5], L[5]] = nn.AdaptiveAvgPool2d(5)
# TODO(T100083794): This should be an error.
model2_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d(5)
model3: nn.AdaptiveAvgPool2d[L[5], L[-1]] = nn.AdaptiveAvgPool2d((5, None))
# TODO(T100083794): This should be an error.
model3_error: nn.AdaptiveAvgPool2d[L[5], L[99]] = nn.AdaptiveAvgPool2d((5, None))
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[5], L[7]] = model(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[99], L[7]] = model(x)
y2: torch.Tensor[torch.float32, L[2], L[5], L[5]] = model2(x)
y3: torch.Tensor[torch.float32, L[2], L[5], L[4]] = model3(x)
def test_randperm() -> None:
y: torch.Tensor[torch.int64, L[10]] = torch.randperm(10, dtype=torch.int64)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64,
# typing_extensions.Literal[10]]`.
y_error: torch.Tensor[torch.int64, L[99]] = torch.randperm(10, dtype=torch.int64)
def test_sqrt() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sqrt(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.sqrt(x)
def test_multinomial() -> None:
x: torch.Tensor[torch.float32, L[2], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.multinomial(x, 3)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.multinomial(x, 3)
x2: torch.Tensor[torch.float32, L[4]]
y2: torch.Tensor[torch.float32, L[3]] = torch.multinomial(x2, 3)
    y3: torch.Tensor[torch.float32, L[3]] = x2.multinomial(3)
def test_bmm() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
matrix: torch.Tensor[torch.float32, L[2], L[4], L[5]]
y: torch.Tensor[torch.float32, L[2], L[3], L[5]] = torch.bmm(x, matrix)
y2: torch.Tensor[torch.float32, L[2], L[3], L[5]] = x.bmm(matrix)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.bmm(x, matrix)
bad_matrix: torch.Tensor[torch.float32, L[2], L[99], L[5]]
# Should raise an error but doesn't because we solve `L[99] <: M && L[4] <:
# M` to be M = int.
torch.bmm(x, bad_matrix)
def test_subtract() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[1]]
x2: torch.Tensor[torch.float32, L[2], L[1], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x - x2
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x - x2
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x2 - x
y3: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x - 42.0
y4: torch.Tensor[torch.float32, L[2], L[3], L[1]] = 42.0 - x
z: Any
# Should not error.
x - z
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 -= x6
x5 -= 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__isub__(x6_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 -= x6_bad
def test_add() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[1]]
x2: torch.Tensor[torch.float32, L[2], L[1], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x + x2
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x + x2
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x2 + x
y3: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x + 42.0
y4: torch.Tensor[torch.float32, L[2], L[3], L[1]] = 42.0 + x
x5: Tensor[torch.float32, L[2], L[3]]
x6: Tensor[torch.float32, L[2], L[3]]
x6_bad: Tensor[torch.float32, L[2], L[99]]
x5 += x6
x5 += 4
y5: Tensor[torch.float32, L[2], L[3]] = x5
# pyre-fixme[2001]: Broadcast error at expression `x5.__iadd__(x6_bad)`; types
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3]]` and
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[99]]` cannot be
# broadcasted together.
x5 += x6_bad
def test_torch_fft() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.complex64, typing_extensions.Lite...
y_error: torch.Tensor[torch.complex64, L[2], L[3], L[99]] = torch.fft.fft(x)
y2: torch.Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft(x, dim=-2)
def test_torch_real() -> None:
x: torch.Tensor[torch.complex64, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.real(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.real(x)
x2: torch.Tensor[torch.complex128, L[2], L[3], L[4]]
y2: torch.Tensor[torch.float64, L[2], L[3], L[4]] = torch.real(x2)
bad: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# pyre-fixme[6]: Expected `Tensor[torch.complex64, *torch.Ts]` for 1st param but
# got `Tensor[torch.float32, int, int, int]`.
torch.real(bad)
def test_logical_and() -> None:
x: torch.Tensor[torch.complex64, L[2], L[1], L[4]]
x2: torch.Tensor[torch.float32, L[2], L[3], L[1]]
y: torch.Tensor[torch.bool, L[2], L[3], L[4]] = torch.logical_and(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.bool, typing_extensions.Literal[2...
y_error: torch.Tensor[torch.bool, L[2], L[3], L[99]] = torch.logical_and(x, x2)
y2: torch.Tensor[torch.bool, L[2], L[3], L[4]] = x.logical_and(x2)
not_broadcastable: torch.Tensor[torch.float32, L[2], L[3], L[99]]
# pyre-fixme[2001]: Broadcast error at expression `torch.logical_and(x, not_broad...
torch.logical_and(x, not_broadcastable)
x3: torch.Tensor[torch.complex64, L[2], L[1], L[1]]
# In-place version.
x.logical_and_(x3)
# This is actually an error because the output type (2, 3, 4) is not
# assignable to x. But we can't catch that because the typechecker doesn't
# know this is an in-place operator. Leaving this as is for now.
x.logical_and_(x2)
def test_and() -> None:
x_bool: torch.Tensor[torch.bool, L[2], L[1], L[4]]
x_bool2: torch.Tensor[torch.bool, L[2], L[3], L[1]]
y3: torch.Tensor[torch.bool, L[2], L[3], L[4]] = x_bool & x_bool2
# This broadcasts to (2, 1, 4), which is assignable to x_bool.
x_bool3: torch.Tensor[torch.bool, L[2], L[1], L[1]]
x_bool &= x_bool3
# This broadcasts to (2, 3, 4), which is not assignable to x_bool.
# pyre-fixme[9]: x_bool has type `Tensor[torch.bool, typing_extensions.Literal[2]...
x_bool &= x_bool2
x: torch.Tensor[torch.complex64, L[2], L[1], L[4]]
x2: torch.Tensor[torch.float32, L[2], L[3], L[1]]
# pyre-fixme[58]: `&` is not supported for operand types
# `Tensor[torch.complex64, int, int, int]` and `Tensor[torch.float32, int, int,
# int]`.
x & x2
def test_linalg_pinv() -> None:
x: torch.Tensor[torch.float32, L[2], L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[2], L[4], L[3]] = torch.linalg.pinv(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[4], L[99]] = torch.linalg.pinv(x)
wrong_datatype: torch.Tensor[torch.bool, L[2], L[3], L[4]]
# pyre-fixme[6]: Expected `Tensor[Variable[torch.linalg.FloatOrDouble <:
# [torch.float32, torch.float64, torch.complex64, torch.complex128]],
# *torch.linalg.Ts, Variable[N1 (bound to int)], Variable[N2 (bound to int)]]` for
# 1st param but got `Tensor[torch.bool, int, int, int]`.
torch.linalg.pinv(wrong_datatype)
torch.linalg.pinv(x, hermitian=True)
# Last two dimensions have to be equal.
x_square: torch.Tensor[torch.float32, L[2], L[3], L[4], L[4]]
y2: torch.Tensor[torch.float32, L[2], L[3], L[4], L[4]] = torch.linalg.pinv(
x_square, hermitian=True
)
def test_linalg_qr() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[3]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32, typing_extensions....
y_error: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[99]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x)
y2: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[3]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.linalg.qr(x, mode="complete")
def test_torch_matmul() -> None:
x: torch.Tensor[torch.float32, L[2], L[1], L[3], L[4]]
x2: torch.Tensor[torch.float32, L[1], L[5], L[4], L[3]]
y: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = torch.matmul(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[5], L[3], L[99]] = torch.matmul(x, x2)
y2: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = x.matmul(x2)
y3: torch.Tensor[torch.float32, L[2], L[5], L[3], L[3]] = x.__matmul__(x2)
bad_x: torch.Tensor[torch.float32, L[1], L[5], L[99], L[3]]
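    # The mismatched inner dimension (4 vs. 99) is presumably not flagged for the
    # same reason as in `test_bmm` above: the dimension variable unifies to `int`.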
torch.matmul(x, bad_x)
x_1d: torch.Tensor[torch.float32, L[3]]
x2_1d: torch.Tensor[torch.float32, L[3]]
y4: torch.Tensor[torch.float32] = torch.matmul(x_1d, x2_1d)
x3_1d_different: torch.Tensor[torch.float32, L[1]]
torch.matmul(x_1d, x3_1d_different)
def test_torch_optim() -> None:
block_parameters: Any
torch.optim.SGD(block_parameters, lr=1.0)
def test_torch_cuda() -> None:
torch.cuda.reset_peak_memory_stats()
def test_torch_profiler() -> None:
torch.profiler.profile()
def test_mse_loss() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
x2: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32] = nn.MSELoss(
size_average=True, reduce=True, reduction="mean"
)(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32]`.
y_error: torch.Tensor[torch.float32, L[99]] = nn.MSELoss()(x, x2)
def test_clip_grad_norm() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor = nn.utils.clip_grad_norm_(
x, max_norm=0.0, norm_type=0.0, error_if_nonfinite=True
)
# pyre-fixme[9]: y_error has type `int`; used as `Tensor[typing.Any,
# *Tuple[typing.Any, ...]]`.
y_error: int = nn.utils.clip_grad_norm_(
x, max_norm=0.0, norm_type=0.0, error_if_nonfinite=True
)
def test_clip_grad_value() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
nn.utils.clip_grad_value_([x], clip_value=0.0)
def test_bitwise_not() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.bitwise_not(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.bitwise_not(x)
y2: torch.Tensor[torch.float32, L[2], L[3]] = x.bitwise_not()
# In-place.
y3: torch.Tensor[torch.float32, L[2], L[3]] = x.bitwise_not_()
y4: torch.Tensor[torch.float32, L[2], L[3]] = ~x
def test_cdist() -> None:
x: torch.Tensor[torch.float32, L[5], L[1], L[2], L[3]]
x2: torch.Tensor[torch.float32, L[1], L[7], L[4], L[3]]
y: torch.Tensor[torch.float32, L[5], L[7], L[2], L[4]] = torch.cdist(x, x2)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[5], L[7], L[2], L[99]] = torch.cdist(x, x2)
not_broadcastable: torch.Tensor[torch.float32, L[99], L[1], L[2], L[3]]
# pyre-fixme[2001]: Broadcast error at expression `torch.cdist(x,
# not_broadcastable)`; types `Tuple[typing_extensions.Literal[5],
# typing_extensions.Literal[1]]` and `Tuple[typing_extensions.Literal[99],
# typing_extensions.Literal[1]]` cannot be broadcasted together.
torch.cdist(x, not_broadcastable)
def test_random_manual_seed() -> None:
torch.random.manual_seed(42)
def test_clone() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.clone(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.clone(x)
y2: torch.Tensor[torch.float32, L[2], L[3]] = x.clone()
def test_equal() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor[torch.bool, L[2], L[3]] = x == 42
# pyre-fixme[9]: y_error has type `Tensor[torch.bool,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.bool, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.bool, L[2], L[99]] = x == 42
# This doesn't return a Tensor as expected because `int.__eq__` accepts `object`.
y2: int = 42 == x
x2: torch.Tensor[torch.float32, L[2], L[1]]
x3: torch.Tensor[torch.float32, L[1], L[3]]
y3: torch.Tensor[torch.bool, L[2], L[3]] = x2 == x3
def test_diag_embed() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor = torch.diag_embed(x)
def test_unbind() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[torch.Tensor[torch.float32, L[2], L[4]], ...] = torch.unbind(x, dim=1)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]], ...]`; used as
# `Tuple[Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[4]], ...]`.
y_error: Tuple[torch.Tensor[torch.float32, L[2], L[99]], ...] = torch.unbind(
x, dim=1
)
y2: Tuple[torch.Tensor[torch.float32, L[2], L[3]], ...] = torch.unbind(x, dim=-1)
y3: Tuple[torch.Tensor[torch.float32, L[3], L[4]], ...] = torch.unbind(x)
y4: Tuple[torch.Tensor[torch.float32, L[3], L[4]], ...] = x.unbind()
def test_size() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[L[2], L[3], L[4]] = x.size()
# pyre-fixme[9]: y_error has type `Tuple[typing_extensions.Literal[2],
# typing_extensions.Literal[3], typing_extensions.Literal[99]]`; used as
# `Tuple[typing_extensions.Literal[2], typing_extensions.Literal[3],
# typing_extensions.Literal[4]]`.
y_error: Tuple[L[2], L[3], L[99]] = x.size()
y2: L[2] = x.size(0)
y3: L[3] = x.size(1)
y4: L[4] = x.size(-1)
y5: L[3] = x.size(-2)
def test_stack(
    arbitrary_length_tuple: Tuple[torch.Tensor[torch.float32, L[3], L[4], L[5]], ...],
variadic_tuple: Tuple[Unpack[Ts]],
) -> None:
x: torch.Tensor[torch.float32, L[3], L[4], L[5]]
x_incompatible: torch.Tensor[torch.float32, L[3], L[4], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4], L[5]] = torch.stack((x, x))
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[4], L[99]] = torch.stack((x, x))
y_incompatible_tensors: torch.Tensor = torch.stack((x, x_incompatible))
y2: torch.Tensor[torch.float32, L[3], L[2], L[4], L[5]] = torch.stack((x, x), dim=1)
y3: torch.Tensor[torch.float32, L[3], L[3], L[4], L[5]] = torch.stack(
(x, x, x), dim=1
)
y4: torch.Tensor[torch.float32, L[3], L[3], L[4], L[5]] = torch.stack((x, x, x))
# Arbitrary-length tuples make it return an arbitrary Tensor.
    y5: torch.Tensor = torch.stack(arbitrary_length_tuple)
y6: torch.Tensor = torch.stack(variadic_tuple)
def test_repeat_interleave() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
repeats: torch.Tensor[torch.float32, L[2]]
y: torch.Tensor[torch.float32, L[72]] = torch.repeat_interleave(x, 3)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32,
# typing_extensions.Literal[72]]`.
y_error: torch.Tensor[torch.float32, L[99]] = torch.repeat_interleave(x, 3)
y2: torch.Tensor[torch.float32, L[4], L[3], L[4]] = torch.repeat_interleave(
x, 2, dim=0
)
y3: torch.Tensor[torch.float32, L[2], L[6], L[4]] = torch.repeat_interleave(
x, 2, dim=1
)
y4: torch.Tensor[torch.float32, L[2], L[3], L[8]] = torch.repeat_interleave(
x, 2, dim=-1
)
# Too dynamic because the output shape depends on the contents of repeats.
y5: torch.Tensor[torch.float32, L[0], L[3], L[4]] = torch.repeat_interleave(
x, repeats, dim=0
)
y6: torch.Tensor[torch.float32, L[2], L[3], L[8]] = x.repeat_interleave(2, dim=-1)
def test_meshgrid() -> None:
x1: torch.Tensor[torch.float32, L[2]]
x2: torch.Tensor[torch.float32, L[3]]
x3: torch.Tensor[torch.float32, L[4]]
y: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
] = torch.meshgrid(x1, x2, x3)
# pyre-fixme[9]: y_error has type `Tuple[Tensor[torch.float32, typing_extensions....
y_error: Tuple[
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[4]],
torch.Tensor[torch.float32, L[2], L[3], L[99]],
] = torch.meshgrid(x1, x2, x3)
y2: Tuple[
torch.Tensor[torch.float32, L[2], L[3]],
torch.Tensor[torch.float32, L[2], L[3]],
] = torch.meshgrid(x1, x2)
y3: Tuple[
torch.Tensor[torch.float32, L[2]],
] = torch.meshgrid(x1)
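    # Unpacking a dynamically-sized tuple or list loses the literal arity, so the
    # result is just a tuple of plain Tensors.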
x4: Tensor
xs = tuple(x4 for _ in range(5))
y4: Tuple[torch.Tensor, ...] = torch.meshgrid(*xs)
xs2 = [x4 for _ in range(5)]
y5: Tuple[torch.Tensor, ...] = torch.meshgrid(*xs2)
def test_argmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.LongTensor[torch.int64] = torch.argmax(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.LongTensor[torch.int64, L[99]] = torch.argmax(x)
y2: torch.LongTensor[torch.int64, L[3], L[4]] = torch.argmax(x, dim=0)
y3: torch.LongTensor[torch.int64, L[1], L[3], L[4]] = torch.argmax(
x, dim=0, keepdim=True
)
y4: torch.LongTensor[torch.int64, L[2], L[4]] = torch.argmax(x, dim=1)
y5: torch.LongTensor[torch.int64, L[2], L[1], L[4]] = torch.argmax(
x, dim=1, keepdim=True
)
y6: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmax(x, dim=2)
y7: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmax(
x, dim=2, keepdim=True
)
y8: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmax(x, dim=-1)
y9: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmax(
x, dim=-1, keepdim=True
)
y10: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = x.argmax(
dim=-1, keepdim=True
)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.argmax(x, dim=3)
def test_argmin() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.LongTensor[torch.int64] = torch.argmin(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.LongTensor[torch.int64, L[99]] = torch.argmin(x)
y2: torch.LongTensor[torch.int64, L[3], L[4]] = torch.argmin(x, dim=0)
y3: torch.LongTensor[torch.int64, L[1], L[3], L[4]] = torch.argmin(
x, dim=0, keepdim=True
)
y4: torch.LongTensor[torch.int64, L[2], L[4]] = torch.argmin(x, dim=1)
y5: torch.LongTensor[torch.int64, L[2], L[1], L[4]] = torch.argmin(
x, dim=1, keepdim=True
)
y6: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmin(x, dim=2)
y7: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmin(
x, dim=2, keepdim=True
)
y8: torch.LongTensor[torch.int64, L[2], L[3]] = torch.argmin(x, dim=-1)
y9: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = torch.argmin(
x, dim=-1, keepdim=True
)
y10: torch.LongTensor[torch.int64, L[2], L[3], L[1]] = x.argmin(
dim=-1, keepdim=True
)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.argmin(x, dim=3)
def test_mean() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32] = torch.mean(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.float32]`.
y_error: torch.Tensor[torch.float32, L[99]] = torch.mean(x)
y2: torch.Tensor[torch.float32, L[3], L[4]] = torch.mean(x, dim=0)
y3: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.mean(
x, dim=0, keepdim=True
)
y4: torch.Tensor[torch.float32, L[2], L[4]] = torch.mean(x, dim=1)
y5: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.mean(
x, dim=1, keepdim=True
)
y6: torch.Tensor[torch.float32, L[2], L[3]] = torch.mean(x, dim=2)
y7: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.mean(
x, dim=2, keepdim=True
)
y8: torch.Tensor[torch.float32, L[2], L[3]] = torch.mean(x, dim=-1)
y9: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.mean(
x, dim=-1, keepdim=True
)
y10: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x.mean(dim=-1, keepdim=True)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.mean(x, dim=3)
def test_count_nonzero() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.int64] = torch.count_nonzero(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.int64,
# typing_extensions.Literal[99]]`; used as `Tensor[torch.int64]`.
y_error: torch.Tensor[torch.int64, L[99]] = torch.count_nonzero(x)
y2: torch.Tensor[torch.int64, L[3], L[4]] = torch.count_nonzero(x, dim=0)
y3: torch.Tensor[torch.int64, L[2], L[4]] = torch.count_nonzero(x, dim=1)
y4: torch.Tensor[torch.int64, L[2], L[3]] = torch.count_nonzero(x, dim=2)
y5: torch.Tensor[torch.int64, L[2], L[3]] = x.count_nonzero(dim=-1)
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.count_nonzero(x, dim=3)
def test_cat() -> None:
x1: torch.Tensor[torch.float32, L[2], L[3], L[4]]
x1_first_is_3: torch.Tensor[torch.float32, L[3], L[3], L[4]]
x1_first_is_4: torch.Tensor[torch.float32, L[4], L[3], L[4]]
x1_second_is_4: torch.Tensor[torch.float32, L[2], L[4], L[4]]
x1_second_is_5: torch.Tensor[torch.float32, L[2], L[5], L[4]]
x1_last_is_5: torch.Tensor[torch.float32, L[2], L[3], L[5]]
x1_last_is_6: torch.Tensor[torch.float32, L[2], L[3], L[6]]
# 2-element tuple.
y: torch.Tensor[torch.float32, L[5], L[3], L[4]] = torch.cat((x1, x1_first_is_3))
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[99], L[3], L[4]] = torch.cat(
(x1, x1_first_is_3)
)
y2: torch.Tensor[torch.float32, L[2], L[7], L[4]] = torch.cat(
(x1, x1_second_is_4), dim=1
)
y3: torch.Tensor[torch.float32, L[2], L[3], L[9]] = torch.cat(
(x1, x1_last_is_5), dim=-1
)
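    # When the non-concatenated dimensions disagree (3 vs. 4 here), the result
    # falls back to a tensor of arbitrary shape.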
y3_shape_mismatch: torch.Tensor[torch.float32, Unpack[Tuple[Any, ...]]] = torch.cat(
(x1, x1_second_is_4), dim=-1
)
# 3-element tuple.
y4: torch.Tensor[torch.float32, L[9], L[3], L[4]] = torch.cat(
(x1, x1_first_is_3, x1_first_is_4)
)
y5: torch.Tensor[torch.float32, L[2], L[12], L[4]] = torch.cat(
(x1, x1_second_is_4, x1_second_is_5), dim=1
)
y6: torch.Tensor[torch.float32, L[2], L[3], L[15]] = torch.cat(
(x1, x1_last_is_5, x1_last_is_6), dim=-1
)
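    # Tuples with more elements than the supported overloads, and plain lists,
    # also fall back to an arbitrary shape.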
y_many_element_tuple: torch.Tensor[
torch.float32, Unpack[Tuple[Any, ...]]
] = torch.cat((x1, x1, x1, x1))
y_list: torch.Tensor[torch.float32, Unpack[Tuple[Any, ...]]] = torch.cat([x1, x1])
def test_sign() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.sign(x)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.sign(x)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.sign()
def test_diagonal() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4], L[5]]
y: torch.Tensor = torch.diagonal(x)
def test_diag() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
y: torch.Tensor = torch.diag(x)
def test_module_list() -> None:
x: torch.Tensor[torch.float32, L[2], L[3]]
modules = nn.ModuleList([nn.AdaptiveAvgPool2d(0), nn.AdaptiveAvgPool2d(1)])
for module in modules:
y: Tensor = module(x)
z: int = len(modules)
def test_sparse_coo_tensor() -> None:
y: torch.Tensor[torch.float32, L[2], L[3]] = torch.sparse_coo_tensor(
torch.randn(5), [6, 7, 8], size=(2, 3)
)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32,
# typing_extensions.Literal[2], typing_extensions.Literal[99]]`; used as
# `Tensor[torch.float32, typing_extensions.Literal[2],
# typing_extensions.Literal[3]]`.
y_error: torch.Tensor[torch.float32, L[2], L[99]] = torch.sparse_coo_tensor(
torch.randn(5), [6, 7, 8], size=(2, 3)
)
y2: torch.Tensor = torch.sparse_coo_tensor(torch.randn(5), [6, 7, 8])
def test_max() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32] = torch.max(x)
y2: torch.Tensor[torch.float32, L[3], L[4]] = torch.max(x, dim=0).values
y2_indices: torch.Tensor[torch.int64, L[3], L[4]] = torch.max(x, dim=0).indices
y2_getitem: torch.Tensor[torch.int64, L[3], L[4]] = torch.max(x, dim=0)[1]
y3: torch.Tensor[torch.float32, L[1], L[3], L[4]] = torch.max(
x, dim=0, keepdim=True
).values
y4: torch.Tensor[torch.float32, L[2], L[4]] = torch.max(x, dim=1).values
y5: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.max(
x, dim=1, keepdim=True
).values
y6: torch.Tensor[torch.float32, L[2], L[3]] = torch.max(x, dim=2).values
y7: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.max(
x, dim=2, keepdim=True
).values
y8: torch.Tensor[torch.float32, L[2], L[3]] = torch.max(x, dim=-1).values
y9: torch.Tensor[torch.float32, L[2], L[3], L[1]] = torch.max(
x, dim=-1, keepdim=True
).values
y10: torch.Tensor[torch.float32, L[2], L[4]] = torch.max(x, dim=-2).values
y11: torch.Tensor[torch.float32, L[2], L[1], L[4]] = torch.max(
x, dim=-2, keepdim=True
).values
y12: torch.Tensor[torch.float32, L[2], L[3], L[1]] = x.max(
dim=-1, keepdim=True
).values
# pyre-fixme[6]: Expected `typing_extensions.Literal[0]` for 2nd param but got
# `typing_extensions.Literal[3]`.
torch.max(x, dim=3).values
def test_einsum() -> None:
x: Tensor = torch.einsum("ii", torch.randn(4, 4))
def test_type_as() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
x2: torch.Tensor[torch.int64, L[2], L[3], L[4]]
y: torch.Tensor[torch.int64, L[2], L[3], L[4]] = x.type_as(x2)
def test_softmax() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = torch.softmax(x, dim=1)
# pyre-fixme[9]: y_error has type `Tensor[torch.float32, typing_extensions.Litera...
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = torch.softmax(x, dim=1)
y2: torch.Tensor[torch.int64, L[2], L[3], L[4]] = torch.softmax(
x, dim=1, dtype=torch.int64
)
y3: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.softmax(dim=1)
def test_conv2d() -> None:
x: Tensor[torch.float32, L[20], L[16], L[50], L[100]]
y7: Tensor[torch.float32, L[20], L[33], L[56], L[100]] = nn.Conv2d(
16, 33, (3, 5), padding=(4, 2), bias=False
)(x)
# pyre-fixme[9]: y7_error has type `Tensor[torch.float32, typing_extensions.Liter...
y7_error: Tensor[torch.float32, L[20], L[33], L[56], L[99]] = nn.Conv2d(
16, 33, (3, 5), padding=(4, 2)
)(x)
module: nn.Module = nn.Conv2d(16, 33, (3, 5), padding=(4, 2))
def test_nn_Parameter() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.Parameter(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.Parameter(x)
def test_torch_datatypes() -> None:
x: torch.float16
x2: torch.int
def test_norm() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
y1: Tensor[torch.float32] = torch.norm(x)
y2: Tensor[torch.float32, L[3], L[4]] = torch.norm(x, dim=0, out=x_out, p=1)
# pyre-fixme[9]: Expected error.
y2_error: Tensor[torch.float32, L[3], L[99]] = torch.norm(x, dim=0)
y3: Tensor[torch.float32, L[1], L[3], L[4]] = torch.norm(x, dim=0, keepdim=True)
def test_rand() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.float32, L[2], L[3], L[4]] = torch.rand(2, 3, 4)
# pyre-fixme[9]: Expected Error.
y1_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.rand(2, 3, 4)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.rand(
2,
3,
4,
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
generator=torch.default_generator,
)
y3: Tensor[torch.float32, L[2], L[3], L[4]] = torch.rand((2, 3, 4))
def test_randint() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.int64, L[2], L[3], L[4]] = torch.randint(0, 3, (2, 3, 4))
# pyre-fixme[9]: Expected error.
y1_error: Tensor[torch.int64, L[2], L[3], L[99]] = torch.randint(0, 3, (2, 3, 4))
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.randint(
3,
(2, 3, 4),
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
generator=torch.default_generator,
)
def test_zeros() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
x_out: Tensor[torch.float32, L[2], L[3], L[4]]
device: torch.device
y1: Tensor[torch.float32, L[2], L[3], L[4]] = torch.zeros(2, 3, 4)
# pyre-fixme[9]: Expected Error.
y1_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.zeros(2, 3, 4)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.zeros(
2,
3,
4,
dtype=torch.int64,
device=device,
layout=torch.strided,
out=x_out,
requires_grad=True,
)
y3: Tensor[torch.float32, L[2], L[3], L[4]] = torch.zeros((2, 3, 4))
def test_stride() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
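    # Note: the stub appears to reuse the shape for a bare `stride()` call, even
    # though the runtime strides of a contiguous (2, 3, 4) tensor are (12, 4, 1),
    # as the per-dimension checks below show.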
y: Tuple[L[2], L[3], L[4]] = x.stride()
# pyre-fixme[9]: Expected error.
y_error: Tuple[L[2], L[3], L[99]] = x.stride()
y2: L[12] = x.stride(0)
y3: L[4] = x.stride(1)
y4: L[1] = x.stride(2)
def test_chunk() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tuple[
Tensor[torch.float32, L[2], L[3], L[2]], Tensor[torch.float32, L[2], L[3], L[2]]
] = torch.chunk(x, 2, dim=-1)
# pyre-fixme[9]: Expected error.
y_error: Tuple[
Tensor[torch.float32, L[2], L[3], L[99]],
Tensor[torch.float32, L[2], L[3], L[2]],
] = torch.chunk(x, 2, dim=-1)
y2: Tuple[
Tensor[torch.float32, L[1], L[3], L[4]], Tensor[torch.float32, L[1], L[3], L[4]]
] = torch.chunk(x, 2, dim=0)
y3: Tuple[
Tensor[torch.float32, L[1], L[3], L[4]], Tensor[torch.float32, L[1], L[3], L[4]]
] = x.chunk(2, dim=0)
def test_abs() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x.abs()
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x.abs()
def test_enable_grad() -> None:
with torch.enable_grad():
pass
def test_normal() -> None:
y: Tensor[torch.float32, L[2], L[3], L[4]] = torch.normal(
0, 1, size=(2, 3, 4), device="cuda", requires_grad=True
)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.normal(
0, 1, size=(2, 3, 4), device="cuda", requires_grad=True
)
def test_dim() -> None:
x0: Tensor[torch.float32]
x1: Tensor[torch.float32, L[2]]
x2: Tensor[torch.float32, L[2], L[3]]
x3: Tensor[torch.float32, L[2], L[3], L[4]]
y: L[3] = x3.dim()
# pyre-fixme[9]: Expected error.
y_error: L[5] = x3.dim()
y2: L[0] = x0.dim()
y3: L[1] = x1.dim()
y4: L[2] = x2.dim()
def test_is_cuda() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: bool = x.is_cuda
def test_autograd_backward() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
torch.autograd.backward(x, x)
def test_linalg_norm() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2]] = torch.linalg.norm(x, dim=(-2, -1))
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[99]] = torch.linalg.norm(x, dim=(-2, -1))
def test_Sized() -> None:
x: torch.Size = torch.Size((2, 3, 4))
def test_initial_seed() -> None:
x: int = torch.initial_seed()
def test_log_softmax() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = torch.log_softmax(x, dim=1)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = torch.log_softmax(x, dim=1)
y2: Tensor[torch.int64, L[2], L[3], L[4]] = torch.log_softmax(
x, dtype=torch.int64, dim=1
)
def test_masked_select() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
mask: Tensor[torch.bool, L[2], L[3], L[4]]
out: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor = x.masked_select(mask, out=out)
y2: Tensor = torch.masked_select(x, mask, out=out)
def test__lt__() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.bool, L[2], L[3], L[4]] = x < 3.0
def test_pow() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x**4
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x**4
def test_item() -> None:
x: Tensor[torch.float32]
x2: Tensor[torch.float32, L[1]]
y: torch.float32 = x.item()
# pyre-fixme[9]: Expected error.
y_error: torch.int64 = x.item()
    y2: torch.float32 = x2.item()
def test_uniform_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.uniform_(x, a=1.0, b=2.0)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.uniform_(
x, a=1.0, b=2.0
)
def test_kaiming_uniform_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.kaiming_uniform_(
x, a=1.0, mode="fan_in", nonlinearity="leaky_relu"
)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.kaiming_uniform_(x)
def test_constant_() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.init.constant_(x, val=1.0)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.init.constant_(x, val=1.0)
def test_leaky_relu() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = nn.LeakyReLU(
negative_slope=1.0, inplace=True
)(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = nn.LeakyReLU(
negative_slope=1.0, inplace=True
)(x)
def test_fft_fft2() -> None:
x: Tensor[torch.complex64, L[2], L[3], L[4]]
y: Tensor[torch.complex64, L[2], L[3], L[4]] = torch.fft.fft2(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.complex64, L[2], L[3], L[99]] = torch.fft.fft2(x)
def test_real() -> None:
x: Tensor[torch.complex64, L[2], L[3], L[4]]
y: Tensor[torch.float32, L[2], L[3], L[4]] = x.real
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = x.real
x2: Tensor[torch.complex128, L[2], L[3], L[4]]
y2: Tensor[torch.float64, L[2], L[3], L[4]] = x2.real
not_complex: Tensor[torch.float64, L[2], L[3], L[4]]
# Should error but we don't have overloads for @property.
not_complex.real
def test_Tensor_init() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
# pyre-fixme[9]: Unexpected error because the constructor doesn't bind DType.
y: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor((2, 3, 4), device="cuda")
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[2], L[3], L[99]] = Tensor((2, 3, 4), device="cuda")
y2: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor(2, 3, 4, device="cuda")
y3: Tensor[torch.float32, L[2], L[3], L[4]] = Tensor(x)
def test_reflection_pad2d() -> None:
module: nn.Module = nn.ReflectionPad2d(4)
x: Tensor[torch.float32, L[20], L[16], L[50], L[100]]
y: Tensor[torch.float32, L[20], L[16], L[58], L[108]] = nn.ReflectionPad2d(4)(x)
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.float32, L[20], L[16], L[58], L[99]] = nn.ReflectionPad2d(4)(
x
)
def test_half() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
good1: torch.Tensor[torch.float16, L[2], L[3], L[4]] = x.half(torch.memory_format())
# pyre-fixme[9]: Expected error.
bad1: torch.Tensor[torch.float16, L[99], L[3], L[4]] = x.half()
def test_is_contiguous() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
y: bool = x.is_contiguous(torch.memory_format())
def test_scatter() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# We don't really check for the shape of index or src.
index: torch.LongTensor[torch.float32, L[99]]
src: torch.Tensor[torch.float32, L[99], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter(0, index, src)
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x.scatter(0, index, src)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter(2, index, src)
def test_scatter_() -> None:
x: torch.Tensor[torch.float32, L[2], L[3], L[4]]
# We don't really check for the shape of index or src.
index: torch.LongTensor[torch.float32, L[99]]
src: torch.Tensor[torch.float32, L[99], L[99]]
y: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter_(0, index, src)
# pyre-fixme[9]: Expected error.
y_error: torch.Tensor[torch.float32, L[2], L[3], L[99]] = x.scatter_(0, index, src)
y2: torch.Tensor[torch.float32, L[2], L[3], L[4]] = x.scatter_(2, index, src)
def test_bool() -> None:
x: Tensor[torch.float32, L[2], L[3], L[4]]
y: Tensor[torch.bool, L[2], L[3], L[4]] = x.bool()
# pyre-fixme[9]: Expected error.
y_error: Tensor[torch.bool, L[2], L[3], L[99]] = x.bool()
| EXA-1-master | exa/libraries/xformers/stubs/torch_stub_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
from pathlib import Path
from typing import Optional
from packaging import version
# TODO: consolidate with the code in build_conda.py
THIS_PATH = Path(__file__).resolve()
version_from_file = (THIS_PATH.parents[1] / "version.txt").read_text().strip()
def get_tagged_version() -> Optional[str]:
"""
    Return the version (without the leading "v") if HEAD is exactly on a version
    tag, otherwise None.
"""
try:
tag = subprocess.check_output(
["git", "describe", "--tags", "--exact-match", "HEAD"],
text=True,
stderr=subprocess.DEVNULL,
).strip()
except subprocess.CalledProcessError: # no tag
return None
if not tag.startswith("v"):
return None
# Should match the version in `version.txt`
# (except for the suffix like `rc` tag)
assert (
version.parse(version_from_file).release == version.parse(tag[1:]).release
), f"The version in version.txt ({version_from_file}) does not match the given tag ({tag})"
return tag[1:]
def get_dev_version() -> str:
num_commits = subprocess.check_output(
["git", "rev-list", "--count", "HEAD"], text=True
).strip()
return f"{version_from_file}.dev{num_commits}"
if __name__ == "__main__":
tagged_version = get_tagged_version()
if tagged_version is not None:
print(tagged_version, end="")
else:
print(get_dev_version(), end="")
| EXA-1-master | exa/libraries/xformers/packaging/compute_wheel_version.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import shutil
import subprocess
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional
import compute_wheel_version
THIS_PATH = Path(__file__).resolve()
SOURCE_ROOT_DIR = THIS_PATH.parents[1]
PYTHON_VERSIONS = ["3.9", "3.10"]
PYTORCH_TO_CUDA_VERSIONS = {
"1.11.0": ["10.2", "11.1", "11.3", "11.5"],
"1.12.0": ["10.2", "11.3", "11.6"],
"1.12.1": ["10.2", "11.3", "11.6"],
"1.13": ["11.6", "11.7"],
}
def conda_docker_image_for_cuda(cuda_version: str) -> str:
"""
Given a cuda version, return a docker image we could
build in.
"""
if cuda_version in ("10.1", "10.2", "11.1"):
return "pytorch/conda-cuda"
if cuda_version == "11.3":
return "pytorch/conda-builder:cuda113"
if cuda_version == "11.5":
return "pytorch/conda-builder:cuda115"
if cuda_version == "11.6":
return "pytorch/conda-builder:cuda116"
if cuda_version == "11.7":
return "pytorch/conda-builder:cuda117"
raise ValueError(f"Unknown cuda version {cuda_version}")
@dataclass
class Build:
"""
Represents one configuration of a build, i.e.
a set of versions of dependent libraries.
Members:
conda_always_copy: avoids hard linking which can behave weirdly.
conda_debug: get added information about package search
conda_dirty: see intermediate files after build
build_inside_tree: output in build/ not ../build
is_release: whether this is an official versioned release
"""
python_version: str
pytorch_version: str
pytorch_channel: str
cuda_version: str
cuda_dep_runtime: str
conda_always_copy: bool = True
conda_debug: bool = False
conda_dirty: bool = False
build_inside_tree: bool = False
tagged_version: Optional[str] = field(
default_factory=compute_wheel_version.get_tagged_version
)
def _get_build_version(self) -> str:
if self.tagged_version is not None:
return self.tagged_version
git_hash = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], text=True
).strip()
dev_version = compute_wheel_version.get_dev_version()
return f"{dev_version}+git.{git_hash}"
def _set_env_for_build(self) -> None:
"""
NOTE: Variables set here won't be visible in `setup.py`
UNLESS they are also specified in meta.yaml
"""
os.environ["BUILD_VERSION"] = self._get_build_version()
tag = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], text=True
).strip()
os.environ["GIT_TAG"] = tag
os.environ["PYTORCH_VERSION"] = self.pytorch_version
os.environ["CU_VERSION"] = self.cuda_version
os.environ["SOURCE_ROOT_DIR"] = str(SOURCE_ROOT_DIR)
# At build time, the same major/minor (otherwise we might get a CPU pytorch ...)
cuda_constraint_build = "=" + ".".join(self.cuda_version.split(".")[:2])
pytorch_version_tuple = tuple(
int(v) for v in self.pytorch_version.split(".")[:2]
)
if pytorch_version_tuple < (1, 13):
os.environ[
"CONDA_CUDA_CONSTRAINT_BUILD"
] = f"cudatoolkit{cuda_constraint_build}"
os.environ[
"CONDA_CUDA_CONSTRAINT_RUN"
] = f"cudatoolkit{self.cuda_dep_runtime}"
else:
os.environ[
"CONDA_CUDA_CONSTRAINT_BUILD"
] = f"pytorch-cuda{cuda_constraint_build}"
os.environ[
"CONDA_CUDA_CONSTRAINT_RUN"
] = f"pytorch-cuda{self.cuda_dep_runtime}"
if self.conda_always_copy:
os.environ["CONDA_ALWAYS_COPY"] = "true"
def _get_build_args(self) -> List[str]:
args = [
"conda",
"build",
"-c",
self.pytorch_channel,
"-c",
"nvidia",
"--python",
self.python_version,
"--no-anaconda-upload",
]
if self.conda_debug:
args += ["--debug"]
if self.conda_dirty:
args += ["--dirty"]
if not self.build_inside_tree:
args += ["--croot", "../build"]
return args + ["packaging/xformers"]
def do_build(self) -> None:
self._set_env_for_build()
args = self._get_build_args()
print(args)
subprocess.check_call(args)
def move_artifacts_to_store(self, store_pytorch_package: bool) -> None:
"""
Run after a build to move the built package, and, if using nightly, the
used PyTorch package, to a location where they will be recognized
as build artifacts.
"""
print("moving artifacts")
assert not self.build_inside_tree
artifacts = Path("packages")
artifacts.mkdir(exist_ok=True)
for filename in Path("../build/linux-64").resolve().glob("*.tar.bz2"):
print("moving", filename, "to", artifacts)
shutil.move(filename, artifacts)
if store_pytorch_package:
for filename in Path("/opt/conda/pkgs").glob("pytorch-[12].*.tar.bz2"):
print("moving", filename, "to", artifacts)
shutil.move(filename, artifacts)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Build the conda package.")
parser.add_argument(
"--python", metavar="3.X", required=True, help="python version e.g. 3.10"
)
parser.add_argument(
"--cuda-dep-runtime", metavar="1X.Y", required=True, help="eg '>=11.7,<11.9"
)
parser.add_argument(
"--cuda", metavar="1X.Y", required=True, help="cuda version e.g. 11.3"
)
parser.add_argument(
"--pytorch", metavar="1.Y.Z", required=True, help="PyTorch version e.g. 1.11.0"
)
parser.add_argument(
"--build-inside-tree",
action="store_true",
help="Build in build/ instead of ../build/",
)
parser.add_argument(
"--store",
action="store_true",
help="position artifact to store",
)
parser.add_argument(
"--store-pytorch-package",
action="store_true",
help="position artifact to store",
)
parser.add_argument(
"--pytorch-channel", default="pytorch", help="Use 'pytorch-nightly' for nightly"
)
args = parser.parse_args()
pkg = Build(
pytorch_channel=args.pytorch_channel,
python_version=args.python,
pytorch_version=args.pytorch,
cuda_version=args.cuda,
build_inside_tree=args.build_inside_tree,
cuda_dep_runtime=args.cuda_dep_runtime,
)
pkg.do_build()
pkg.move_artifacts_to_store(store_pytorch_package=args.store_pytorch_package)
# python packaging/conda/build_conda.py --cuda 11.6 --python 3.10 --pytorch 1.12.1
# python packaging/conda/build_conda.py --cuda 11.3 --python 3.9 --pytorch 1.12.1 # <= the dino one
# python packaging/conda/build_conda.py --cuda 11.6 --python 3.10 --pytorch 1.11.0
# Note this does the build outside the root of the tree.
# TODO:
# - Make a local conda package cache available inside docker
# - do we need builds for both _GLIBCXX_USE_CXX11_ABI values?
# - how to prevent some cpu only builds of pytorch from being discovered?
| EXA-1-master | exa/libraries/xformers/packaging/build_conda.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/experimental/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
setup(
name="ragged_inference",
author="Facebook AI Research",
version="0.0.0",
packages=["ragged_inference"],
install_requires=[],
scripts=[],
python_requires=">=3.6",
)
| EXA-1-master | exa/libraries/xformers/experimental/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from ragged_inference.test_utils import assert_eq, bf16_support
from ragged_inference.triton_v2_matmul import matmul
SHAPES = [
(3, 7),
(384, 128),
(784, 512),
(1024, 1024),
(2048, 384),
]
_dtypes = [
{"device": "cuda", "dtype": torch.float16},
{"device": "cuda", "dtype": torch.float32},
]
if bf16_support():
_dtypes.append({"device": "cuda", "dtype": torch.bfloat16})
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", _dtypes)
def test_matmul(shape, dtype):
a = torch.randn(shape, **dtype)
b = torch.randn(shape, **dtype).T
out = matmul(a, b)
torch_out = torch.matmul(a, b)
assert_eq(out, torch_out, rtol=0.01, atol=0.2)
"""
pytest -vxs --tb=native tests/ragged_inference/test_triton_v2_matmul.py -k test_matmul
"""
| EXA-1-master | exa/libraries/xformers/experimental/tests/test_triton_v2_matmul.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from ragged_inference.garbage_pad_ragged_acts import RaggedActivations
from ragged_inference.seq_kv_cache import (
SingleSeqKVCache,
_create_indices,
calculate_scores_via_qk_dotprod,
extend_kv_caches,
garbage_pad_keys,
garbage_pad_seq_kv_cache,
)
from ragged_inference.test_utils import assert_eq, bf16_support
_dtypes = [{"device": "cuda", "dtype": torch.float16}]
if bf16_support():
_dtypes.append({"device": "cuda", "dtype": torch.bfloat16})
def _single_seq_kv_cache(n_ctx, value, d_model, dtype) -> SingleSeqKVCache:
return SingleSeqKVCache(
keys=torch.full([n_ctx, d_model], value, **dtype),
values=torch.full([n_ctx, d_model], value, **dtype),
)
@pytest.mark.parametrize("dtype", _dtypes)
def test_garbage_pad_seq_kv_cache_correctness(dtype):
seq_kv_cache = [
_single_seq_kv_cache(n_ctx=1, value=33, d_model=2, dtype=dtype),
_single_seq_kv_cache(n_ctx=3, value=42, d_model=2, dtype=dtype),
_single_seq_kv_cache(n_ctx=7, value=55, d_model=2, dtype=dtype),
]
padded_keys, padded_values = garbage_pad_seq_kv_cache(seq_kv_cache)
# Check that the non-garbage portion of each is correct
assert_eq(padded_keys[0, :1, :], seq_kv_cache[0].keys)
assert_eq(padded_keys[1, :3, :], seq_kv_cache[1].keys)
assert_eq(padded_keys[2, :7, :], seq_kv_cache[2].keys)
assert_eq(padded_values[0, :1, :], seq_kv_cache[0].values)
assert_eq(padded_values[1, :3, :], seq_kv_cache[1].values)
assert_eq(padded_values[2, :7, :], seq_kv_cache[2].values)
@pytest.mark.parametrize("dtype", _dtypes)
def test_extend_kv_caches_correctness(dtype):
d_model = 6
seq_kv_cache = [
_single_seq_kv_cache(n_ctx=1, value=33, d_model=d_model, dtype=dtype),
_single_seq_kv_cache(n_ctx=3, value=42, d_model=d_model, dtype=dtype),
_single_seq_kv_cache(n_ctx=7, value=55, d_model=d_model, dtype=dtype),
]
n_ctx_new = 1
active_keys = RaggedActivations.from_list(
[
torch.ones(n_ctx_new, d_model, **dtype),
torch.ones(n_ctx_new, d_model, **dtype),
torch.ones(n_ctx_new, d_model, **dtype),
]
)
active_values = RaggedActivations.from_list(
[
torch.ones(n_ctx_new, d_model, **dtype) * 2,
torch.ones(n_ctx_new, d_model, **dtype) * 2,
torch.ones(n_ctx_new, d_model, **dtype) * 2,
]
)
new_cache = extend_kv_caches(seq_kv_cache, active_keys, active_values)
assert_eq(new_cache[0].keys[:, 0].cpu(), [33, 1])
assert_eq(new_cache[0].values[:, 0].cpu(), [33, 2])
assert_eq(new_cache[1].keys[:, 0].cpu(), [42, 42, 42, 1])
assert_eq(new_cache[1].values[:, 0].cpu(), [42, 42, 42, 2])
assert_eq(new_cache[2].keys[:, 0].cpu(), [55, 55, 55, 55, 55, 55, 55, 1])
assert_eq(new_cache[2].values[:, 0].cpu(), [55, 55, 55, 55, 55, 55, 55, 2])
@pytest.mark.parametrize("dtype", _dtypes)
def test_index_select_throughput(dtype):
n_ctx_per_seq = 8192
n_seqs = 20
d_model_per_gpu = 12 * 1024 // 8
keys = _single_seq_kv_cache(
n_ctx=n_ctx_per_seq * n_seqs, value=42, d_model=d_model_per_gpu, dtype=dtype
).keys
indices = _create_indices(tuple(n_ctx_per_seq for _ in range(n_seqs)))
for strategy in ["index_select", "gather", "slice"]:
if strategy == "slice":
def do_the_op():
return keys[indices, :]
elif strategy == "gather":
stacked_idxs = torch.stack([indices for _ in range(d_model_per_gpu)], dim=1)
def do_the_op():
torch.gather(input=keys, dim=0, index=stacked_idxs)
elif strategy == "index_select":
def do_the_op():
torch.index_select(input=keys, dim=0, index=indices)
else:
raise ValueError(f"{strategy=}")
# warmup
do_the_op()
torch.cuda.synchronize()
started_at = time.time()
n_iters = 10
for _ in range(n_iters):
do_the_op()
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
micros_per_mb = elapsed_micros / n_iters
micros_per_seq = micros_per_mb / n_seqs
print(
f"""
# Speed when {strategy=}
{micros_per_seq=:.1f}µs per seq
"""
)
@pytest.mark.parametrize("dtype", _dtypes)
def test_garbage_pad_keys_throughput(dtype, n_ctx_per_seq=1024):
n_seqs = 100
d_model_per_gpu = 12 * 1024 // 8
seq_kv_cache = [
_single_seq_kv_cache(
n_ctx=n_ctx_per_seq, value=42, d_model=d_model_per_gpu, dtype=dtype
)
for _ in range(n_seqs)
]
bytes_in_keys_per_seq = n_ctx_per_seq * d_model_per_gpu * 2 # 2 from bf16
bytes_in_keys_total = bytes_in_keys_per_seq * n_seqs
hbm_bw_bytes_per_gpu = 1555e9 # 1.5TB/s
# If we just read the bytes directly from memory
theor_load_micros_per_seq = bytes_in_keys_per_seq / hbm_bw_bytes_per_gpu * 1e6
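    # Worked example with the defaults above: n_ctx_per_seq=1024 and
    # d_model_per_gpu=1536 give bytes_in_keys_per_seq = 1024 * 1536 * 2 ≈ 3.1MB,
    # so theor_load_micros_per_seq ≈ 3.1e6 / 1555e9 * 1e6 ≈ 2.0µs.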
# Doing our operation should be slower than the theoretical minimum because we
# do the following to the items
#
# 1. Read them from the per-seq areas
# 2. Write them back into the buffer
expected_micros_per_seq = theor_load_micros_per_seq * 2
# warmup
garbage_pad_keys(seq_kv_cache)
torch.cuda.synchronize()
started_at = time.time()
n_iters = 10
for _ in range(n_iters):
garbage_pad_keys(seq_kv_cache)
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
micros_per_mb = elapsed_micros / n_iters
micros_per_seq = micros_per_mb / n_seqs
print(
f"""
# Theoretical
{bytes_in_keys_total/1e9=:.3f}GB
{bytes_in_keys_per_seq/1e6=:.2f}MB
{theor_load_micros_per_seq=:.1f}µs per seq (to just load once from memory)
{expected_micros_per_seq=:.1f}µs per seq
# Actual
{micros_per_mb=:.1f}µs per microbatch
{micros_per_seq=:.1f}µs per seq
{micros_per_seq/expected_micros_per_seq:.1f}x the expected HBM-bandwidth bound time
"""
)
@pytest.mark.parametrize("dtype", _dtypes)
def test_garbage_pad_active_queries_throughput(dtype, n_active_ctx_per_seq=5):
n_seqs = 100
d_model_per_gpu = 12 * 1024 // 8
active_queries = RaggedActivations.from_list(
[
torch.ones(n_active_ctx_per_seq, d_model_per_gpu, **dtype) * 2
for _ in range(n_seqs)
]
)
bytes_in_queries_per_seq = n_active_ctx_per_seq * d_model_per_gpu * 2 # 2 from bf16
bytes_in_queries_total = bytes_in_queries_per_seq * n_seqs
hbm_bw_bytes_per_gpu = 1555e9 # 1.5TB/s
# If we just read the bytes directly from memory
theor_load_micros_per_seq = bytes_in_queries_per_seq / hbm_bw_bytes_per_gpu * 1e6
# Doing our operation should be slower than the theoretical minimum because we
# do the following to the items
#
# 1. Read them from the per-seq areas
# 2. Write them back into the buffer
expected_micros_per_seq = theor_load_micros_per_seq * 2
# warmup
active_queries.to_garbage_padded()
torch.cuda.synchronize()
started_at = time.time()
n_iters = 10
for _ in range(n_iters):
active_queries.to_garbage_padded()
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
micros_per_mb = elapsed_micros / n_iters
micros_per_seq = micros_per_mb / n_seqs
print(
f"""
# Theoretical
{bytes_in_queries_total/1e9=:.3f}GB
{bytes_in_queries_per_seq/1e6=:.2f}MB
{theor_load_micros_per_seq=:.1f}µs per seq (to just load once from memory)
{expected_micros_per_seq=:.1f}µs per seq
# Actual
{micros_per_mb=:.1f}µs per microbatch
{micros_per_seq=:.1f}µs per seq
{micros_per_seq/expected_micros_per_seq:.1f}x the expected HBM-bandwidth bound time
"""
)
@pytest.mark.parametrize("dtype", _dtypes)
def test_calculate_scores_via_qk_dotprod_throughput(
dtype, n_key_ctx_per_seq=1024, n_active_query_ctx_per_seq=5
):
n_seqs = 100
d_model_per_gpu = 12 * 1024 // 8
seq_kv_cache = [
_single_seq_kv_cache(
n_ctx=n_key_ctx_per_seq, value=42, d_model=d_model_per_gpu, dtype=dtype
)
for _ in range(n_seqs)
]
active_queries = RaggedActivations.from_list(
[
torch.ones(n_active_query_ctx_per_seq, d_model_per_gpu, **dtype) * 2
for _ in range(n_seqs)
]
)
    assert n_key_ctx_per_seq > n_active_query_ctx_per_seq * 10, (
        "n_key_ctx_per_seq must be much larger than "
        "n_active_query_ctx_per_seq for our simulator to be useful because "
        "we round the HBM memory bandwidth for the active_queries and "
        "for the scores down to zero"
    )
bytes_in_keys_per_seq = n_key_ctx_per_seq * d_model_per_gpu * 2 # 2 from bf16
bytes_in_keys_total = bytes_in_keys_per_seq * n_seqs
hbm_bw_bytes_per_gpu = 1555e9 # 1.5TB/s
# If we just read the bytes directly from memory
theor_load_micros_per_seq = bytes_in_keys_per_seq / hbm_bw_bytes_per_gpu * 1e6
# Doing our operation should be slower than the theoretical minimum because we
# do the following to the items
#
# 1. Read them from the per-seq areas
# 2. Write them back into the buffer
expected_micros_per_seq = theor_load_micros_per_seq * 2
# warmup
calculate_scores_via_qk_dotprod(seq_kv_cache, active_queries)
torch.cuda.synchronize()
started_at = time.time()
n_iters = 10
for _ in range(n_iters):
calculate_scores_via_qk_dotprod(seq_kv_cache, active_queries)
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
micros_per_mb = elapsed_micros / n_iters
micros_per_seq = micros_per_mb / n_seqs
print(
f"""
# Theoretical
{bytes_in_keys_total/1e9=:.3f}GB
{bytes_in_keys_per_seq/1e6=:.2f}MB
{theor_load_micros_per_seq=:.1f}µs per seq (to just load once from memory)
{expected_micros_per_seq=:.1f}µs per seq
# Actual
{micros_per_mb=:.1f}µs per microbatch
{micros_per_seq=:.1f}µs per seq
{micros_per_seq/expected_micros_per_seq:.1f}x the expected HBM-bandwidth bound time
"""
)
"""
# Run tests with the following
pytest -vsx tests/ragged_inference/test_seq_kv_cache.py
# Profile with the following
pytest -vsx tests/ragged_inference/test_seq_kv_cache.py -k test_calculate_scores_via_qk_dotprod_throughput
"""
| EXA-1-master | exa/libraries/xformers/experimental/tests/test_seq_kv_cache.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from ragged_inference.test_utils import assert_eq, bf16_support
from ragged_inference.triton_v2_qk_dotprod import qk_dotprod
from ragged_inference.triton_v2_ragged_qk_dotprod import (
RaggedQkPidLookupTable,
ragged_single_seq_qk_dotprod,
)
SHAPES = [
(3, 7),
(384, 128),
(784, 512),
(1024, 1024),
(2048, 384),
]
_dtypes = [
{"device": "cuda", "dtype": torch.float16},
{"device": "cuda", "dtype": torch.float32},
]
if bf16_support():
_dtypes.append({"device": "cuda", "dtype": torch.bfloat16})
def qk_dotprod_pytorch(q, k):
# attention matrix
return torch.einsum("bqd,bkd->bqk", q, k)
def qk_dotprod_single_head_pytorch(q, k):
# attention matrix
return torch.einsum("qd,kd->qk", q, k)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", _dtypes)
def test_qk_dotprod(shape, dtype):
a = torch.randn(shape, **dtype)
b = torch.randn(shape, **dtype)
out = qk_dotprod(a, b)
torch_out = qk_dotprod_single_head_pytorch(a, b)
assert_eq(out, torch_out, rtol=0.05, atol=0.2)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod(shape, dtype):
a = torch.randn(shape, **dtype)
b = torch.randn(shape, **dtype)
lut = RaggedQkPidLookupTable.from_single_seq(n_ctx_q=shape[0], n_ctx_k=shape[0])
out = ragged_single_seq_qk_dotprod(a, b, lut)
torch_out = qk_dotprod_single_head_pytorch(a, b)
assert_eq(out, torch_out, rtol=0.02, atol=0.2)
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod_perf(dtype):
active_tokens = 5
d_head = 256
active_and_cached_tokens = 8000 * 50
n_iters = 10
q = torch.randn((active_tokens, d_head), **dtype)
k = torch.randn((active_and_cached_tokens, d_head), **dtype)
lut = RaggedQkPidLookupTable.from_single_seq(
n_ctx_q=active_tokens, n_ctx_k=active_and_cached_tokens
)
for _ in range(3):
out = ragged_single_seq_qk_dotprod(q, k, lut) # noqa: F841
torch.cuda.synchronize()
started_at = time.time()
for _ in range(n_iters):
out = ragged_single_seq_qk_dotprod(q, k, lut) # noqa: F841
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
bytes_in_keys_per_seq = active_and_cached_tokens * d_head * 2 # 2 from bf16
bytes_in_keys_total = bytes_in_keys_per_seq
hbm_bw_bytes_per_gpu = 1555e9 # 1.5TB/s
# If we just read the bytes directly from memory
theor_load_micros_per_seq = bytes_in_keys_per_seq / hbm_bw_bytes_per_gpu * 1e6
expected_micros_per_seq = theor_load_micros_per_seq
micros_per_seq = elapsed_micros / n_iters
print(
f"""
# Theoretical
{bytes_in_keys_total/1e9=:.3f}GB
{bytes_in_keys_per_seq/1e6=:.2f}MB
{theor_load_micros_per_seq=:.1f}µs per seq (to just load once from memory)
{expected_micros_per_seq=:.1f}µs per seq
# Actual
{micros_per_seq=:.1f}µs per seq
{micros_per_seq/expected_micros_per_seq:.1f}x the expected HBM-bandwidth bound time
"""
)
# FIXME: Write a proper device agnostic test
if "A100" in torch.cuda.get_device_name(0):
assert micros_per_seq / expected_micros_per_seq < 1.5
@pytest.mark.parametrize("dtype", _dtypes)
def test_simple_qk_dotprod(dtype):
shape = (8, 8)
k = torch.zeros(shape, **dtype)
k[0, 0] = 1.0
k[0, 1] = 1.0
q = torch.randn(shape, **dtype)
print(f"{q=}")
print(f"{k=}")
out = qk_dotprod(q, k)
torch_out = qk_dotprod_single_head_pytorch(q, k)
assert_eq(out, torch_out, rtol=0.01, atol=0.2)
"""
pytest -vxs --tb=native tests/ragged_inference/test_triton_v2_qk_dotprod.py -k test_ragged_qk_dotprod
"""
| EXA-1-master | exa/libraries/xformers/experimental/tests/test_triton_v2_qk_dotprod.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from ragged_inference.garbage_pad_ragged_acts import RaggedActivations
from ragged_inference.seq_kv_cache import scores_via_qk_dotprod
from ragged_inference.test_utils import (
assert_eq,
bf16_support,
make_seq,
make_seq_arange,
)
from ragged_inference.triton_v2_ragged_qk_dotprod import (
RaggedQkPidLookupTable,
ragged_qk_dotprod,
)
_dtypes = [{"device": "cuda", "dtype": torch.float16}]
if bf16_support():
_dtypes.append({"device": "cuda", "dtype": torch.bfloat16})
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod_single_seq(dtype):
d_head = 2
key = RaggedActivations.from_list(
[
make_seq(n_ctx=3, value=42, d_model=d_head, dtype=dtype),
]
)
query = RaggedActivations.from_list(
[
make_seq(n_ctx=4, value=55, d_model=d_head, dtype=dtype),
]
)
torch_scores = scores_via_qk_dotprod(query, key)
print(f"{torch_scores=}")
lut = RaggedQkPidLookupTable.from_query_and_key_tokens_per_seq(
n_ctx_q_per_seq=query.n_ctx_per_seq, n_ctx_k_per_seq=key.n_ctx_per_seq
)
scores = ragged_qk_dotprod(query, key, lut)
assert_eq(torch_scores, scores)
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod_multiple_seqs_lut(dtype):
d_head = 2
key = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=5, start_value=0, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=5, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=3, start_value=7, d_head=d_head, dtype=dtype),
]
)
query = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=3, start_value=0, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=3, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=5, d_head=d_head, dtype=dtype),
]
)
lut = RaggedQkPidLookupTable.from_query_and_key_tokens_per_seq(
n_ctx_q_per_seq=query.n_ctx_per_seq,
n_ctx_k_per_seq=key.n_ctx_per_seq,
block_q_override=2,
block_k_override=2,
)
assert_eq(lut.pid_to_in_q_token_offset.cpu(), [0, 0, 0, 2, 2, 2, 3, 5, 5])
assert_eq(lut.pid_to_in_k_token_offset.cpu(), [0, 2, 4, 0, 2, 4, 5, 7, 9])
assert_eq(lut.pid_to_out_q_block.cpu(), [0, 0, 0, 1, 1, 1, 0, 0, 0])
assert_eq(lut.pid_to_out_k_block.cpu(), [0, 1, 2, 0, 1, 2, 0, 0, 1])
assert_eq(lut.pid_to_out_seq_idx.cpu(), [0, 0, 0, 0, 0, 0, 1, 2, 2])
assert_eq(lut.n_pids_total, 9)
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod_multiple_seqs(dtype):
d_head = 2
key = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=5, start_value=0, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=5, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=3, start_value=7, d_head=d_head, dtype=dtype),
]
)
query = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=3, start_value=0, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=3, d_head=d_head, dtype=dtype),
make_seq_arange(n_ctx=2, start_value=5, d_head=d_head, dtype=dtype),
]
)
lut = RaggedQkPidLookupTable.from_query_and_key_tokens_per_seq(
n_ctx_q_per_seq=query.n_ctx_per_seq,
n_ctx_k_per_seq=key.n_ctx_per_seq,
)
torch_scores = scores_via_qk_dotprod(query, key)
scores = ragged_qk_dotprod(query, key, lut)
for seq_idx, (n_ctx_q, n_ctx_k) in enumerate(
zip(key.n_ctx_per_seq, query.n_ctx_per_seq)
):
print(f"Checking {seq_idx=}")
assert_eq(
torch_scores[seq_idx, :n_ctx_q, :n_ctx_k],
scores[seq_idx, :n_ctx_q, :n_ctx_k],
)
@pytest.mark.parametrize("dtype", _dtypes)
def test_ragged_qk_dotprod_multiple_seqs_perf(dtype):
n_q_ctx = 5
n_seqs = 50
d_head = 256
n_k_ctx = 8000
n_iters = 10
query = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=n_q_ctx, start_value=0, d_head=d_head, dtype=dtype)
for _ in range(n_seqs)
]
)
key = RaggedActivations.from_list(
[
make_seq_arange(n_ctx=n_k_ctx, start_value=0, d_head=d_head, dtype=dtype)
for _ in range(n_seqs)
]
)
lut = RaggedQkPidLookupTable.from_query_and_key_tokens_per_seq(
n_ctx_q_per_seq=query.n_ctx_per_seq,
n_ctx_k_per_seq=key.n_ctx_per_seq,
)
for _ in range(3):
out = ragged_qk_dotprod(query, key, lut) # noqa: F841
torch.cuda.synchronize()
started_at = time.time()
for _ in range(n_iters):
out = ragged_qk_dotprod(query, key, lut) # noqa: F841
torch.cuda.synchronize()
elapsed_micros = (time.time() - started_at) * 1e6
bytes_in_keys_per_seq = n_k_ctx * d_head * 2 # 2 from bf16
bytes_in_keys_total = bytes_in_keys_per_seq * n_seqs
hbm_bw_bytes_per_gpu = 1555e9 # 1.5TB/s
# If we just read the bytes directly from memory
theor_load_micros_per_seq = bytes_in_keys_per_seq / hbm_bw_bytes_per_gpu * 1e6
expected_micros_per_seq = theor_load_micros_per_seq
micros_per_seq = elapsed_micros / (n_iters * n_seqs)
micros_per_mb = elapsed_micros / (n_iters)
print(
f"""
# Theoretical
{bytes_in_keys_total/1e9=:.3f}GB
{bytes_in_keys_per_seq/1e6=:.2f}MB
{theor_load_micros_per_seq=:.1f}µs per seq (to just load once from memory)
{expected_micros_per_seq=:.1f}µs per seq
# Actual
{micros_per_seq=:.1f}µs per seq
{micros_per_mb=:.1f}µs per microbatch
{micros_per_seq/expected_micros_per_seq:.1f}x the expected HBM-bandwidth bound time
"""
)
# FIXME: write a proper, device agnostic test
if "A100" in torch.cuda.get_device_name(0):
assert micros_per_seq / expected_micros_per_seq < 1.5
"""
pytest -vxs --tb=native tests/ragged_inference/test_triton_v2_ragged_qk_dotprod.py -k test_ragged_qk_dotprod_multiple_seqs_perf # noqa
"""
| EXA-1-master | exa/libraries/xformers/experimental/tests/test_triton_v2_ragged_qk_dotprod.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Tuple
import numpy as np
import torch
_DTYPE_PRECISIONS = {
torch.float16: (1e-3, 1e-3),
torch.bfloat16: (1e-1, 1e-3),
torch.float32: (1e-4, 1e-5),
torch.float64: (1e-5, 1e-8),
}
def _get_default_rtol_and_atol(
actual: torch.Tensor, expected: torch.Tensor
) -> Tuple[float, float]:
expected_rtol = expected_atol = actual_rtol = actual_atol = 0.0
if isinstance(actual, torch.Tensor):
actual_rtol, actual_atol = _DTYPE_PRECISIONS.get(actual.dtype, (0.0, 0.0))
if isinstance(expected, torch.Tensor):
expected_rtol, expected_atol = _DTYPE_PRECISIONS.get(expected.dtype, (0.0, 0.0))
return max(actual_rtol, expected_rtol), max(actual_atol, expected_atol)
def assert_eq(actual, expected, msg="", rtol=None, atol=None):
"""Asserts two things are equal with nice support for lists and tensors
It also gives prettier error messages than assert a == b
"""
if not msg:
msg = f"Values are not equal: \n\ta={actual} \n\tb={expected}"
if isinstance(actual, torch.Size):
actual = list(actual)
if isinstance(expected, torch.Size):
expected = list(expected)
if isinstance(actual, tuple):
actual = list(actual)
if isinstance(expected, tuple):
expected = list(expected)
if isinstance(actual, torch.Tensor):
if rtol is None and atol is None:
rtol, atol = _get_default_rtol_and_atol(actual=actual, expected=expected)
torch.testing.assert_allclose(actual, expected, msg=msg, rtol=rtol, atol=atol)
return
if isinstance(actual, np.ndarray):
np.testing.assert_allclose(actual, expected, rtol=rtol or 0, atol=atol or 0)
return
if isinstance(actual, torch.Size) or isinstance(expected, torch.Size):
assert actual == expected, msg
return
if isinstance(actual, dict):
assert isinstance(expected, dict)
assert actual.keys() == expected.keys(), msg
for key in actual.keys():
assert_eq(actual[key], expected[key], msg=msg, rtol=rtol, atol=atol)
return
if isinstance(actual, (tuple, list, set)):
assert isinstance(expected, type(actual))
assert len(actual) == len(expected), msg
for ai, bi in zip(actual, expected):
assert_eq(ai, bi, msg=msg, rtol=rtol, atol=atol)
return
if rtol is None and atol is None:
assert actual == expected, f"{actual} != {expected}"
else:
atol = 0 if atol is None else atol
rtol = 0 if rtol is None else rtol
assert (
abs(actual - expected) <= atol + expected * rtol
), f"{actual} != {expected}"
_gpu_is_old = None
def gpu_capabilities_older_than_70() -> bool:
"""Return True if the GPU's compute capability is older than SM70."""
global _gpu_is_old
if _gpu_is_old is None:
for i in range(torch.cuda.device_count()):
major, _ = torch.cuda.get_device_capability(f"cuda:{i}")
if major < 7:
_gpu_is_old = True
if _gpu_is_old is None:
_gpu_is_old = False
return _gpu_is_old
def bf16_support():
# Ampere cards support bf16
return torch.cuda.is_available() and (
"RTX" in torch.cuda.get_device_name() or "A100" in torch.cuda.get_device_name()
)
def make_seq(n_ctx: int, value: int, d_model: int, dtype: Dict[str, Any]):
return torch.full([n_ctx, d_model], value, **dtype)
def make_seq_arange(n_ctx: int, start_value: int, d_head: int, dtype: Dict[str, Any]):
return (
torch.full([n_ctx, d_head], start_value, **dtype)
+ torch.arange(n_ctx, **dtype)[:, None]
)
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import numpy as np
import torch
import triton
import triton.language as tl
@triton.jit
def garbage_pad_ragged_acts_kernel(
ragged_acts_ptr,
ragged_acts_offset_per_seq_ptr,
n_ctx_per_seq_ptr,
padded_acts_ptr,
BLOCK_SIZE: tl.constexpr, # How many inputs each program should process
n_ctx_max: tl.constexpr,
):
# There are multiple 'program's processing different data. We identify which program
# we are here
seq_idx = tl.program_id(axis=0)
ctx_idx = tl.program_id(axis=1)
# This program will process inputs that are offset from the initial data.
# for instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
ragged_acts_offset_ptr = ragged_acts_offset_per_seq_ptr + seq_idx
ragged_acts_offset = tl.load(ragged_acts_offset_ptr)
# Create a mask to guard memory operations against out-of-bounds accesses
n_ctx_in_this_seq_ptr = n_ctx_per_seq_ptr + seq_idx
n_ctx_in_this_seq = tl.load(n_ctx_in_this_seq_ptr)
ctx_idx_too_large_mask = ctx_idx < n_ctx_in_this_seq
    # Element offsets into the flat [total_n_ctx, d_model] ragged tensor: this program
    # handles row (ragged_acts_offset + ctx_idx), and each row holds BLOCK_SIZE
    # (= d_model) contiguous elements
    ragged_acts_offsets = (ragged_acts_offset + ctx_idx) * BLOCK_SIZE + tl.arange(
        0, BLOCK_SIZE
    )

    # Load the ragged acts. Since BLOCK_SIZE equals d_model, the only out-of-bounds
    # access we can have is when ctx_idx exceeds this sequence's n_ctx
    acts = tl.load(ragged_acts_ptr + ragged_acts_offsets, mask=ctx_idx_too_large_mask)

    # Element offsets into the padded [n_seqs, n_ctx_max, d_model] output
    padded_acts_offsets = (seq_idx * n_ctx_max + ctx_idx) * BLOCK_SIZE + tl.arange(
        0, BLOCK_SIZE
    )

    # Write things back, again masking out the sections that would be garbage
    tl.store(padded_acts_ptr + padded_acts_offsets, acts, mask=ctx_idx_too_large_mask)
class RaggedActivations:
def __init__(self, raw_tensor: torch.Tensor, n_ctx_per_seq: List[int]):
self.raw_tensor = raw_tensor
self.n_ctx_per_seq = n_ctx_per_seq
@property
def n_seqs(self):
return len(self.n_ctx_per_seq)
@property
def max_n_ctx_per_seq(self):
return max(self.n_ctx_per_seq)
@property
def dtype(self):
return self.raw_tensor.dtype
@property
def device(self):
return self.raw_tensor.device
@classmethod
def from_list(cls, tensors: List[torch.Tensor]):
"""Tensors must all be of shape [n_ctx, d_model]."""
return cls(
raw_tensor=torch.cat(tensors),
n_ctx_per_seq=[tensor.shape[0] for tensor in tensors],
)
def iter_full_tensors(self):
idx_so_far = 0
for n_ctx_in_this_seq in self.n_ctx_per_seq:
yield self.raw_tensor[idx_so_far : idx_so_far + n_ctx_in_this_seq]
idx_so_far += n_ctx_in_this_seq
def to_garbage_padded(self) -> torch.Tensor:
"""
Create a tensor of shape (n_seqs, n_ctx_max, d_model) where the
sequences are right-padded with garbage data
"""
n_seqs = len(self.n_ctx_per_seq)
n_ctx_max = max(self.n_ctx_per_seq)
n_dim = self.raw_tensor.shape[-1]
        # TODO: add a flag to control whether zeros are used for the garbage padding
padded_acts = torch.zeros(
n_seqs, n_ctx_max, n_dim, dtype=self.raw_tensor.dtype, device="cuda"
)
idx_so_far = 0
for seq_idx, n_ctx_in_this_seq in enumerate(self.n_ctx_per_seq):
this_seq = self.raw_tensor[idx_so_far : idx_so_far + n_ctx_in_this_seq]
padded_acts[seq_idx, :n_ctx_in_this_seq, :] = this_seq
idx_so_far += n_ctx_in_this_seq
return padded_acts
def triton_to_garbage_padded(self) -> torch.Tensor:
"""
Create a tensor of shape (n_seqs, n_ctx_max, d_model) where the
sequences are right-padded with garbage data
"""
n_seqs = len(self.n_ctx_per_seq)
n_ctx_max = max(self.n_ctx_per_seq)
ragged_acts = self.raw_tensor
d_model = ragged_acts.shape[-1]
padded_acts = torch.empty(
n_seqs, n_ctx_max, d_model, dtype=ragged_acts.dtype, device="cuda"
)
# We just use one program per n_ctx position for simplicity
assert d_model >= 128, f"bad {d_model=}"
assert d_model <= 8 * 1024, f"bad {d_model=}"
assert d_model % 32 == 0, f"bad {d_model=}"
# We use numpy here because it's a bit faster
n_ctx_per_seq = self.n_ctx_per_seq
ragged_acts_offset_per_seq = get_acts_offset_per_seq(n_ctx_per_seq)
# The SPMD launch grid denotes the number of kernel instances that run in parallel.
# It is analogous to CUDA launch grids. It can be either Tuple[int], or
# Callable(metaparameters) -> Tuple[int]
#
# In this case, we use a 2D grid where the size is n_ctx
grid_2d = (n_seqs, n_ctx_max)
# NOTE:
# - each torch.tensor object is implicitly converted into a pointer to its
# first element.
# - `triton.jit`'ed functions can be indexed with a launch grid to obtain a
# callable GPU kernel
# [breakpoint()]
garbage_pad_ragged_acts_kernel[grid_2d](
ragged_acts,
torch.tensor(ragged_acts_offset_per_seq, device="cuda"),
torch.tensor(self.n_ctx_per_seq, device="cuda"),
padded_acts,
BLOCK_SIZE=d_model,
n_ctx_max=n_ctx_max,
)
return padded_acts
def get_acts_offset_per_seq(n_ctx_per_seq):
n_ctx_per_seq_shifted = np.array([0] + n_ctx_per_seq[:-1])
ragged_acts_offset_per_seq = n_ctx_per_seq_shifted.cumsum(axis=0)
return ragged_acts_offset_per_seq
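

# A minimal usage sketch (illustrative only; the helper name below is hypothetical,
# and a CUDA device is assumed because the padding helpers allocate their output on
# "cuda"): two sequences of lengths 2 and 3 with d_model=4 are packed into a flat
# (5, 4) raw tensor, and padding produces a (n_seqs=2, n_ctx_max=3, d_model=4) tensor.
def _ragged_activations_usage_example() -> torch.Tensor:
    acts = RaggedActivations.from_list(
        [
            torch.ones(2, 4, device="cuda"),
            torch.ones(3, 4, device="cuda"),
        ]
    )
    assert acts.raw_tensor.shape == (5, 4)
    assert acts.n_ctx_per_seq == [2, 3]
    padded = acts.to_garbage_padded()
    assert padded.shape == (2, 3, 4)
    return padded
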
"""
# TODO: Build LUT
seq_idx = 1
ctx_idx = 0
ragged_offset = 1
# How to do a list of tensors?
#
# TODO: Add the QK dotprod to get scores
# - Start with a ragged tensor for the keys also
# - Using a list of tensors as the Keys
# - Using sequences
# 16x16x256
# scores [n_seq, n_ctx_keys_max, n_ctx_queries_max]
# final_out [n_seq, n_ctx_keys_max, d_model]
"""
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/garbage_pad_ragged_acts.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from typing import List, Tuple
import torch
from ragged_inference.garbage_pad_ragged_acts import RaggedActivations
class SingleSeqKVCache:
def __init__(self, keys: torch.Tensor, values: torch.Tensor):
        # keys and values each have shape [n_ctx, d_model_per_gpu]
self.raw_keys = keys
self.raw_values = values
@property
def keys(self) -> torch.Tensor:
return self.raw_keys
@property
def values(self) -> torch.Tensor:
return self.raw_values
@property
def n_ctx(self):
return self.raw_values.shape[0]
@property
def d_model_per_gpu(self):
return self.raw_values.shape[-1]
@property
def is_cuda(self):
return self.raw_values.is_cuda
@property
def dtype(self):
return self.raw_values.dtype
def extend_kv_caches(
seq_kv_cache: List[SingleSeqKVCache],
active_keys: RaggedActivations,
active_values: RaggedActivations,
) -> List[SingleSeqKVCache]:
assert seq_kv_cache[0].is_cuda
updated_seq_kv_cache = []
for cache, keys, values in zip(
seq_kv_cache, active_keys.iter_full_tensors(), active_values.iter_full_tensors()
):
# Dim 1 is the context
new_cache = SingleSeqKVCache(
keys=torch.cat([cache.keys, keys], dim=0),
values=torch.cat([cache.values, values], dim=0),
)
updated_seq_kv_cache.append(new_cache)
return updated_seq_kv_cache
def garbage_pad_seq_kv_cache(
seq_kv_cache: List[SingleSeqKVCache],
) -> Tuple[torch.Tensor, torch.Tensor]:
assert seq_kv_cache[0].is_cuda
dtype = seq_kv_cache[0].dtype
n_ctx_per_kv_cache = [seq.n_ctx for seq in seq_kv_cache]
    # Allocate an output of shape (n_seqs, n_ctx_max, d_model) and copy each
    # sequence's cache into it
n_seqs = len(n_ctx_per_kv_cache)
n_ctx_max = max(n_ctx_per_kv_cache)
padded_keys = torch.empty(
n_seqs,
n_ctx_max,
seq_kv_cache[0].d_model_per_gpu,
dtype=dtype,
device="cuda",
)
padded_values = torch.empty(
n_seqs,
n_ctx_max,
seq_kv_cache[0].d_model_per_gpu,
dtype=dtype,
device="cuda",
)
for seq_idx, seq in enumerate(seq_kv_cache):
padded_keys[seq_idx, : seq.n_ctx, :] = seq.keys
padded_values[seq_idx, : seq.n_ctx, :] = seq.values
return (padded_keys, padded_values)
def garbage_pad_keys(
seq_kv_cache: List[SingleSeqKVCache],
) -> torch.Tensor:
assert seq_kv_cache[0].is_cuda
dtype = seq_kv_cache[0].dtype
n_ctx_per_kv_cache = [seq.n_ctx for seq in seq_kv_cache]
    # Allocate an output of shape (n_seqs, n_ctx_max, d_model) and copy each
    # sequence's keys into it
n_seqs = len(n_ctx_per_kv_cache)
n_ctx_max = max(n_ctx_per_kv_cache)
padded_keys = torch.empty(
n_seqs,
n_ctx_max,
seq_kv_cache[0].d_model_per_gpu,
dtype=dtype,
device="cuda",
)
for seq_idx, seq in enumerate(seq_kv_cache):
padded_keys[seq_idx, : seq.n_ctx, :] = seq.keys
return padded_keys
@lru_cache(maxsize=1) # Memoize because we repeat this for consecutive resblocks
def _create_indices(n_ctx_per_kv_cache):
"""
We cache this because it requires some substantial CPU work and it's done multiple
times sequentially (once per resblock)
"""
indices_list = []
ragged_idx = 0
max_n_ctx = max(n_ctx_per_kv_cache)
for n_ctx in n_ctx_per_kv_cache:
for idx_into_seq in range(max_n_ctx):
if idx_into_seq < n_ctx:
indices_list.append(ragged_idx)
ragged_idx += 1
else:
indices_list.append(0) # Add a placeholder
return torch.tensor(indices_list, device="cuda")
def calculate_scores_via_qk_dotprod(
seq_kv_cache: List[SingleSeqKVCache], # These have already been extended
active_queries: RaggedActivations,
) -> torch.Tensor:
padded_keys = garbage_pad_keys(seq_kv_cache)
padded_active_queries = active_queries.to_garbage_padded()
return torch.einsum("bkd,bqd->bqk", padded_keys, padded_active_queries)
def scores_via_qk_dotprod(
query: RaggedActivations,
key: RaggedActivations,
) -> torch.Tensor:
padded_query = query.to_garbage_padded()
padded_key = key.to_garbage_padded()
return torch.einsum("bkd,bqd->bqk", padded_key, padded_query)
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/seq_kv_cache.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl
from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
# This implements a simple QKt matrix multiplication (non ragged), for reference
# Author: Tom B Brown
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
def get_configs_io_bound():
configs = []
for num_stages in [2, 3, 4, 5, 6]:
for block_m in [16, 32]:
for block_k in [32, 64]:
for block_n in [32, 64, 128, 256]:
num_warps = 2 if block_n <= 64 else 4
configs.append(
triton.Config(
{
"BLOCK_M": block_m,
"BLOCK_K": block_n,
"BLOCK_D": block_k,
},
num_stages=num_stages,
num_warps=num_warps,
)
)
return configs
def get_all_configs():
return [
# basic configs for compute-bound matmuls
triton.Config(
{"BLOCK_M": 128, "BLOCK_K": 256, "BLOCK_D": 32},
num_stages=3,
num_warps=8,
),
triton.Config(
{"BLOCK_M": 256, "BLOCK_K": 128, "BLOCK_D": 32},
num_stages=3,
num_warps=8,
),
triton.Config(
{"BLOCK_M": 256, "BLOCK_K": 64, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_K": 256, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_K": 128, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_K": 64, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_K": 128, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_K": 32, "BLOCK_D": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_K": 32, "BLOCK_D": 32},
num_stages=5,
num_warps=2,
),
] + get_configs_io_bound()
def get_fast_dev_configs():
return [
triton.Config(
{"BLOCK_Q": 64, "BLOCK_K": 32, "BLOCK_D": 32},
num_stages=5,
num_warps=2,
)
]
# fmt: off
@triton.autotune(
# configs=get_all_configs(),
configs=get_fast_dev_configs(),
key=["n_ctx_q", "n_ctx_k", "d_model"],
prune_configs_by={
"early_config_prune": early_config_prune,
"perf_model": estimate_matmul_time,
"top_k": 10,
},
)
@triton.jit
def _kernel(
q_ptr, k_ptr, scores_ptr,
n_ctx_q,
n_ctx_k, # N
d_model,
stride_ctx_q, stride_ctx_k,
stride_d, # Stride along the d_model_per_head dim
stride_out_q, stride_out_k,
BLOCK_Q: tl.constexpr,
BLOCK_K: tl.constexpr,
BLOCK_D: tl.constexpr,
):
# fmt: on
# matrix multiplication
pid = tl.program_id(0)
# Determine the number of blocks in the grid
grid_k = (n_ctx_k + BLOCK_K - 1) // BLOCK_K
pid_q = pid // grid_k
pid_k = pid % grid_k
# do matrix multiplication
rq = pid_q * BLOCK_Q + tl.arange(0, BLOCK_Q)
rq = tl.max_contiguous(tl.multiple_of(rq % n_ctx_q, BLOCK_Q), BLOCK_Q)
rk = pid_k * BLOCK_K + tl.arange(0, BLOCK_K)
rk = tl.max_contiguous(tl.multiple_of(rk % n_ctx_k, BLOCK_K), BLOCK_K)
# Iterate through blocks of the d_model dimension and accumulate values into acc
acc_tile = tl.zeros((BLOCK_Q, BLOCK_K), dtype=tl.float32)
rd = tl.arange(0, BLOCK_D)
q_ptr_tile = q_ptr + (rq[:, None] * stride_ctx_q + rd[None, :] * stride_d)
k_ptr_tile = k_ptr + (rd[:, None] * stride_d + rk[None, :] * stride_ctx_k)
for d_max_offset in range(d_model, 0, -BLOCK_D):
q_tile = tl.load(q_ptr_tile, mask=rd[None, :] < d_max_offset, other=0.0)
k_tile = tl.load(k_ptr_tile, mask=rd[:, None] < d_max_offset, other=0.0)
# In einsum notation, the following does: qd,dk->qk
acc_tile += tl.dot(q_tile, k_tile)
q_ptr_tile += BLOCK_D * stride_d
k_ptr_tile += BLOCK_D * stride_d
acc_tile = acc_tile.to(scores_ptr.dtype.element_ty)
# We rematerialize rq and rk here because it allows them to be deallocated above
# instead of being kept in registers throughout the inner for-loop
rq = pid_q * BLOCK_Q + tl.arange(0, BLOCK_Q)
rk = pid_k * BLOCK_K + tl.arange(0, BLOCK_K)
scores_offset_tile = rq[:, None] * stride_out_q + rk[None, :] * stride_out_k
scores_ptr_tile = scores_ptr + scores_offset_tile
mask = (rq < n_ctx_q)[:, None] & (rk < n_ctx_k)[None, :]
tl.store(scores_ptr_tile, acc_tile, mask=mask)
def qk_dotprod(query, key):
device = query.device
# handle non-contiguous inputs if necessary
if query.stride(0) > 1 and query.stride(1) > 1:
query = query.contiguous()
if key.stride(0) > 1 and key.stride(1) > 1:
key = key.contiguous()
# check constraints
n_ctx_q, d_model = query.shape
n_ctx_k, d_model_k = key.shape
assert d_model == d_model_k, f"{query.shape=} {key.shape=}"
# allocates output
scores_out = torch.empty((n_ctx_q, n_ctx_k), device=device, dtype=query.dtype)
# Stride along the d_model dimension
stride_d = query.stride(1)
assert stride_d == key.stride(1), f"{stride_d=}, {key.stride(1)=}"
# launch kernel
def grid(META):
return (
triton.cdiv(n_ctx_q, META["BLOCK_Q"])
* triton.cdiv(n_ctx_k, META["BLOCK_K"]),
)
_kernel[grid](
query,
key,
scores_out,
n_ctx_q,
n_ctx_k,
d_model,
query.stride(0), # stride_ctx_q
key.stride(0), # stride_ctx_k
stride_d, # stride_d
scores_out.stride(0), # stride_out_q
scores_out.stride(1), # stride_out_k
)
return scores_out
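

# A minimal usage sketch (illustrative only; the helper name is hypothetical and a
# CUDA device is required since the Triton kernel runs on GPU). qk_dotprod takes a
# query of shape (n_ctx_q, d_model) and a key of shape (n_ctx_k, d_model) and returns
# the (n_ctx_q, n_ctx_k) score matrix, matching torch.einsum("qd,kd->qk", query, key).
def _qk_dotprod_usage_example() -> torch.Tensor:
    query = torch.randn(128, 64, device="cuda", dtype=torch.float16)
    key = torch.randn(256, 64, device="cuda", dtype=torch.float16)
    scores = qk_dotprod(query, key)
    assert scores.shape == (128, 256)
    return scores
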
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/triton_v2_qk_dotprod.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl
from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
# Credits: this comes directly from the Triton repo, authors are Da Yan and Phil Tillet
# See https://github.com/openai/triton/blob/v2.0/python/triton/ops/matmul.py
# copied here to help with development of new features, to be cleaned up
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
def get_configs_io_bound():
configs = []
for num_stages in [2, 3, 4, 5, 6]:
for block_m in [16, 32]:
for block_k in [32, 64]:
for block_n in [32, 64, 128, 256]:
num_warps = 2 if block_n <= 64 else 4
configs.append(
triton.Config(
{
"BLOCK_M": block_m,
"BLOCK_N": block_n,
"BLOCK_K": block_k,
},
num_stages=num_stages,
num_warps=num_warps,
)
)
return configs
def get_all_configs():
return [
# basic configs for compute-bound matmuls
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32},
num_stages=3,
num_warps=8,
),
triton.Config(
{"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32},
num_stages=3,
num_warps=8,
),
triton.Config(
{"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32},
num_stages=4,
num_warps=4,
),
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32},
num_stages=5,
num_warps=2,
),
] + get_configs_io_bound()
def get_fast_dev_configs():
return [
triton.Config(
{"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32},
num_stages=5,
num_warps=2,
)
]
# fmt: off
@triton.autotune(
# configs=get_all_configs(),
configs=get_fast_dev_configs(),
key=["M", "N", "K"],
prune_configs_by={
"early_config_prune": early_config_prune,
"perf_model": estimate_matmul_time,
"top_k": 10,
},
)
@triton.jit
def _kernel(
A, B, C, M, N, K,
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
):
# fmt: on
# matrix multiplication
pid = tl.program_id(0)
# Determine the number of blocks in the grid
grid_n = (N + BLOCK_N - 1) // BLOCK_N
pid_m = pid // grid_n
pid_n = pid % grid_n
# do matrix multiplication
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = tl.arange(0, BLOCK_K)
# pointers
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for k in range(K, 0, -BLOCK_K):
a = tl.load(A, mask=rk[None, :] < k, other=0.0)
b = tl.load(B, mask=rk[:, None] < k, other=0.0)
acc += tl.dot(a, b)
A += BLOCK_K * stride_ak
B += BLOCK_K * stride_bk
acc = acc.to(C.dtype.element_ty)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.store(C, acc, mask=mask)
def matmul(a, b):
device = a.device
# handle non-contiguous inputs if necessary
if a.stride(0) > 1 and a.stride(1) > 1:
a = a.contiguous()
if b.stride(0) > 1 and b.stride(1) > 1:
b = b.contiguous()
# checks constraints
assert a.shape[1] == b.shape[0], f"incompatible dimensions, {a.shape=} {b.shape=}"
M, K = a.shape
_, N = b.shape
# allocates output
c = torch.empty((M, N), device=device, dtype=a.dtype)
# launch kernel
def grid(META):
return (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),)
# fmt: off
_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
)
# fmt: on
return c
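

# A minimal usage sketch (illustrative only; the helper name is hypothetical and a
# CUDA device is required since the Triton kernel runs on GPU). matmul(a, b) mirrors
# torch.matmul for 2D inputs: an (M, K) tensor times a (K, N) tensor gives (M, N).
def _matmul_usage_example() -> torch.Tensor:
    a = torch.randn(128, 64, device="cuda", dtype=torch.float16)
    b = torch.randn(64, 96, device="cuda", dtype=torch.float16)
    c = matmul(a, b)
    assert c.shape == (128, 96)
    return c
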
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/triton_v2_matmul.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import List, Optional
import torch
import triton
import triton.language as tl
from ragged_inference.garbage_pad_ragged_acts import RaggedActivations
from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
# This implements a ragged attention (batched attention mechanism natively handling different sequence sizes)
# Author: Tom B Brown
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
# TODO: tune these
BLOCK_Q = 16
BLOCK_K = 128
BLOCK_D = 32
def get_fast_dev_configs():
return [
triton.Config(
{"BLOCK_Q": BLOCK_Q, "BLOCK_K": BLOCK_K, "BLOCK_D": BLOCK_D},
num_stages=5,
num_warps=2,
)
]
@triton.autotune(
# configs=get_all_configs(),
configs=get_fast_dev_configs(),
key=["max_n_ctx_q_across_seqs", "max_n_ctx_k_across_seqs", "d_head"],
prune_configs_by={
"early_config_prune": early_config_prune,
"perf_model": estimate_matmul_time,
"top_k": 10,
},
)
@triton.jit
def _qk_dotprod_kernel(
# Pointers to our tensors
q_ptr,
k_ptr,
scores_ptr, # Rectangular output tensor
# Pointers to lookup tables (sometimes referred to as a "lut")
pid_to_in_q_token_offset_ptr,
pid_to_in_k_token_offset_ptr,
pid_to_out_q_block_ptr,
pid_to_out_k_block_ptr,
pid_to_out_seq_idx_ptr,
# Integers
max_n_ctx_q_across_seqs,
max_n_ctx_k_across_seqs,
d_head,
stride_ctx_q,
stride_ctx_k,
stride_out_q,
stride_out_k,
stride_out_seq,
total_ctx_q_across_all_seqs,
total_ctx_k_across_all_seqs,
# These get populated from the triton.Config
BLOCK_Q: tl.constexpr,
BLOCK_K: tl.constexpr,
BLOCK_D: tl.constexpr,
):
"""
Adapted from https://github.com/openai/triton/blob/v2.0/python/triton/ops/matmul.py
"""
# matrix multiplication
pid = tl.program_id(0)
out_q_block = tl.load(pid_to_out_q_block_ptr + pid)
out_k_block = tl.load(pid_to_out_k_block_ptr + pid)
out_seq_idx = tl.load(pid_to_out_seq_idx_ptr + pid)
in_q_token_offset = tl.load(pid_to_in_q_token_offset_ptr + pid)
in_k_token_offset = tl.load(pid_to_in_k_token_offset_ptr + pid)
# Define index ranges; we follow the Triton convention of prefixing
# ranges with "r", e.g. "rq" below is the range for queries
rq = in_q_token_offset + tl.arange(0, BLOCK_Q)
rk = in_k_token_offset + tl.arange(0, BLOCK_K)
# Prevent out-of-bounds reads. It's ok to read garbage data for queries and keys that aren't
# actually used. Their values don't affect any of the outputs.
q_ctx_in_bounds = rq < total_ctx_q_across_all_seqs
k_ctx_in_bounds = rk < total_ctx_k_across_all_seqs
# We will accumulate all the d_head items into acc
acc_tile = tl.zeros((BLOCK_Q, BLOCK_K), dtype=tl.float32)
rd = tl.arange(0, BLOCK_D) # rd indexes into the d_head dimension
# We use broadcasting to convert our 1D ranges into 2D tiles
q_ptr_tile = q_ptr + (rq[:, None] * stride_ctx_q + rd[None, :])
k_ptr_tile = k_ptr + (rd[:, None] + rk[None, :] * stride_ctx_k)
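# Via broadcasting, q_ptr_tile addresses a (BLOCK_Q, BLOCK_D) tile of Q and
# k_ptr_tile a (BLOCK_D, BLOCK_K) tile of K; the d_head dimension is assumed to
# have stride 1, which the callers below assert before launching the kernel.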
# TODO: once we have profiling data, see if we can rewrite this to be more readable
# by just updating rd += BLOCK_D
for d_max_offset in range(d_head, 0, -BLOCK_D):
q_tile = tl.load(
q_ptr_tile,
mask=(rd[None, :] < d_max_offset) & q_ctx_in_bounds[:, None],
other=0.0,
)
k_tile = tl.load(
k_ptr_tile,
mask=(rd[:, None] < d_max_offset) & k_ctx_in_bounds[None, :],
other=0.0,
)
# In einsum notation, the tl.dot does: qd,dk->qk
# This should use tensorcores, so the inputs might be fp16, but the outputs
# and all the internal accumulators are fp32
acc_tile += tl.dot(q_tile, k_tile)
q_ptr_tile += BLOCK_D
k_ptr_tile += BLOCK_D
# Figure out the output blocks
rq_out = out_q_block * BLOCK_Q + tl.arange(0, BLOCK_Q)
rk_out = out_k_block * BLOCK_K + tl.arange(0, BLOCK_K)
scores_offset_tile = (
rq_out[:, None] * stride_out_q
+ rk_out[None, :] * stride_out_k
+ out_seq_idx * stride_out_seq
)
scores_ptr_tile = scores_ptr + scores_offset_tile
mask = (rq_out < max_n_ctx_q_across_seqs)[:, None] & (
rk_out < max_n_ctx_k_across_seqs
)[None, :]
# Cast back to lower precision immediately before storing
acc_tile = acc_tile.to(scores_ptr.dtype.element_ty)
tl.store(scores_ptr_tile, acc_tile, mask=mask)
@dataclass
class RaggedQkPidLookupTable:
# TODO: link to a drawing of what these tensors are
# All cuda tensors
pid_to_in_q_token_offset: torch.Tensor
pid_to_in_k_token_offset: torch.Tensor
pid_to_out_q_block: torch.Tensor
pid_to_out_k_block: torch.Tensor
pid_to_out_seq_idx: torch.Tensor
n_pids_total: int
@staticmethod
def from_single_seq(n_ctx_q: int, n_ctx_k: int) -> "RaggedQkPidLookupTable":
grid_q = triton.cdiv(n_ctx_q, BLOCK_Q)
grid_k = triton.cdiv(n_ctx_k, BLOCK_K)
n_pids_total = grid_q * grid_k
pid_to_in_q_token_offset = torch.zeros(
n_pids_total, dtype=torch.int32, device="cuda"
)
pid_to_in_k_token_offset = torch.zeros(
n_pids_total, dtype=torch.int32, device="cuda"
)
pid_to_out_q_block = torch.zeros(n_pids_total, dtype=torch.int32, device="cuda")
pid_to_out_k_block = torch.zeros(n_pids_total, dtype=torch.int32, device="cuda")
pid_to_out_seq_idx = torch.zeros(n_pids_total, dtype=torch.int32, device="cuda")
for pid in range(n_pids_total):
q_block_idx = pid // grid_k
k_block_idx = pid % grid_k
in_q_token_offset = q_block_idx * BLOCK_Q
in_k_token_offset = k_block_idx * BLOCK_K
pid_to_out_q_block[pid] = q_block_idx
pid_to_out_k_block[pid] = k_block_idx
pid_to_in_q_token_offset[pid] = in_q_token_offset
pid_to_in_k_token_offset[pid] = in_k_token_offset
return RaggedQkPidLookupTable(
pid_to_in_q_token_offset=pid_to_in_q_token_offset,
pid_to_in_k_token_offset=pid_to_in_k_token_offset,
pid_to_out_q_block=pid_to_out_q_block,
pid_to_out_k_block=pid_to_out_k_block,
pid_to_out_seq_idx=pid_to_out_seq_idx,
n_pids_total=n_pids_total,
)
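# Illustrative example for from_single_seq (assumed sizes): with n_ctx_q=20, n_ctx_k=200
# and the default BLOCK_Q=16, BLOCK_K=128 we get grid_q=2, grid_k=2, i.e. 4 pids with
#   pid_to_out_q_block       = [0, 0, 1, 1]
#   pid_to_out_k_block       = [0, 1, 0, 1]
#   pid_to_in_q_token_offset = [0, 0, 16, 16]
#   pid_to_in_k_token_offset = [0, 128, 0, 128]
#   pid_to_out_seq_idx       = [0, 0, 0, 0]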
@staticmethod
def from_query_and_key_tokens_per_seq(
n_ctx_q_per_seq: List[int],
n_ctx_k_per_seq: List[int],
block_q_override: Optional[int] = None,
block_k_override: Optional[int] = None,
) -> "RaggedQkPidLookupTable":
block_q = block_q_override if block_q_override else BLOCK_Q
block_k = block_k_override if block_k_override else BLOCK_K
pid_to_in_q_token_offset = []
pid_to_in_k_token_offset = []
pid_to_out_q_block = []
pid_to_out_k_block = []
pid_to_out_seq_idx = []
n_in_q_token_so_far = 0
n_in_k_token_so_far = 0
for seq_idx, (n_ctx_q, n_ctx_k) in enumerate(
zip(n_ctx_q_per_seq, n_ctx_k_per_seq)
):
# Everything below is per sequence
n_q_ctx_blocks = triton.cdiv(n_ctx_q, block_q)
n_k_ctx_blocks = triton.cdiv(n_ctx_k, block_k)
n_pids_in_seq = n_q_ctx_blocks * n_k_ctx_blocks
for pid in range(n_pids_in_seq):
q_block_idx = pid // n_k_ctx_blocks
k_block_idx = pid % n_k_ctx_blocks
in_q_token_offset = q_block_idx * block_q
in_k_token_offset = k_block_idx * block_k
pid_to_out_q_block.append(q_block_idx)
pid_to_out_k_block.append(k_block_idx)
pid_to_in_q_token_offset.append(in_q_token_offset + n_in_q_token_so_far)
pid_to_in_k_token_offset.append(in_k_token_offset + n_in_k_token_so_far)
pid_to_out_seq_idx.append(seq_idx)
n_in_q_token_so_far += n_ctx_q
n_in_k_token_so_far += n_ctx_k
args = {"dtype": torch.int32, "device": "cuda"}
return RaggedQkPidLookupTable(
pid_to_in_q_token_offset=torch.tensor(pid_to_in_q_token_offset, **args), # type: ignore
pid_to_in_k_token_offset=torch.tensor(pid_to_in_k_token_offset, **args), # type: ignore
pid_to_out_q_block=torch.tensor(pid_to_out_q_block, **args), # type: ignore
pid_to_out_k_block=torch.tensor(pid_to_out_k_block, **args), # type: ignore
pid_to_out_seq_idx=torch.tensor(pid_to_out_seq_idx, **args), # type: ignore
n_pids_total=len(pid_to_in_q_token_offset),
)
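# Illustrative example for from_query_and_key_tokens_per_seq (assumed sizes): with
# n_ctx_q_per_seq=[20, 16], n_ctx_k_per_seq=[200, 100] and the default BLOCK_Q=16,
# BLOCK_K=128, sequence 0 contributes 2 x 2 = 4 pids and sequence 1 contributes 1 pid, so
#   pid_to_out_q_block       = [0, 0, 1, 1, 0]
#   pid_to_out_k_block       = [0, 1, 0, 1, 0]
#   pid_to_in_q_token_offset = [0, 0, 16, 16, 20]
#   pid_to_in_k_token_offset = [0, 128, 0, 128, 200]
#   pid_to_out_seq_idx       = [0, 0, 0, 0, 1]
# where the last entries are shifted by the 20 query / 200 key tokens of sequence 0.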
def ragged_single_seq_qk_dotprod(
query: torch.Tensor, key: torch.Tensor, lut: RaggedQkPidLookupTable
) -> torch.Tensor:
assert query.ndim == 2 and key.ndim == 2
device = query.device
# handle non-contiguous inputs if necessary
if query.stride(0) > 1 and query.stride(1) > 1:
query = query.contiguous()
if key.stride(0) > 1 and key.stride(1) > 1:
key = key.contiguous()
# check constraints
n_ctx_q, d_head = query.shape
n_ctx_k, d_head_k = key.shape
assert d_head == d_head_k, f"{query.shape=} {key.shape=}"
# allocates output
scores_out = torch.empty((1, n_ctx_q, n_ctx_k), device=device, dtype=query.dtype)
# Stride along the d_head dimension must be 1
assert query.stride(1) == 1, f"{query.stride(1)}"
assert key.stride(1) == 1, f"{key.stride(1)}"
# pid_to_seq_idx = [0, 0, 1, 2, 2]
grid = (lut.n_pids_total,)
_qk_dotprod_kernel[grid](
q_ptr=query,
k_ptr=key,
scores_ptr=scores_out,
# Lookup tables (sometimes referred to as a "lut")
pid_to_in_q_token_offset_ptr=lut.pid_to_in_q_token_offset,
pid_to_in_k_token_offset_ptr=lut.pid_to_in_k_token_offset,
pid_to_out_q_block_ptr=lut.pid_to_out_q_block,
pid_to_out_k_block_ptr=lut.pid_to_out_k_block,
pid_to_out_seq_idx_ptr=lut.pid_to_out_seq_idx,
# Integers
max_n_ctx_q_across_seqs=n_ctx_q,
max_n_ctx_k_across_seqs=n_ctx_k,
d_head=d_head,
stride_ctx_q=query.stride(0),
stride_ctx_k=key.stride(0),
stride_out_seq=scores_out.stride(0),
stride_out_q=scores_out.stride(1),
stride_out_k=scores_out.stride(2),
total_ctx_q_across_all_seqs=n_ctx_q,
total_ctx_k_across_all_seqs=n_ctx_k,
)
return scores_out.reshape((n_ctx_q, n_ctx_k))
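# Example usage of ragged_single_seq_qk_dotprod above (a minimal sketch; sizes, device
# and dtype are illustrative assumptions):
#   q = torch.randn(20, 32, device="cuda", dtype=torch.float16)   # (n_ctx_q, d_head)
#   k = torch.randn(200, 32, device="cuda", dtype=torch.float16)  # (n_ctx_k, d_head)
#   lut = RaggedQkPidLookupTable.from_single_seq(n_ctx_q=20, n_ctx_k=200)
#   scores = ragged_single_seq_qk_dotprod(q, k, lut)  # (20, 200) q @ k.T scores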
def ragged_qk_dotprod(
query: RaggedActivations, key: RaggedActivations, lut: RaggedQkPidLookupTable
) -> torch.Tensor:
device = query.device
assert query.raw_tensor.is_contiguous()
assert key.raw_tensor.is_contiguous()
# check constraints
total_ctx_q_across_all_seqs, d_head = query.raw_tensor.shape
total_ctx_k_across_all_seqs, d_head_k = key.raw_tensor.shape
assert d_head == d_head_k, f"{query.raw_tensor.shape=} {key.raw_tensor.shape=}"
# allocates output
# max_n_ctx_q_across_seqs = query.max_n_ctx_per_seq
assert query.n_seqs == key.n_seqs
# TODO: add a flag to fill the unused (garbage) region with zeros instead of ones
scores_out = torch.ones(
(query.n_seqs, query.max_n_ctx_per_seq, key.max_n_ctx_per_seq),
device=device,
dtype=query.dtype,
)
# Stride along the d_head dimension must be 1
assert query.raw_tensor.stride(1) == 1, f"{query.raw_tensor.stride(1)}"
assert key.raw_tensor.stride(1) == 1, f"{key.raw_tensor.stride(1)}"
# pid_to_seq_idx = [0, 0, 1, 2, 2]
grid = (lut.n_pids_total,)
_qk_dotprod_kernel[grid](
q_ptr=query.raw_tensor,
k_ptr=key.raw_tensor,
scores_ptr=scores_out,
# Lookup tables (sometimes referred to as a "lut")
pid_to_in_q_token_offset_ptr=lut.pid_to_in_q_token_offset,
pid_to_in_k_token_offset_ptr=lut.pid_to_in_k_token_offset,
pid_to_out_q_block_ptr=lut.pid_to_out_q_block,
pid_to_out_k_block_ptr=lut.pid_to_out_k_block,
pid_to_out_seq_idx_ptr=lut.pid_to_out_seq_idx,
# Integers
max_n_ctx_q_across_seqs=query.max_n_ctx_per_seq,
max_n_ctx_k_across_seqs=key.max_n_ctx_per_seq,
d_head=d_head,
stride_ctx_q=query.raw_tensor.stride(0),
stride_ctx_k=key.raw_tensor.stride(0),
stride_out_seq=scores_out.stride(0),
stride_out_q=scores_out.stride(1),
stride_out_k=scores_out.stride(2),
total_ctx_q_across_all_seqs=total_ctx_q_across_all_seqs,
total_ctx_k_across_all_seqs=total_ctx_k_across_all_seqs,
)
return scores_out
| EXA-1-master | exa/libraries/xformers/experimental/ragged_inference/triton_v2_ragged_qk_dotprod.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import xformers
try:
import timm
from timm.models.vision_transformer import VisionTransformer
except ImportError:
timm = None
VisionTransformer = None
from xformers.helpers.timm_sparse_attention import TimmSparseAttention
_device_list = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
@pytest.mark.skipif(not xformers._is_triton_available(), reason="requires triton")
@pytest.mark.skipif(timm is None, reason="requires timm")
@pytest.mark.parametrize("device", _device_list)
def test_timm_sparse_attention(device):
img_size = 224
patch_size = 16
batch = 8
# Instantiate the reference model
model = VisionTransformer(
img_size=img_size,
patch_size=patch_size,
embed_dim=96,
depth=8,
num_heads=8,
mlp_ratio=3.0,
qkv_bias=False,
norm_layer=torch.nn.LayerNorm,
).to(device)
# Monkey patch all attentions to test the sparse-aware wrap
def replace_attn_with_xformers_one(module, att_mask):
module_output = module
if isinstance(module, timm.models.vision_transformer.Attention):
qkv = module.qkv
dim = qkv.weight.shape[1] * module.num_heads
module_output = TimmSparseAttention(
dim, module.num_heads, attn_mask=att_mask
)
for name, child in module.named_children():
module_output.add_module(
name, replace_attn_with_xformers_one(child, att_mask)
)
del module
return module_output
H, W = img_size // patch_size, img_size // patch_size
mask = (torch.rand((H * W + 1, H * W + 1), device=device) > 0.5).bool()
model = replace_attn_with_xformers_one(model, att_mask=mask)
# Check that we can throw a couple of random pictures at it
inputs = torch.rand((batch, 3, img_size, img_size), device=device)
_ = model(inputs)
| EXA-1-master | exa/libraries/xformers/tests/test_timm_sparse.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import Activation
from xformers.components.feedforward import FEEDFORWARD_REGISTRY, build_feedforward
from xformers.components.feedforward.mixture_of_experts import GateConfig
from xformers.helpers.test_utils import init_torch_distributed_local
BATCH = 4
SEQ = 256
EMBD = 16
LATENT = 128
DROPOUT = 0.5
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
assert FEEDFORWARD_REGISTRY.keys(), "Feedforward layers should have been registered"
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("device", DEVICES)
def test_feedforward(
feedforward_name: str, activation: Activation, device: torch.device
):
test_config = {
"name": feedforward_name,
"dim_model": LATENT,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4, # MoE
"gate": "top_2", # MoE
}
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
# dummy, just check construction and dimensions in the FW pass
ffw = build_feedforward(test_config)
if ffw.requires_cuda and not device.type == "cuda":
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip("This MLP requires CUDA and current device does not match")
inputs = torch.rand(BATCH, SEQ, LATENT, device=device)
ffw = ffw.to(device)
_ = ffw(inputs)
def get_expert():
return torch.nn.Linear(LATENT, LATENT, bias=False)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="This test requires CUDA")
@pytest.mark.parametrize("gate", [g.value for g in GateConfig])
@pytest.mark.parametrize("number_of_local_experts", [None, 4])
@pytest.mark.parametrize("expert_constructor", [None, get_expert])
def test_moe(gate, number_of_local_experts, expert_constructor):
test_config = {
"name": "MixtureOfExperts",
"dim_model": LATENT,
"dropout": DROPOUT,
"activation": Activation.ReLU,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"number_of_local_experts": number_of_local_experts,
"gate": gate,
"expert_constructor": expert_constructor,
}
init_torch_distributed_local()
# dummy, just check construction and dimensions in the FW pass
ffw = build_feedforward(test_config)
inputs = torch.rand(BATCH, SEQ, LATENT, device=torch.device("cuda"))
ffw = ffw.to(torch.device("cuda"))
outputs = ffw(inputs)
loss = torch.sum(outputs)
loss.backward()
| EXA-1-master | exa/libraries/xformers/tests/test_feedforward.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import xformers
from xformers.components import MultiHeadDispatch
from xformers.components.attention import build_attention
from xformers.components.attention.attention_patterns import block_sparsify_tensor
from xformers.triton.utils import get_current_cuda_device
# CREDITS:
# Tests adapted, very lightly changed, from
# https://github.com/openai/triton/blob/master/python/test/unit/operators/test_blocksparse.py
# Initially copied here following a fork from the matmul kernel
_triton_available = xformers._is_triton_available()
_matmul_types = []
if _triton_available:
try:
import triton
from triton.ops.blocksparse import matmul as blocksparse_matmul
from triton.ops.blocksparse import softmax as blocksparse_softmax
from xformers.components.attention import BlockSparseAttention
from xformers.triton.utils import gpu_capabilities_older_than_70
_triton_available = not gpu_capabilities_older_than_70()
_matmul_types = ["sdd", "dsd", "dds"]
except (ImportError, ModuleNotFoundError) as e:
import logging
logging.warning(f"Triton is not available: {e}. Some tests will be skipped")
_triton_available = False
def mask_tensor(x, mask, block, value=0):
ret = x.clone()
for h, i, j in zip(*(mask == 0).nonzero(as_tuple=True)):
ret[:, h, i * block : (i + 1) * block, j * block : (j + 1) * block] = value
return ret
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.skipif(
not _triton_available or get_current_cuda_device() == "T4",
reason="FIXME - blocksparse matmuls are slightly off on T4s",
)
@pytest.mark.parametrize("MODE", _matmul_types)
@pytest.mark.parametrize("TRANS_A", [False, True])
@pytest.mark.parametrize("TRANS_B", [False, True])
@pytest.mark.parametrize("BLOCK", [16, 32, 64])
@pytest.mark.parametrize("DTYPE", [torch.float16])
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=32, H=2, M=512, N=384, K=256):
# set seed
torch.random.manual_seed(0)
# create inputs
a = torch.randn(
(Z, H, K, M) if TRANS_A else (Z, H, M, K), dtype=DTYPE, device="cuda"
)
b = torch.randn(
(Z, H, N, K) if TRANS_B else (Z, H, K, N), dtype=DTYPE, device="cuda"
)
shape = {
"sdd": (M, N),
"dsd": (a.shape[2], a.shape[3]),
"dds": (b.shape[2], b.shape[3]),
}[MODE]
layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
# triton result
op = blocksparse_matmul(
layout,
BLOCK,
MODE,
trans_a=TRANS_A,
trans_b=TRANS_B,
device=torch.device("cuda"),
)
ra = block_sparsify_tensor(a, layout, BLOCK) if MODE == "dsd" else a
rb = block_sparsify_tensor(b, layout, BLOCK) if MODE == "dds" else b
rc = triton.testing.catch_oor(lambda: op(ra, rb), pytest)
# torch result
ta = mask_tensor(a, layout, BLOCK) if MODE == "dsd" else a
tb = mask_tensor(b, layout, BLOCK) if MODE == "dds" else b
ta = ta.transpose(2, 3) if TRANS_A else ta
tb = tb.transpose(2, 3) if TRANS_B else tb
tc = torch.matmul(ta, tb)
tc = mask_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
tc = block_sparsify_tensor(tc, layout, BLOCK) if MODE == "sdd" else tc
# compare
torch.testing.assert_close(rc, tc)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.parametrize("BLOCK", [32, 128])
@pytest.mark.parametrize("WIDTH", [256, 576, 1024, 1792])
@pytest.mark.parametrize("DTYPE", [torch.float16, torch.float32])
def test_softmax(BLOCK, WIDTH, DTYPE):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 4, WIDTH, WIDTH
scale = 0.4
# create inputs
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
x = torch.randn((Z, H, M, N), dtype=DTYPE, requires_grad=True, device="cuda")
# triton result
op = blocksparse_softmax(layout, BLOCK, device=torch.device("cuda"))
tx = block_sparsify_tensor(x, layout, BLOCK)
ty = op(tx, scale=scale)
# torch result
rx = mask_tensor(x, layout, BLOCK, value=float("-inf"))
rx = rx[:, :, : (M // BLOCK) * BLOCK, : (M // BLOCK) * BLOCK]
ry = torch.softmax(rx * scale, -1)
ry = block_sparsify_tensor(ry, layout, BLOCK)
# compare
torch.testing.assert_close(ry, ty)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.parametrize("block", [32, 43, 128]) # 16, 32,
@pytest.mark.parametrize("dtype", [torch.float16])
def test_attention_fwd_bwd(
block,
dtype,
input_scale=1.0,
scale=1 / 8.0,
n_ctx=384,
batch_size=2,
n_heads=2,
):
# inputs
head_dim = 64
qkv_shape = (batch_size, n_heads, n_ctx, head_dim)
qkvs = [
torch.nn.Parameter(input_scale * torch.randn(qkv_shape), requires_grad=True)
.to(dtype)
.cuda()
for _ in range(3)
]
def loss_fn(x):
return (x**2).mean()
# Triton:
n_blocks = n_ctx // block
layout = torch.ones([n_heads, n_blocks, n_blocks], dtype=torch.long)
query, key, value = [x.clone() for x in qkvs]
query.retain_grad()
key.retain_grad()
value.retain_grad()
if block not in [16, 32, 64, 128]:
# Check that unsupported dimensions are caught
with pytest.raises(AssertionError):
_ = BlockSparseAttention(layout, block)
else:
block_sparse_attention = BlockSparseAttention(layout, block)
attn_out = block_sparse_attention(q=query, k=key, v=value, scale=scale)
# ad hoc loss
loss = loss_fn(attn_out)
loss.backward()
grads = [query.grad, key.grad, value.grad]
# Torch version:
torch_q, torch_k, torch_v = [x.clone() for x in qkvs]
torch_q = torch_q * scale
torch_q.retain_grad()
torch_k.retain_grad()
torch_v.retain_grad()
scores = scale * torch.einsum("bhsd,bhtd->bhst", torch_q, torch_k)
probs = torch.softmax(scores, dim=-1)
torch_attn_out = torch.einsum("bhst,bhtd->bhsd", probs, torch_v)
# ad hoc loss
torch_loss = loss_fn(torch_attn_out)
torch_loss.backward()
torch_grads = [torch_q.grad, torch_k.grad, torch_v.grad]
# comparison
torch.testing.assert_close(
loss, torch_loss, msg=f"Triton loss {loss} and torch loss {torch_loss}"
)
for g1, g2 in zip(grads, torch_grads):
torch.testing.assert_close(
torch.norm(g1),
torch.norm(g2),
msg=f"Triton grad {torch.norm(g1).item()} and torch grad {torch.norm(g2).item()}",
)
@pytest.mark.skipif(not _triton_available, reason="Triton requires a recent CUDA gpu")
@pytest.mark.parametrize("dtype", [torch.float16])
def test_blocksparse_attention_parity(dtype):
def _reset_seeds():
torch.manual_seed(0)
seq = 64
model = 128
heads = 4
block_size = 16
batch_size = 2
batched_dim = heads * batch_size
dim_head = model // heads
test_config = {
"dropout": 0.0,
"causal": False,
"seq_len": seq,
"num_heads": 4,
"dim_head": dim_head,
"block_size": block_size,
"layout": torch.ones(seq // block_size, seq // block_size, dtype=torch.long),
}
inputs = torch.rand(batched_dim, seq, model, device="cuda", dtype=dtype)
_reset_seeds()
test_config["name"] = "scaled_dot_product"
attention_sdp = build_attention(test_config)
multi_head_sdp = MultiHeadDispatch(
seq_len=seq,
dim_model=model,
residual_dropout=0.0,
num_heads=heads,
attention=attention_sdp,
).to(device=torch.device("cuda"), dtype=dtype)
r_sdp = multi_head_sdp(inputs, inputs, inputs)
_reset_seeds()
test_config["name"] = "blocksparse"
attention_blocksparse = build_attention(test_config)
multi_head_blocksparse = MultiHeadDispatch(
seq_len=seq,
dim_model=model,
residual_dropout=0.0,
num_heads=heads,
attention=attention_blocksparse,
).to(device=torch.device("cuda"), dtype=dtype)
r_blocksparse = multi_head_blocksparse(inputs, inputs, inputs)
torch.testing.assert_close(r_sdp, r_blocksparse, atol=5e-5, rtol=6e-3)
| EXA-1-master | exa/libraries/xformers/tests/test_triton_blocksparse.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
import xformers
from xformers.components import Activation, build_activation
_gpu_available = torch.cuda.is_available()
_triton_available = xformers._is_triton_available()
if _triton_available:
try:
import triton # noqa: F401
from xformers.triton import dropout as triton_dropout
from xformers.triton.dropout import FusedDropoutBias
from xformers.triton.utils import gpu_capabilities_older_than_70
_triton_available = True
except ImportError:
logging.warning(
"Triton is not available, some optimizations will not be tested."
)
_triton_available = False
# Testing odd (non-power-of-two for instance) shapes on purpose
SHAPES = [
(384, 512),
(8, 384, 128),
(8, 784, 512),
(4, 16, 384),
(4, 16, 1024),
(2, 16, 2048),
(2, 16, 4096),
(1, 16, 12288),
]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
def test_dropout_cpu():
triton_dropout = FusedDropoutBias(p=0.1, bias_shape=None)
x = torch.normal(0, 1, size=(16, 16), device="cpu")
_ = triton_dropout(x)
# Check eval means no dropout
triton_dropout.eval()
y = triton_dropout(x)
assert y.count_nonzero() == y.numel()
triton_dropout.train()
y = triton_dropout(x)
assert y.count_nonzero() != y.numel()
@pytest.mark.skipif(not _gpu_available, reason="GPU is not available")
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("p", [0, 0.1, 0.5])
def test_dropout(shape, amp, bias, p):
"""
Check some basic dropout properties
"""
torch.random.manual_seed(0)
torch.cuda.manual_seed_all(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.normal(0, 1, size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
tol = 1e-2 if amp else 1e-5 # AMP rounding causes issues, 1e-5 is the default
# Check that 0 means no dropout
y = triton_dropout(x, p=0, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert torch.allclose(x_ref, y, rtol=tol), f"{x[x>y]}"
# Check that 1 means drop all
y = triton_dropout(x, p=1, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert torch.allclose(torch.zeros_like(y), y, rtol=tol)
# Check that .99 means probably dropout
y = triton_dropout(x, p=0.99, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert not torch.allclose(x_ref, y, rtol=tol)
# Check that the drops are different for every row (could catch broken seeds per row)
y = triton_dropout(x, p=0.5)
y = y.flatten(0, 1) if y.ndim == 3 else y
assert not torch.sum(torch.eq(y[0, :] == 0.0, y[1, :] == 0.0)) == y.shape[1]
# Check that the drops are different over time, for the same line
y_a = triton_dropout(x, p=0.5)
y_b = triton_dropout(x, p=0.5)
y_a = y_a.flatten(0, 1) if y_a.ndim == 3 else y_a
y_b = y_b.flatten(0, 1) if y_b.ndim == 3 else y_b
assert (
not torch.sum(torch.eq(y_a[0, :] == 0.0, y_b[0, :] == 0.0)).item()
== y.shape[1]
)
# Check that the drop probability is about right
y = triton_dropout(x, p=p)
drop_p = (y.numel() - y.count_nonzero()) / y.numel()
assert abs(drop_p - p) < 0.02
# Check that the same seeds lead to the same dropout
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_1 = triton_dropout(x, p=0.5)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_2 = triton_dropout(x, p=0.5)
torch.testing.assert_close(y_1, y_2)
@pytest.mark.skipif(not _gpu_available, reason="GPU is not available")
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("p", [0, 0.01, 0.5])
def test_dropout_parity(shape, amp, bias, activation, p):
"""
Check that the fused Triton dropout (+ bias + activation) matches the PyTorch reference
"""
torch.random.manual_seed(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
torch.random.manual_seed(0)
x_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b_ = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
torch_activation = build_activation(activation)
res_torch = torch.nn.functional.dropout(
torch_activation(x + b if b is not None else x), p=p
)
loss_torch = torch.sum(res_torch)
res_triton = triton_dropout(x=x_, p=p, bias=b_, activation=activation)
loss_triton = torch.sum(res_triton)
if p < 0.01:
# Check the FW pass
assert torch.allclose(
loss_torch, loss_triton, rtol=0.01
), f"{loss_torch} - {loss_triton}"
# Check the gradients
loss_torch.backward()
loss_triton.backward()
# - gradients wrt inputs
assert torch.allclose(
torch.norm(x.grad), torch.norm(x_.grad), rtol=0.01
), f"{x.grad}\n{x_.grad}"
# - gradients wrt bias
if bias:
assert torch.allclose(
torch.norm(b.grad), torch.norm(b_.grad), rtol=0.01
), f"{b.grad.norm()} - {b_.grad.norm()}"
| EXA-1-master | exa/libraries/xformers/tests/test_triton_dropout.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import pytest
import torch
from xformers.components.attention import FavorAttention, ScaledDotProduct
from xformers.components.attention.feature_maps import (
FeatureMapType,
NormDistribution,
SMHyperbolic,
SMOrf,
SMReg,
)
_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
@pytest.mark.parametrize("features", [SMOrf, SMHyperbolic, SMReg])
def test_random_matrix(features):
torch.random.manual_seed(0)
DRAWS = 100
DIM = 10
for _ in range(DRAWS):
q = features._get_random_ortho_matrix(
1, DIM, device=_device, norm_distribution=NormDistribution.Xi
).squeeze(0)
# Check that the matrix is indeed orthonormal: the diagonal of q @ q.T should be all ones
assert torch.allclose(
torch.diag(q @ q.transpose(0, 1)),
torch.ones(DIM, device=_device),
atol=1e-5,
)
# Check that the row norm is in the right ballpark (sqrt(dim))
assert abs(torch.mean(torch.norm(q, dim=1)).item() - math.sqrt(DIM)) < 1.0
def _plot_distribution(ortho_feature_map):
# Debug helper, check the uniformity of the random matrix draws
DRAWS = 1000
DIM = 50
q = ortho_feature_map._get_random_ortho_matrix(DRAWS, DIM, device=_device)
x, y = [], []
for qq in q:
# For every matrix, look at the real and imaginary eigen value
e = torch.linalg.eigvals(qq)
x.append(e.real)
y.append(e.imag)
# Ideally the distribution of the real and imaginary eigenvalues
# should form a circle in the complex plane
import matplotlib.pyplot as plt
import seaborn as sns
sns.kdeplot(x=torch.cat(x).cpu().numpy(), y=torch.cat(y).cpu().numpy())
plt.axis("equal")
plt.savefig("kde.png")
def _get_rng_data(device):
emb = 10
batch_size = 2
seq_len = 20
num_heads = 1
shape = (batch_size * num_heads, seq_len, emb)
return torch.randn(shape, device=device)
def test_feature_map_shape():
# Check the delayed initialization of the feature map
nb_random_features = 1000
batch = _get_rng_data(_device)
att = FavorAttention(
dropout=0.0,
dim_features=nb_random_features,
feature_map_type=FeatureMapType.SMOrf,
)
_ = att(batch, batch, batch)
assert att.feature_map.features.shape[0] == batch.shape[-1]
assert att.feature_map.features.shape[1] == nb_random_features
def test_feature_map_redraw():
# Check the delayed initialization of the feature map
nb_random_features = 1000
batch = _get_rng_data(_device)
def check(should_redraw: bool):
att = FavorAttention(
dropout=0.0,
dim_features=nb_random_features,
feature_map_type=FeatureMapType.SMOrf,
iter_before_redraw=1 if should_redraw else 100,
)
v0 = att(batch, batch, batch)
assert att.feature_map is not None
f0 = att.feature_map.features
v1 = att(batch, batch, batch)
f1 = att.feature_map.features
# There should not have been a redraw after v0
assert should_redraw != torch.allclose(v0, v1)
assert should_redraw != torch.allclose(f0, f1) # type: ignore
check(should_redraw=True)
check(should_redraw=False)
@pytest.mark.parametrize("feature", ["sm_orf", "sm_hyp", "sm_reg"])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("normalize_inputs", [True, False])
@pytest.mark.parametrize("device", [_device])
def test_favor_approximation_accuracy(feature, causal, normalize_inputs, device):
# Run two attentions in parallel, the normal scaled dot product and the favor approximation
torch.random.manual_seed(0)
query, key, value = (
_get_rng_data(device),
_get_rng_data(device),
_get_rng_data(device),
)
for x in (query, key, value):
x.requires_grad = True
# Build the two attention heads
sdp_attention = ScaledDotProduct(dropout=0.0, causal=causal).to(device)
approx_attention = FavorAttention(
dropout=0.0,
causal=causal,
dim_head=10,
feature_map_type=FeatureMapType(feature),
normalize_inputs=normalize_inputs,
).to(device)
with torch.cuda.amp.autocast(enabled=_device.type == "cuda"):
standard_attention_result = sdp_attention(query, key, value)
approx_attention_result = approx_attention(query, key, value)
mismatch = torch.mean(
(standard_attention_result - approx_attention_result) ** 2
).item()
if causal:
# FIXME(@lefaudeux) the causal case seems significantly worse, not obvious why,
# could be worth investigating
assert mismatch < 0.6
else:
assert mismatch < 0.23
# Check trainability
torch.sum(approx_attention_result).backward()
if __name__ == "__main__":
_plot_distribution(SMOrf)
| EXA-1-master | exa/libraries/xformers/tests/test_favor.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
import xformers
try:
from xformers.triton import FusedLayerNorm
from xformers.triton.utils import gpu_capabilities_older_than_70
_triton_available = xformers._is_triton_available()
except ImportError:
logging.warning("Triton is not available, some optimizations will not be tested.")
_triton_available = False
# Testing odd shapes on purpose
SHAPES = [
(384, 128),
(8, 384, 128),
(8, 784, 512),
(4, 2048, 384),
(4, 3136, 1024),
(2, 1024, 2048),
(2, 2048, 4096),
(2, 4096, 4096),
(1, 2048, 12288),
]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [True, False])
def test_layernorm_parity(shape, amp):
"""Check that PyTorch and Triton softmax give the same result"""
# Get the same inputs
torch.random.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
eps = 1e-4
# Initialize the two layers, weights are 1 and 0 by default, no randomness
torch_layernorm = torch.nn.LayerNorm(X.shape[-1], eps=eps).to("cuda")
triton_layernorm = FusedLayerNorm(X.shape[-1], affine=True, eps=eps).to("cuda")
with autocast(enabled=amp):
assert torch.allclose(X, X_) # sanity checking, else all hell breaks loose
# Check the forward pass
y_torch = torch_layernorm(X)
y_triton = triton_layernorm(X_)
assert torch.allclose(
y_torch.norm(), y_triton.norm(), atol=1e-3
), f"{torch.norm(y_torch)} vs. {torch.norm(y_triton)}"
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton)
loss_triton.backward()
print(torch.norm(y_torch), torch.norm(y_triton))
print(y_torch[0, :])
print(y_triton[0, :])
# There are 3 items to check:
# - gradient on the inputs
assert torch.allclose(
X.grad, X_.grad
), f"Inputs grad mismatch: {torch.norm(X.grad)} vs. {torch.norm(X_.grad)}"
# - gradient on the layernorm weight
assert torch.allclose(
torch_layernorm.weight.grad, triton_layernorm.weight.grad, atol=1e-3
), (
f"Weight grad mismatch: {torch.norm(torch_layernorm.weight.grad)} vs."
+ f" {torch.norm(triton_layernorm.weight.grad)}"
)
# - gradient on the layernorm bias
assert torch.allclose(
torch_layernorm.bias.grad, triton_layernorm.bias.grad, atol=1e-3
), (
f"Bias grad mismatch: {torch.norm(torch_layernorm.bias.grad)} vs."
+ f" {torch.norm(triton_layernorm.bias.grad)}"
)
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.bfloat16])
def test_no_contiguous(dtype):
"""Check that we don't choke on non-contigous tensors"""
shape = (8, 384, 128)
# Get the same inputs
torch.random.manual_seed(0)
torch.cuda.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True, dtype=dtype)
X = X.transpose(2, 1).contiguous().transpose(2, 1)
assert not X.is_contiguous()
triton_layernorm = FusedLayerNorm(X.shape[-1]).to(device="cuda", dtype=dtype)
_ = triton_layernorm(X)
| EXA-1-master | exa/libraries/xformers/tests/test_triton_layernorm.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.factory import xFormer, xFormerConfig
from xformers.helpers.hierarchical_configs import (
BasicLayerConfig,
get_hierarchical_configuration,
)
BATCH = 20
SEQ = 512
MODEL = 384
def test_hierarchical_transformer():
image_size = 32
base_hierarchical_configs = [
BasicLayerConfig(
embedding=64,
attention_mechanism="pooling",
patch_size=7,
stride=4,
padding=2,
seq_len=image_size * image_size // 16,
feedforward="MLP",
),
BasicLayerConfig(
embedding=128,
attention_mechanism="pooling",
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 64,
feedforward="MLP",
repeat_layer=2,
),
BasicLayerConfig(
embedding=320,
attention_mechanism="scaled_dot_product",
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 256,
feedforward="MLP",
),
]
# Fill in the gaps in the config
xformer_config = get_hierarchical_configuration(
base_hierarchical_configs,
residual_norm_style="pre",
use_rotary_embeddings=False,
mlp_multiplier=4,
dim_head=32,
)
config = xFormerConfig(xformer_config)
hierarchical_xformer = xFormer.from_config(config)
# Forward some dummy data
dummy = torch.rand((2, 3, image_size, image_size))
_ = hierarchical_xformer(dummy)
| EXA-1-master | exa/libraries/xformers/tests/test_hierarchical_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention.utils import (
maybe_merge_masks,
reshape_key_padding_mask,
)
def test_reshape_key_padding_mask():
batch_size = 2
num_heads = 2
seq_len = 4
batched_dim = batch_size * num_heads
key_padding_mask = torch.randint(0, 2, (batch_size, seq_len)).to(dtype=torch.bool)
reshaped_mask = reshape_key_padding_mask(
key_padding_mask=key_padding_mask, batched_dim=batched_dim
)
assert reshaped_mask.size() == (batched_dim, 1, seq_len)
merged_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=batch_size,
src_len=seq_len,
num_heads=num_heads,
)
assert torch.equal(merged_mask, reshaped_mask.expand(-1, seq_len, -1))
key_padding_mask = torch.randint(0, 2, (batched_dim, seq_len)).to(dtype=torch.bool)
reshaped_mask = reshape_key_padding_mask(
key_padding_mask=key_padding_mask, batched_dim=batched_dim
)
assert reshaped_mask.size() == (batched_dim, 1, seq_len)
| EXA-1-master | exa/libraries/xformers/tests/test_attention_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
import xformers
try:
from xformers.triton import log_softmax as triton_log_softmax
from xformers.triton import softmax as triton_softmax
_triton_available = xformers._is_triton_available()
except ImportError as e:
logging.warning(
f"Triton is not available, some optimizations will not be tested.\n{e}"
)
_triton_available = False
SHAPES = [
(384, 384),
(2, 384, 384),
(1, 784, 784),
(1, 1024, 1024),
(1, 2048, 2048),
(1, 3136, 3136),
(1, 4096, 4096),
(2, 2, 384, 384),
(2, 2, 2, 384, 384),
]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("amp", [False, True])
@pytest.mark.parametrize("log", [False, True])
@pytest.mark.parametrize("masking", [True, False])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
def test_softmax_parity(shape, amp, log, masking, causal, contiguous):
"""Check that PyTorch and Triton softmax give the same result"""
torch.random.manual_seed(0)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[-1]
mask = torch.zeros((seq, seq)).cuda()
if masking:
mask[torch.rand((seq, seq)) > 0.8] = -float("inf")
mask_triton = mask.clone() if masking else None
if causal:
mask[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
with autocast(enabled=amp):
y_torch = (
torch.log_softmax(X + mask, dim=-1)
if log
else torch.softmax(X + mask, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask_triton, causal)
if log
else triton_softmax(X_, mask_triton, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}"
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.bfloat16])
def test_softmax(dtype):
b, s, d = 8, 64, 32
a = torch.rand(b, s, d, device="cuda", dtype=dtype)
triton_softmax(a)
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.parametrize("log", [False, True])
@pytest.mark.parametrize("masking", [True, False])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_softmax_parity_fallback(log, masking, causal, contiguous, device):
"""Check that the fallback paths are correct"""
torch.random.manual_seed(0)
shape = (16, 16)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device=device, requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[1]
mask = torch.zeros((seq, seq), device=device)
if masking:
mask[torch.rand((seq, seq), device=device) > 0.8] = -float("inf")
mask_causal = torch.zeros_like(mask)
if causal:
mask_causal[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
y_torch = (
torch.log_softmax(X + mask + mask_causal, dim=-1)
if log
else torch.softmax(X + mask + mask_causal, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask, causal)
if log
else triton_softmax(X_, mask, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}"
| EXA-1-master | exa/libraries/xformers/tests/test_triton_softmax.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/tests/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import functools
import random
from contextlib import nullcontext
from typing import ContextManager, Optional, Sequence, cast
import pytest
import torch
import xformers.ops.swiglu_op as xsw
torch.backends.cuda.matmul.allow_tf32 = False
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
if torch.cuda.is_available():
_devices = ["cuda"]
_is_sm80 = torch.cuda.get_device_capability(_devices[0])[0] >= 8
else:
_devices = []
_is_sm80 = False
sm80_only = pytest.mark.skipif(not _is_sm80, reason="requires sm80")
def assert_allclose(
# The output of the tested function
out: torch.Tensor,
# The output of the reference implementation
ref: torch.Tensor,
# The output of the reference implementation in f32
ref32: Optional[torch.Tensor] = None,
msg: str = "failed",
atol: Optional[float] = None,
rtol: Optional[float] = None,
) -> None:
"""
Improved version of
```
assert torch.allclose(out, ref)
```
Except that we provide useful error message, and also compare
to the output of the f32 calculation.
"""
out = out.float()
ref = ref.float()
if atol is None:
atol = 1e-8
if rtol is None:
rtol = 1e-5
assert out.shape == ref.shape
compare_to = ref32 if ref32 is not None else ref
assert out.shape == compare_to.shape
if torch.allclose(out, ref, rtol=rtol, atol=atol) or (
ref32 is not None and torch.allclose(out, ref32, rtol=rtol, atol=atol)
):
return
flatten_diff = ((out - compare_to).abs() - atol - compare_to.abs() * rtol).flatten()
max_pos = flatten_diff.argmax()
if ref32 is not None:
flatten_diff_vsf32 = ((ref - ref32).abs() - atol - ref32.abs() * rtol).flatten()
max_pos_vsf32 = flatten_diff_vsf32.argmax()
assert False, (
f"{msg}: "
f"out={out.flatten()[max_pos]} and ref32={compare_to.flatten()[max_pos]} (diff={flatten_diff[max_pos]} > 0)"
f"/ atol={atol}, rtol={rtol}.\n"
f"NOTE: ref vs ref_f32:\n"
f"ref={ref.flatten()[max_pos_vsf32]} and ref32={ref32.flatten()[max_pos_vsf32]} "
f"(diff={flatten_diff_vsf32[max_pos_vsf32]})"
)
else:
assert False, (
f"{msg}: "
f"out={out.flatten()[max_pos]} and ref={compare_to.flatten()[max_pos]} (diff={flatten_diff[max_pos]} > 0)"
f"/ atol={atol}, rtol={rtol}"
)
def generate_test_shapes():
shapes = [
# Format: [inp.shape[0], inp.shape[1], hidden.shape[1]]
# ViT-Giant
(9456, 1536, 2736),
(4440, 1536, 2736),
(4728, 1536, 2736),
# GPT-3 (small)
(2048, 2048, 5632),
# Chinchilla
(2048, 8192, 22016),
]
# Add some random shapes
r = random.Random(0)
for _ in range(20):
shapes.append(
(r.randint(1, 1000) * 8, r.randint(1, 1000) * 8, r.randint(1, 512) * 8)
)
return shapes
_test_shapes = list(generate_test_shapes())
_test_shapes_ids = [str(s) for s in _test_shapes]
_dtypes = [torch.bfloat16, torch.float16]
_ops: Sequence[xsw.SwiGLUOp] = [xsw.SwiGLUFusedOp, xsw.SwiGLUPackedFusedOp]
@functools.lru_cache(maxsize=1)
def create_module_cached(**kwargs) -> xsw.SwiGLU:
return xsw.SwiGLU(**kwargs)
@pytest.mark.parametrize("autocast", [False, True], ids=["regular", "autocast"])
@pytest.mark.parametrize("op", _ops, ids=[x.NAME for x in _ops])
@pytest.mark.parametrize("dtype", _dtypes, ids=[str(x) for x in _dtypes])
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("bias", [False, True], ids=["nobias", "bias"])
@pytest.mark.parametrize("pack_weights", [False, True], ids=["regular", "packed"])
@pytest.mark.parametrize(
"shape",
_test_shapes,
ids=_test_shapes_ids,
)
def test_forward_backward(
shape,
device,
op,
dtype,
autocast: bool,
pack_weights: bool,
bias: bool,
):
torch.manual_seed(shape[0] * shape[1] * shape[2])
FORWARD_ATOL = {torch.float: 2e-6, torch.half: 1e-2, torch.bfloat16: 1e-2}
FORWARD_RTOL = {torch.float: 1e-5, torch.half: 4e-3, torch.bfloat16: 4e-3}
BACKWARD_ATOL = {
torch.float: 3e-4,
torch.half: 0.5,
torch.bfloat16: 4.0, # !!
}
BACKWARD_RTOL = {
torch.float: 2e-3,
torch.half: 1e-2,
torch.bfloat16: 4e-2,
}
if not op.supports(
xsw.SwiGLUOpDispatch(
device=device,
dtype=dtype,
dtype_autocast_gpu=dtype if autocast and device == "cuda" else None,
packed_weights=pack_weights,
bias_enabled=bias,
)
):
pytest.skip("Not supported by operator")
inp_model_dtype = torch.float if autocast else dtype
x = torch.randn(shape[:2], device=device, dtype=inp_model_dtype)
module = copy.deepcopy(
create_module_cached(
in_features=shape[1],
hidden_features=shape[2],
bias=bias,
_pack_weights=pack_weights,
)
)
x_f32: Optional[torch.Tensor]
ref_f32: Optional[torch.Tensor]
module_f32: Optional[torch.nn.Module]
if dtype != torch.float:
x_f32, module_f32 = x.to(device).to(torch.float), module.to(device)
x_f32.requires_grad_()
ref_f32 = module_f32(x_f32)
else:
x_f32, module_f32, ref_f32 = None, None, None
x, module = x.to(device).to(inp_model_dtype), module.to(device).to(inp_model_dtype)
x.requires_grad_()
# Forward
cm = cast(
ContextManager,
torch.autocast("cuda", dtype=dtype) if autocast else nullcontext(),
)
with cm:
ref = module(x)
out = xsw.swiglu(x, *module._ordered_params(), op=op)
if ref_f32 is None:
ref_f32 = ref
assert_allclose(
out, ref, ref_f32, "fw", atol=FORWARD_ATOL[dtype], rtol=FORWARD_RTOL[dtype]
)
# Backward
grad = torch.randn_like(ref)
def backward_gather_grads(inp, output):
output.backward(grad.to(output.dtype))
grads = {}
for name, param in module.named_parameters():
grads[name] = param.grad.clone()
param.grad = None
grads["x"] = inp.grad.clone()
inp.grad = None
return grads
grads_ref = backward_gather_grads(x, ref)
grads_out = backward_gather_grads(x, out)
grads_ref32 = (
backward_gather_grads(x_f32, ref_f32) if module_f32 is not None else grads_ref
)
assert list(grads_ref.keys()) == list(grads_out.keys())
for name, gref in grads_ref.items():
gout = grads_out[name]
assert_allclose(
gout,
gref,
grads_ref32.get(name),
f"{name}.grad",
atol=BACKWARD_ATOL[dtype],
rtol=BACKWARD_RTOL[dtype],
)
# Ensure `gout >> atol`, so that the test is meaningful
assert gout.norm(2) > BACKWARD_ATOL[dtype] / BACKWARD_RTOL[dtype]
| EXA-1-master | exa/libraries/xformers/tests/test_swiglu.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import pytest
import torch
import xformers.components.attention.attention_patterns as AP
from xformers.components.attention.sparsity_config import (
BigBirdSparsityConfig,
BSLongformerSparsityConfig,
DenseSparsityConfig,
FixedSparsityConfig,
VariableSparsityConfig,
)
# baseline implementations
def _local_1d_pattern(attn_size: int, window_size: int) -> torch.Tensor:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
h_win_size = window_size // 2
attn_shape = (attn_size, attn_size)
full_attn = torch.ones(attn_shape, dtype=torch.bool)
mask = torch.tril(full_attn, diagonal=h_win_size)
mask &= ~torch.tril(full_attn, diagonal=-(h_win_size + 1))
return mask
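# The two tril calls in _local_1d_pattern above build a band mask: mask[i, j] is True
# iff |i - j| <= h_win_size. Illustrative example (assumed sizes): attn_size=5,
# window_size=3 yields a tridiagonal mask where each position attends to itself and its
# two immediate neighbours.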
def _generate_2d_grid(H, W):
i = torch.arange(H)
j = torch.arange(W)
i, j = torch.meshgrid(i, j)
return i, j
def _horizontal_axial_2d_distance(H, W, p=2.0):
i, _ = _generate_2d_grid(H, W)
ij = i.reshape(-1, 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _vertical_axial_2d_distance(H, W, p=2.0):
_, j = _generate_2d_grid(H, W)
ij = j.reshape(-1, 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _local_2d_distance(H, W, p=2.0):
# axial is a special case with p=0 and distance=2
i, j = _generate_2d_grid(H, W)
ij = torch.stack([i.flatten(), j.flatten()], 1).float()
d = torch.cdist(ij, ij, p=p)
return d
def _local_2d_gaussian_distribution(H, W, sigma=1.0):
d = _local_2d_distance(H, W, p=2.0) ** 2
d = torch.exp(-0.5 * sigma ** (-2.0) * d)
return d
@pytest.mark.parametrize("window_size", [3, 7, 11])
@pytest.mark.parametrize("attn_size", [50, 51, 64])
def test_local_1d_pattern(attn_size, window_size):
mask = AP.local_1d_pattern(attn_size, window_size).float()
mask_ref = _local_1d_pattern(attn_size, window_size).float()
assert torch.allclose(mask, mask_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_horizontal_axial_2d_distance(H, W, p):
d = AP.horizontal_axial_2d_distance(H, W, p=p)
d_ref = _horizontal_axial_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_vertical_axial_2d_distance(H, W, p):
d = AP.vertical_axial_2d_distance(H, W, p=p)
d_ref = _vertical_axial_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("p", [0, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_local_2d_distance(H, W, p):
d = AP.local_2d_distance(H, W, p=p)
d_ref = _local_2d_distance(H, W, p=p)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("sigma", [0.5, 1, 2])
@pytest.mark.parametrize("W", [5, 7, 10])
@pytest.mark.parametrize("H", [5, 7, 10])
def test_local_2d_gaussian_distribution(H, W, sigma):
d = AP.local_2d_gausian_distribution(H, W, sigma=sigma)
d_ref = _local_2d_gaussian_distribution(H, W, sigma=sigma)
assert torch.allclose(d, d_ref)
@pytest.mark.parametrize("window_size", [2, 4])
@pytest.mark.parametrize("W", [8, 16])
@pytest.mark.parametrize("H", [8, 16])
def test_swin_attention_pattern(H, W, window_size):
# test non-shifted case
d = AP.swin_attention_pattern(H, W, window_size, shift_size=0)
# partition the self-attention into regions of window_size
# similar to the window_partition function from the original paper
h = H // window_size
w = W // window_size
d = d.reshape(h, window_size, w, window_size, h, window_size, w, window_size)
product = itertools.product(range(h), range(w))
for y, x in product:
# every region should fully attend to itself
assert torch.all(d[y, :, x, :, y, :, x, :])
for y2, x2 in product:
if y == y2 or x == x2:
continue
# different regions shouldn't attend between each other
assert torch.all(~d[y, :, x, :, y2, :, x2, :])
# test shifted case
# in the shifted case, the self-attention should be the same
# as in the non-shifted case, when we pad the inputs, apply the operations and then
# remove the padding from the result
d_shifted = AP.swin_attention_pattern(
H, W, window_size, shift_size=window_size // 2
)
# add padding and remove shift
h = H + window_size
w = W + window_size
d_padded = AP.swin_attention_pattern(h, w, window_size, shift_size=0)
d_padded = d_padded.reshape(h, w, h, w)
# remove padding elements
half_size = window_size // 2
s = slice(half_size, -half_size)
d_padded = d_padded[s, s, s, s].reshape(H * W, H * W)
assert torch.all(d_padded == d_shifted)
@pytest.mark.parametrize("k", [2, 3])
@pytest.mark.parametrize("W", [8, 15])
@pytest.mark.parametrize("H", [8, 15])
def test_dilated_2d_pattern(H, W, k):
d = AP.dilated_2d_pattern(H, W, k)
d = d.reshape(H, W, H, W)
product_HW = itertools.product(range(H), range(W))
product_kk = itertools.product(range(k), range(k))
for h, w in product_HW:
i = h % k
j = w % k
# every kth element is taken
assert torch.all(d[h, w][i::k, j::k])
for ii, jj in product_kk:
if ii == i and jj == j:
continue
# and the other elements are discarded
assert torch.all(~d[h, w][ii::k, jj::k])
def test_pattern_to_layout():
BLOCK = 16
SIZE = 128
LAYOUT_SIZE = SIZE // BLOCK
# All ones
mask1 = torch.ones((SIZE, SIZE), dtype=torch.bool)
layout1 = AP.pattern_to_layout(mask1, BLOCK)
ref1 = torch.ones((LAYOUT_SIZE, LAYOUT_SIZE), dtype=torch.long)
assert torch.allclose(layout1, ref1)
# Diagonal -> expect block diagonal
mask2 = torch.eye(SIZE, dtype=torch.bool)
layout2 = AP.pattern_to_layout(mask2, BLOCK)
ref2 = torch.eye(LAYOUT_SIZE, dtype=torch.long)
assert torch.allclose(layout2, ref2)
# Lower triangular, without the diagonal
# note that the layout needs to include the diagonal, else the coefficients close to it would not be computed
mask3 = torch.tril(torch.ones((SIZE, SIZE)), diagonal=-1).to(torch.bool)
layout3 = AP.pattern_to_layout(mask3, BLOCK)
ref3 = torch.tril(torch.ones((LAYOUT_SIZE, LAYOUT_SIZE)), diagonal=0).to(torch.long)
assert torch.allclose(layout3, ref3)
# Handle heads properly
mask = torch.cat((mask1, mask2, mask3))
layout = AP.pattern_to_layout(mask, BLOCK)
assert torch.allclose(layout, torch.cat((ref1, ref2, ref3)))
# Catch problematic dimensions
mask_off = torch.ones((SIZE + 3, SIZE), dtype=torch.bool)
with pytest.raises(AssertionError):
AP.pattern_to_layout(mask_off, BLOCK)
def test_alibi_pattern():
mask = AP.alibi_pattern(1e-3, (16, 128, 128))
# Minor, check that all the top left corners are True
assert torch.sum(mask[:, 0, 0]) == 16
def test_quick_layouts():
seq_size = 128
block_size = 16
num_heads = 2
# Fixed
assert torch.allclose(
AP.quick_fixed_layout(num_heads, block_size, seq_size),
torch.Tensor(
[
[
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
],
[
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
],
]
).long(),
)
# BSLongformer
assert torch.allclose(
AP.quick_bslongformer_layout(num_heads, block_size, seq_size),
torch.Tensor(
[
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 1],
],
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 1],
],
]
).long(),
)
# Variable
assert torch.allclose(
AP.quick_variable_layout(num_heads, block_size, seq_size),
torch.Tensor(
[
[
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
],
[
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
],
]
).long(),
)
# BigBird (just the shape)
assert AP.quick_bigbird_layout(num_heads, block_size, seq_size).shape == torch.Size(
[num_heads, seq_size // block_size, seq_size // block_size]
)
def test_layout_to_pattern():
torch.allclose(
AP.layout_to_pattern(
layout=torch.Tensor([[[0, 1], [1, 0]], [[1, 0], [0, 1]]]), block_size=2
),
torch.Tensor(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0],
],
[
[1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
],
]
),
)
def test_dense_sparsity_config():
sc = DenseSparsityConfig(num_heads=1, block_size=16)
with pytest.raises(expected_exception=ValueError):
sc.setup_layout(seq_len=17)
assert torch.allclose(
sc.make_layout(seq_len=32), torch.Tensor([[[1, 1], [1, 1]]]).long()
)
def test_big_bird_sparsity_config():
sc = BigBirdSparsityConfig(
num_heads=1,
block_size=16,
num_random_blocks=2,
num_sliding_window_blocks=1,
num_global_blocks=1,
)
with pytest.raises(expected_exception=ValueError):
sc.make_layout(seq_len=16)
sc = BigBirdSparsityConfig(
num_heads=1,
block_size=16,
num_random_blocks=1,
num_sliding_window_blocks=2,
num_global_blocks=1,
)
with pytest.raises(expected_exception=ValueError):
sc.make_layout(seq_len=16)
sc = BigBirdSparsityConfig(
num_heads=1,
block_size=16,
num_random_blocks=1,
num_sliding_window_blocks=1,
num_global_blocks=2,
)
with pytest.raises(expected_exception=ValueError):
sc.make_layout(seq_len=16)
with pytest.raises(expected_exception=NotImplementedError):
BigBirdSparsityConfig(num_heads=1, attention="directional")
def test_bslongformer_sparsity_config():
sc = BSLongformerSparsityConfig(num_heads=1, global_block_end_indices=[1])
assert torch.allclose(
sc.make_layout(128),
torch.Tensor(
[
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 1],
]
]
).long(),
)
with pytest.raises(expected_exception=ValueError):
BSLongformerSparsityConfig(num_heads=1, global_block_end_indices=[])
with pytest.raises(expected_exception=ValueError):
BSLongformerSparsityConfig(num_heads=1, global_block_end_indices=[-1])
def test_fixed_sparsity_config():
# check that the case end < num_blocks is correct
sc = FixedSparsityConfig(num_heads=1, horizontal_global_attention=True)
assert torch.allclose(
sc.make_layout(112),
torch.Tensor(
[
[
[1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
]
]
).long(),
)
with pytest.raises(expected_exception=ValueError):
FixedSparsityConfig(num_heads=1, num_local_blocks=3, num_global_blocks=2)
with pytest.raises(expected_exception=NotImplementedError):
FixedSparsityConfig(num_heads=1, attention="directional")
with pytest.raises(expected_exception=ValueError):
FixedSparsityConfig(
num_heads=1, attention="unidirectional", horizontal_global_attention=True
)
with pytest.raises(expected_exception=ValueError):
FixedSparsityConfig(
num_heads=1,
num_different_global_patterns=2,
different_layout_per_head=False,
)
with pytest.raises(expected_exception=ValueError):
FixedSparsityConfig(
num_heads=1,
num_different_global_patterns=10,
num_local_blocks=4,
num_global_blocks=1,
)
def test_variable_sparsity_config():
sc = VariableSparsityConfig(num_heads=1, global_block_end_indices=[1])
assert torch.allclose(
sc.make_layout(128),
torch.Tensor(
[
[
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
]
]
).long(),
)
with pytest.raises(expected_exception=ValueError):
VariableSparsityConfig(num_heads=1, global_block_end_indices=[])
with pytest.raises(expected_exception=ValueError):
VariableSparsityConfig(num_heads=1, global_block_end_indices=[-1])
| EXA-1-master | exa/libraries/xformers/tests/test_attention_patterns.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.attention import AttentionMask
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_mask_creation():
# Check that we can create from boolean
bool_mask = torch.rand((256, 256)) > 0.5
additive_mask = AttentionMask.from_bool(bool_mask)
assert (bool_mask == additive_mask.to_bool()).all()
bool_mask = torch.rand((2, 256, 256)) > 0.5
additive_mask = AttentionMask.from_bool(bool_mask)
assert (bool_mask == additive_mask.to_bool()).all()
assert additive_mask.ndim == bool_mask.ndim
# Check that we can create from multiplicative
ref_mask = torch.randint(0, 2, (256, 256))
mul_mask = ref_mask.float()
additive_mask = AttentionMask.from_multiplicative(mul_mask)
assert (ref_mask.bool() == additive_mask.to_bool()).all()
# Check the causal mask
causal_mask = AttentionMask.make_causal(256, 256)
assert (torch.tril(torch.ones(256, 256)).bool() == causal_mask.to_bool()).all()
assert causal_mask.is_causal
causal_mask = AttentionMask.make_causal(256)
assert (torch.tril(torch.ones(256, 256)).bool() == causal_mask.to_bool()).all()
causal_mask = AttentionMask.make_causal(256, 128)
assert (torch.tril(torch.ones(256, 128)).bool() == causal_mask.to_bool()).all()
# Check that we can add masks
bool_mask_1 = torch.rand((256, 256)) > 0.5
add_mask_1 = AttentionMask.from_bool(bool_mask_1)
bool_mask_2 = torch.rand((256, 256)) > 0.5
add_mask_2 = AttentionMask.from_bool(bool_mask_2)
assert ((add_mask_1 + add_mask_2).to_bool() == (bool_mask_1 & bool_mask_2)).all()
# Check type handling
additive_mask = AttentionMask.from_bool(torch.rand((256, 256)) > 0.5)
additive_mask = additive_mask.to(device=torch.device("cuda"))
assert "cuda" in str(additive_mask.values.device)
# Check that the causal flag is maintained
additive_mask = AttentionMask.make_causal(256, 256)
additive_mask = additive_mask.to(device=torch.device("cuda"))
assert additive_mask.is_causal
| EXA-1-master | exa/libraries/xformers/tests/test_attention_mask.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import NormalizationType, PreNorm
class Passthrough(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, *args):
return args
@pytest.mark.parametrize("normalization", [n.value for n in NormalizationType])
def test_pre_norm(normalization):
# Check that passing the same tensor a bunch of times skips the extra normalizations
x = torch.rand((3, 3), requires_grad=True)
wrap = PreNorm(
d_norm=3, sublayer=Passthrough(), normalization=normalization, use_triton=False
)
outputs = wrap(inputs=[x, x, x])
assert id(outputs[0]) == id(outputs[1])
# Check the BW pass
torch.sum(outputs[0]).backward()
| EXA-1-master | exa/libraries/xformers/tests/test_residual.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import MultiHeadDispatch
# Automatically test all the registered attentions
from xformers.components.attention import ATTENTION_REGISTRY, build_attention
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
BATCH = 2
SEQ = 128 if torch.cuda.is_available() else 16
MODEL = 128 if torch.cuda.is_available() else 32
assert ATTENTION_REGISTRY.keys(), "Attention layers should have been registered"
@pytest.mark.parametrize("heads", [4])
@pytest.mark.parametrize("attn_dropout", [0.0, 0.3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("rules", [4])
@pytest.mark.parametrize("q_compose", [False, True])
@pytest.mark.parametrize("dim_selection", [MODEL // 2, None])
@pytest.mark.parametrize("num_rules", [2])
@pytest.mark.parametrize("qk_rule", [True, False])
@pytest.mark.parametrize("nonlinear", [True, False])
@pytest.mark.parametrize("device", DEVICES)
def test_build_and_run(
heads: int,
attn_dropout: float,
causal: bool,
rules: int,
q_compose: bool,
dim_selection: int,
num_rules: int,
qk_rule: bool,
nonlinear: bool,
device: torch.device,
):
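# Build a compositional attention module from its config, wrap it in a multi-head
# dispatch, and check shape handling, permutation behavior and dropout.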
torch.manual_seed(42)
test_config = {
"name": "compositional",
"dropout": attn_dropout,
"causal": causal,
"seq_len": SEQ,
"dim_model": MODEL,
"num_heads": heads,
"num_rules": num_rules,
"q_compose": q_compose,
"rules": rules,
"dim_selection": dim_selection,
"qk_rule": qk_rule,
"nonlinear": nonlinear,
}
attention = build_attention(test_config)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
num_heads=heads,
attention=attention,
residual_dropout=0.0,
).to(device)
# Check that a shuffled input produces the same results
seqs = [SEQ, SEQ // 2]
for seq in seqs:
# Check that we can pass a smaller sequence
inputs = torch.rand(BATCH, seq, MODEL, device=device)
shuffle = torch.randperm(inputs.shape[1])
inputs_shuffled = inputs[:, shuffle, :].clone()
results = multi_head(inputs, inputs, inputs)
results_shuffled = multi_head(inputs_shuffled, inputs_shuffled, inputs_shuffled)
if attn_dropout == 0.0 and num_rules == 1 and not causal:
assert (results[:, shuffle, :] - results_shuffled).abs().max() < 1e-3
# Test the non-self-attention codepath
att = multi_head(inputs, inputs_shuffled, inputs)
# Check that dropout actually drops some values
if attn_dropout > 0:
att_2 = multi_head(inputs, inputs_shuffled, inputs)
assert (att != att_2).any()
| EXA-1-master | exa/libraries/xformers/tests/test_compositional_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from xformers import _is_triton_available
from xformers.components.attention._sputnik_sparse import SparseCS
from xformers.components.attention.attention_mask import AttentionMask
from xformers.components.attention.core import scaled_dot_product_attention
if _is_triton_available():
from xformers.triton.utils import gpu_capabilities_older_than_70
_is_blocksparse_available = (
_is_triton_available() and not gpu_capabilities_older_than_70()
)
if _is_blocksparse_available:
import triton.testing
_devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
def test_core_attention():
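# The same random mask is used in sparse and dense form: both code paths of
# scaled_dot_product_attention should return the same result.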
b, s, d = 2, 400, 8
prob = 0.95
a = torch.rand(b, s, d)
m = torch.rand(b, s, s) > prob
m = m.to_sparse()
# Check that the sparse and dense computations are equivalent
r_sparse = scaled_dot_product_attention(a, a, a, m)
r_dense = scaled_dot_product_attention(a, a, a, m.to_dense())
assert torch.allclose(r_sparse, r_dense)
def test_core_attention_mask_types():
b, s, d = 4, 90, 16
prob = 0.8 # make sure that we trigger the sparse kernels
a = torch.rand(b, s, d)
mask = torch.rand(b, s, s) > prob
# mask of bools
r_dense_bool = scaled_dot_product_attention(a, a, a, mask)
r_sparse_bool = scaled_dot_product_attention(a, a, a, mask.to_sparse())
assert torch.allclose(r_dense_bool, r_sparse_bool)
# Test additive mask. Mask of 0's and -infs.
float_mask_add = torch.zeros_like(mask, dtype=torch.float)
float_mask_add = float_mask_add.masked_fill(mask, float("-inf"))
r_dense_add = scaled_dot_product_attention(a, a, a, float_mask_add)
r_sparse_add = scaled_dot_product_attention(a, a, a, float_mask_add.to_sparse())
# Now properly handled
assert torch.allclose(r_dense_add, r_sparse_add)
# Test additive mask with mismatched batch dim
d = b // 2
mask = torch.rand(d, s, s) > prob
float_mask_add = torch.zeros_like(mask, dtype=torch.float)
float_mask_add = float_mask_add.masked_fill(mask, float("-inf"))
# Make sure masking doesn't raise errors
r_dense_add = scaled_dot_product_attention(a, a, a, float_mask_add)
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_dense_no_mask(device):
b, s, d = 8, 64, 32
a = torch.rand(b, s, d, device=device)
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, att_mask=None)
expected_device = torch.float16 if device == "cuda" else torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_dense(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
expected_device = torch.float16 if device == "cuda" else torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_sparse(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
m = m.to_sparse()
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
expected_device = torch.float32
assert r.dtype == expected_device
@pytest.mark.parametrize("device", _devices)
def test_amp_attention_sparsecs(device):
b, s, d = 8, 64, 32
prob = 0.9
a = torch.rand(b, s, d, device=device)
m = torch.rand(s, s, device=device) > prob
m = SparseCS(m, device)
with torch.cuda.amp.autocast():
r = scaled_dot_product_attention(a, a, a, m)
expected_device = torch.float32
assert r.dtype == expected_device
@pytest.mark.skipif(
not _is_blocksparse_available, reason="Blocksparse is not available"
)
@pytest.mark.parametrize("device", ["cuda"])
@pytest.mark.parametrize("data_type", [torch.float16, torch.float32])
def test_switch_blocksparse(device, data_type):
b, s, d = 8, 128, 32
a = torch.rand(b, s, d, device=device, dtype=data_type)
# Custom causal mask
m_custom = torch.triu(
torch.ones(s, s, device=device, dtype=a.dtype) * float("-inf"), diagonal=1
)
m_custom_bool = m_custom != float("-inf")
m_sparse = SparseCS(m_custom_bool, device)
# Mask with causal flag
m_att_mask = AttentionMask.make_causal(s, s, device, dtype=a.dtype)
def kernel():
return scaled_dot_product_attention(a, a, a, m_att_mask)
# Check that a switch to blocksparse is only triggered by causal flag
with torch.cuda.amp.autocast():
r_custom = scaled_dot_product_attention(a, a, a, m_custom)
r_sparse = scaled_dot_product_attention(a, a, a, m_sparse)
r_att_mask = triton.testing.catch_oor(kernel, pytest)
expected_device = torch.float32
assert r_sparse.dtype == expected_device
if r_custom.dtype == r_att_mask.dtype:
assert torch.allclose(r_custom, r_att_mask, atol=1e-6, rtol=1e-2)
else: # r_custom fp16, r_att_mask fp32
assert torch.allclose(r_custom, r_att_mask.half(), atol=1e-6, rtol=1e-2)
@pytest.mark.skipif(
not _is_blocksparse_available, reason="Blocksparse is not available"
)
@pytest.mark.parametrize("device", ["cuda"])
def test_switch_blocksparse_dims(device):
b, s, d, nh = 8, 128, 32, 8
hs = d // nh
data_type = torch.float32
a = torch.rand(b, nh, s, hs, device=device, dtype=data_type)
# Mask with causal flag
m = AttentionMask.make_causal(s, s, device, dtype=a.dtype)
def kernel():
return scaled_dot_product_attention(a, a, a, m)
# Check that passing qkv with shape (B, nh, S, hs) is properly handled
with torch.cuda.amp.autocast():
r = triton.testing.catch_oor(kernel, pytest)
expected_device = torch.float32
assert r.dtype == expected_device
@pytest.mark.skipif(
not _is_blocksparse_available, reason="Blocksparse is not available"
)
@pytest.mark.parametrize("device", ["cuda"])
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("drop_prob", [0.0, 0.3])
def test_switch_blocksparse_dropout(device, training, drop_prob):
b, s, d = 8, 128, 32
a = torch.rand(b, s, d, device=device)
m = AttentionMask.make_causal(s, s, device)
dropout = nn.Dropout(drop_prob)
dropout.train(training).cuda()
def kernel1():
return scaled_dot_product_attention(a, a, a, m)
def kernel2():
return scaled_dot_product_attention(a, a, a, m, dropout)
with torch.cuda.amp.autocast():
r = triton.testing.catch_oor(kernel1, pytest)
r_drop = triton.testing.catch_oor(kernel2, pytest)
# Check for dropout when applicable
if dropout.p and dropout.training:
assert (r_drop != r).any()
else:
assert torch.allclose(r, r_drop)
| EXA-1-master | exa/libraries/xformers/tests/test_core_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pytest
import torch
from torch.cuda.amp.autocast_mode import autocast
import xformers
from xformers.components import Activation, build_activation
_triton_available = xformers._is_triton_available()
if _triton_available:
try:
import triton # noqa: F401
from xformers.triton import FusedLinear
from xformers.triton.k_activations import get_triton_activation_index
from xformers.triton.k_fused_matmul_fw import fused_matmul
from xformers.triton.utils import gpu_capabilities_older_than_70
except ImportError:
logging.warning(
"Triton is not available, some optimizations will not be tested."
)
_triton_available = False
SHAPES = [(128, 256), (8, 384, 128), (8, 784, 512)]
@pytest.mark.skipif(not _triton_available, reason="Triton is not available")
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", [torch.float16])
def test_fused_matmul(shape, dtype):
"""Check that the matrix multiply kernel and Pytorch's give the same results"""
# TODO: fix or remove this
pytest.skip("This is broken")
torch.random.manual_seed(0)
# Raw fused matrix multiply first, to catch gross errors
a = torch.normal(0, 1, size=(shape[-2], shape[-1]), dtype=dtype, device="cuda")
b = torch.normal(0, 1, size=(shape[-1], shape[-2]), dtype=dtype, device="cuda")
# Test that not passing any bias is fine
res_torch = a @ b
res_triton, _ = fused_matmul(
a, b.transpose(0, 1).contiguous(), bias=None, activation=0
)
torch.testing.assert_close(res_torch, res_triton)
# Now test with a real FMA
c = -torch.randn((shape[-2],), dtype=dtype, device="cuda")
res_torch = torch.addmm(c, a, b)
res_triton, _ = fused_matmul(a, b.transpose(1, 0).contiguous(), c)
torch.testing.assert_close(
res_torch,
res_triton,
atol=1e-3,
rtol=1e-3,
msg="Fused matmul broken",
)
# Now check that adding an activation to the mix still produces valid results
# NOTE: SquaredReLU fails with some outlier representation issue, although the eyeballed results look reasonable;
# this could be due to a different accumulation scheme out of the box (tf32 for instance)
for activation in filter(
lambda x: x not in (Activation.SquaredReLU, Activation.StarReLU), Activation
):
torch_activation = build_activation(activation.value)
res_torch = torch_activation(torch.addmm(c, a, b))
triton_activation_index = get_triton_activation_index(activation)
print(activation, triton_activation_index)
res_triton, _ = fused_matmul(
a, b.transpose(1, 0).contiguous(), c, triton_activation_index
)
torch.testing.assert_close(
res_torch,
res_triton,
atol=1e-3,
rtol=1e-3,
msg=f"Fused matmul broken with activation {activation}",
)
@pytest.mark.skipif(
not _triton_available or gpu_capabilities_older_than_70(),
reason="Triton requires a SM70+ GPU",
)
@pytest.mark.parametrize("activation", [None] + [a.value for a in Activation]) # type: ignore
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("amp", [True])
def test_fused_linear_parity(shape, activation: Activation, bias: bool, amp: bool):
"""Check that PyTorch and fused linear layers give the same result"""
# TODO: fix or remove this
pytest.skip("This is broken")
torch.random.manual_seed(0)
# Instantiate pytorch and fused layers, same initialization
X = torch.normal(0, 1, size=shape, device="cuda")
X.requires_grad_()
torch_linear = torch.nn.Linear(shape[-1], shape[-1] // 2, bias=bias).to("cuda")
torch_sequence = torch.nn.Sequential(torch_linear, build_activation(activation))
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda")
X_.requires_grad_()
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize the
# `FusedLinear` import.
triton_fused_linear = FusedLinear(
shape[-1], shape[-1] // 2, bias=bias, activation=activation
).to("cuda")
# Now check parity
torch_linear.train()
triton_fused_linear.train()
torch_linear.zero_grad()
triton_fused_linear.zero_grad()
torch.testing.assert_close(
triton_fused_linear.weight,
torch_linear.weight,
atol=1e-3,
rtol=1e-3,
msg="Broken test setup",
)
torch.testing.assert_close(X, X_, atol=1e-3, rtol=1e-3, msg="Broken test setup")
with autocast(enabled=amp):
y_torch = torch_sequence(X)
y_triton = triton_fused_linear(X_)
grad = torch.randn_like(y_torch)
# Check that BW also gives the same result
y_torch.backward(grad)
y_triton.backward(grad)
torch.testing.assert_close(X, X_, atol=1e-3, rtol=1e-3)
# Input grad being correct checks both the loss + some of the backward pass
assert X.grad is not None and X_.grad is not None
torch.testing.assert_close(X.grad, X_.grad, atol=1e-3, rtol=1e-3)
# Check that the linear layer bias are also properly trainable
if bias:
assert (
triton_fused_linear.bias is not None
and triton_fused_linear.bias.grad is not None
)
assert torch_linear.bias is not None and torch_linear.bias.grad is not None
torch.testing.assert_close(
torch_linear.bias.grad,
triton_fused_linear.bias.grad,
atol=1e-3,
rtol=1e-3,
)
# Check that the linear layer weights are also properly trainable
assert (
torch_linear.weight.grad is not None
and triton_fused_linear.weight.grad is not None
)
torch.testing.assert_close(
torch_linear.weight.grad,
triton_fused_linear.weight.grad,
atol=1e-3,
rtol=1e-3,
)
| EXA-1-master | exa/libraries/xformers/tests/test_triton_fused_linear.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
import xformers.ops as xops
from .utils import assert_allclose
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
@cuda_only
@pytest.mark.parametrize("with_scaling", [False, True])
@pytest.mark.parametrize(
"out_shape", [(48, 1, 257 * 1536), (48, 257, 1536), (192, 50, 1536)]
)
def test_scaled_index_add(out_shape, with_scaling: bool) -> None:
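# Compare xops.scaled_index_add against a float32 torch.index_add reference,
# both for the forward output and for the gradients of every input tensor.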
torch.manual_seed(0)
alpha = 0.73
dtype = torch.float16
B_out, M, D = out_shape
B_src = int(B_out * 0.6)
inp = torch.randn([B_out, M, D], device="cuda", dtype=dtype, requires_grad=True)
src = torch.randn([B_src, M, D], device="cuda", dtype=dtype, requires_grad=True)
TENSORS = {"inp": inp, "src": src}
if with_scaling:
scaling = torch.randn([D], device="cuda", dtype=dtype, requires_grad=True)
TENSORS["scaling"] = scaling
else:
scaling = torch.Tensor()
index_py = [i for i in range(src.shape[0])]
random.Random(B_out).shuffle(index_py)
index = torch.tensor(index_py, dtype=torch.int64, device="cuda")
if with_scaling:
ref_src_scaled = scaling.float() * src.float()
else:
ref_src_scaled = src.float()
ref_out = torch.index_add(
inp.float(), dim=0, source=ref_src_scaled, index=index, alpha=alpha
).to(dtype)
grad_output = torch.randn_like(ref_out)
ref_out.backward(grad_output)
ref_grads = {k: v.grad for k, v in TENSORS.items()}
for v in TENSORS.values():
v.grad = None
# Test FW
out = xops.scaled_index_add(
input=inp.clone(),
index=index,
source=src,
scaling=scaling if with_scaling else None,
alpha=alpha,
)
assert_allclose(out, ref_out, "fw", atol=4e-3, rtol=1e-3)
# Test BW
out.backward(grad_output)
for k, v in TENSORS.items():
atol = 1e-5
rtol = 1e-5
# NOTE: Ordering of operations is not 100% the same as PT, hence the small numeric diff
if k == "scaling":
atol, rtol = 5e-2, 1e-2
assert_allclose(v.grad, ref_grads[k], f"{k}.grad", atol=atol, rtol=rtol) # type: ignore
@cuda_only
@pytest.mark.parametrize("D", [1536])
def test_index_select_cat(D) -> None:
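# Reference behavior: gather a random subset of rows from each source tensor and
# concatenate them flattened; xops.index_select_cat should match in forward and backward.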
torch.manual_seed(0)
dtype = torch.float16
srcs = [
torch.randn([48, 25 * D]),
torch.randn([192, 50 * D]),
]
src = torch.cat([s.view([-1, D]) for s in srcs], dim=0).cuda().to(dtype)
src.requires_grad_(True)
indices = []
sources = []
elements_i = 0
for source_i in srcs:
index = [i for i in range(source_i.shape[0])]
random.Random(source_i.shape[0]).shuffle(index)
indices.append(
torch.tensor(
index[: int(0.6 * source_i.shape[0])], dtype=torch.int64, device="cuda"
)
)
sources.append(
src[
elements_i : elements_i + source_i.shape[0] * source_i.shape[1] // D
].reshape(source_i.shape)
)
elements_i += source_i.shape[0] * source_i.shape[1] // D
# PT implem
ref_out = torch.cat([s[i].flatten() for s, i in zip(sources, indices)], dim=0)
gradient_out = torch.randn_like(ref_out)
ref_out.backward(gradient_out)
assert src.grad is not None
ref_grad = src.grad.clone()
src.grad = None
# xFormers implem
out = xops.index_select_cat(sources, indices)
assert_allclose(out, ref_out, "fw")
out.backward(gradient_out)
assert src.grad is not None
assert_allclose(src.grad, ref_grad, "src.grad")
| EXA-1-master | exa/libraries/xformers/tests/test_indexing.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components import PatchEmbeddingConfig, build_patch_embedding
from xformers.components.positional_embedding import (
POSITION_EMBEDDING_REGISTRY,
build_positional_embedding,
)
BATCH = 20
SEQ = 512
MODEL = 384
assert (
POSITION_EMBEDDING_REGISTRY.keys()
), "Positional encoding layers should have been registered"
@pytest.mark.parametrize("encoding_name", POSITION_EMBEDDING_REGISTRY.keys())
@pytest.mark.parametrize("dropout", [0.0, 0.2])
def test_dimensions(encoding_name: str, dropout: float):
test_config = {
"name": encoding_name,
"dim_model": MODEL,
"vocab_size": 32,
"dropout": dropout,
"seq_len": SEQ,
}
# dummy, just check construction and dimensions in the FW pass
encoding = build_positional_embedding(test_config)
inputs = (torch.rand(BATCH, SEQ) * 10).abs().to(torch.int)
_ = encoding(inputs)
# Test that inputs having an embedding dimension would also work out
if "name" == "sine":
inputs = (torch.rand(BATCH, SEQ, MODEL) * 10).abs().to(torch.int)
_ = encoding(inputs)
def test_patch_embedding():
patch_embedding_config = {
"in_channels": 3,
"out_channels": 64,
"kernel_size": 7,
"stride": 4,
"padding": 2,
}
# dummy, just check construction and dimensions in the FW pass
patch_emb = build_patch_embedding(PatchEmbeddingConfig(**patch_embedding_config))
# Check BHWC
inputs = torch.rand(BATCH, 32 * 32, 3)
out = patch_emb(inputs)
assert out.shape[-1] == 64
# Check BCHW
inputs = torch.rand(BATCH, 3, 32, 32)
out = patch_emb(inputs)
assert out.shape[-1] == 64
| EXA-1-master | exa/libraries/xformers/tests/test_embedding.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# needed to register custom ops
import xformers # noqa: F401
import xformers.components.attention.core
from xformers.components.attention._sputnik_sparse import _csr_to_coo
from xformers.components.attention.core import (
_broadcast_batch,
_create_random_sparsity,
_sparse_bmm,
)
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
_devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
def _baseline_matmul_with_sparse_mask(
a: torch.Tensor, b: torch.Tensor, mask: torch.Tensor
) -> torch.Tensor:
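# Reference implementation: evaluate a @ b only at the coordinates stored in the
# sparse mask and return the result as a sparse COO tensor.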
assert a.ndim == b.ndim
assert mask.ndim == a.ndim
assert a.shape[-1] == b.shape[-2]
assert a.shape[-2] == mask.shape[-2], f"{a.shape}, {mask.shape}"
assert b.shape[-1] == mask.shape[-1], f"{b.shape}, {mask.shape}"
assert a.shape[:-2] == b.shape[:-2], f"{a.shape}, {b.shape}"
assert a.shape[:-2] == mask.shape[:-2], f"{a.shape}, {mask.shape}"
idxs = mask.indices().unbind()
b = b.transpose(-2, -1)
# compute matmul for elements within the mask
val = (a[idxs[:-2] + (idxs[-2], slice(None))] * b[idxs[:-2] + (idxs[-1], slice(None))]).sum(-1) # type: ignore
out_shape = a.shape[:-1] + (b.shape[-2],)
res = torch.sparse_coo_tensor(torch.stack(idxs), val, out_shape)
return res
def _baseline_matmul_with_dense_mask(
a: torch.Tensor, b: torch.Tensor, mask: torch.Tensor
) -> torch.Tensor:
res = a @ b
res[~mask] = float("-inf")
return res
def _baseline_sparse_bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
# need to use torch.sparse.mm to get gradients wrt sparse matrix a
# TODO implement this in C++ / CUDA as this is slow!
out = []
for ai, bi in zip(a, b):
out.append(torch.sparse.mm(ai, bi))
return torch.stack(out, dim=0)
@pytest.mark.parametrize("is_sparse", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_matmul_with_mask(device, contiguous, is_sparse):
B, L, K = 8, 30, 32
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, K, L, device=device)
if not contiguous:
a = a.transpose(-2, -1).contiguous().transpose(-2, -1)
b = b.transpose(-2, -1).contiguous().transpose(-2, -1)
mask = torch.rand(B, L, L, device=device) > prob
fn = torch.ops.xformers.matmul_with_mask
fn_gt = _baseline_matmul_with_dense_mask
if is_sparse:
mask = mask.to_sparse()
fn_gt = _baseline_matmul_with_sparse_mask
res = fn(a, b, mask)
res_gt = fn_gt(a, b, mask)
if is_sparse:
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("is_sparse", [True, False])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_matmul_with_mask_backward(device, contiguous, is_sparse):
if device == "cuda" and is_sparse is False:
# Skip test for now due to bug in torch 1.8
# See https://github.com/pytorch/pytorch/issues/54975
# Broken CUDA / torch 1.8 combination, awaiting an update
return
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(B, L, K, device=device, requires_grad=True)
b = torch.rand(B, K, L, device=device, requires_grad=True)
if not contiguous:
a = a.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
mask = torch.rand(B, L, L, device=device) > prob
fn = torch.ops.xformers.matmul_with_mask
fn_gt = _baseline_matmul_with_dense_mask
if is_sparse:
mask = mask.to_sparse()
fn_gt = _baseline_matmul_with_sparse_mask
def compute_grads(f):
out = f(a, b, mask)
if is_sparse:
out = out.to_dense()
out.sum().backward()
compute_grads(fn)
grad_a = a.grad.clone()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
compute_grads(fn_gt)
assert torch.allclose(grad_a, a.grad)
assert torch.allclose(grad_b, b.grad)
@pytest.mark.parametrize("device", _devices)
def test_sddmm_sputnik(device):
B, L, M, K = 8, 30, 16, 32
prob = 0.5
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device).transpose(-2, -1)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
fn = xformers.components.attention.core._matmul_with_mask
mask = mask.to_sparse()
res = fn(a, b, mask_csr)
res_gt = fn(a, b, mask)
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@cuda_only
@pytest.mark.parametrize("prob", [0.5, 1])
@pytest.mark.parametrize("K", [32, 17])
@pytest.mark.parametrize("M", [30, 17])
@pytest.mark.parametrize("L", [30, 17])
def test_sddmm_csr(L, M, K, prob):
device = torch.device("cuda")
# TODO add more checks for different nnz
B = 8
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.csr_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
res = fn(a, b, row_indices, row_offsets, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@cuda_only
@pytest.mark.parametrize("nnz", [0, 4, 16, 20, 36])
def test_sddmm_csr_per_nnz(nnz):
device = torch.device("cuda")
B = 8
L, M, K = 1024, 1024, 32
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = torch.zeros(L, M, dtype=torch.bool, device=device)
mask.view(-1)[: nnz - 1] = True
mask[-1, -1] = True
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.csr_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
res = fn(a, b, row_indices, row_offsets, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@cuda_only
@pytest.mark.parametrize("prob", [0.5, 1])
@pytest.mark.parametrize("K", [32, 17])
@pytest.mark.parametrize("M", [30, 17])
@pytest.mark.parametrize("L", [30, 17])
def test_sddmm_coo(L, M, K, prob):
device = torch.device("cuda")
# TODO add more checks for different nnz
B = 8
a = torch.rand(B, L, K, device=device)
b = torch.rand(B, M, K, device=device)
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
row_indices = mask_csr.row_indices
row_offsets = mask_csr.row_offsets
column_indices = mask_csr.column_indices
fn = torch.ops.xformers.coo_sddmm
fn_gt = torch.ops.xformers.sddmm_sputnik
# convert from csr to coo
row_coo, _ = _csr_to_coo(L, M, row_offsets, column_indices)
res = fn(a, b, row_indices, row_coo, column_indices)
res_gt = fn_gt(a, b, row_indices, row_offsets, column_indices)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt, atol=1e-6)
@pytest.mark.parametrize("device", _devices)
def test_sddmm_sputnik_backward(device):
contiguous = True
B, L, M, K = 8, 10, 16, 32
prob = 0.5
a = torch.rand(B, L, K, device=device, requires_grad=True)
b = torch.rand(B, M, K, device=device).transpose(-2, -1).requires_grad_(True)
if not contiguous:
a = a.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
mask = _create_random_sparsity(
torch.ones(B, L, M, dtype=torch.bool, device=device), prob
)
mask_csr = xformers.components.attention.core.SparseCS(mask, device)
fn = xformers.components.attention.core._matmul_with_mask
mask = mask.to_sparse()
out_csr = fn(a, b, mask_csr)
out_csr.values.sum().backward()
grad_a = a.grad.clone()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
# fn(a[None], b[None], mask).coalesce().values().sum().backward() # TODO check why this fails
fn(a, b, mask).to_dense().sum().backward()
assert torch.allclose(grad_a, a.grad, atol=1e-7)
assert torch.allclose(grad_b, b.grad, atol=1e-7)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax_sputnik(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core._softmax
a = a.to_sparse()
res = fn(a_csr)
res_gt = fn(a)
res = res.to_dense()
res_gt = res_gt.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax_sputnik_backward(device):
B, L = 8, 30
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core._softmax
a = a.to_sparse()
a_csr.values.requires_grad_(True)
fn(a_csr).values.sum().backward()
grad_a = a_csr.values.grad.clone()
a.requires_grad_(True)
fn(a).coalesce().values().sum().backward()
assert torch.allclose(
grad_a, a.grad.coalesce().values().reshape_as(grad_a), atol=1e-7
)
@pytest.mark.parametrize("device", _devices)
def test_spmm_sputnik(device):
B, L, K = 8, 30, 32
prob = 0.5
a = _create_random_sparsity(torch.rand(B, L, L, device=device), prob)
b = torch.rand(B, L, K, device=device)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core.bmm
a = a.to_sparse()
res = fn(a_csr, b)
res_gt = fn(a, b)
assert res.dtype == res_gt.dtype
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("device", _devices)
def test_spmm_sputnik_backward(device):
B, M, L, K = 8, 16, 30, 32
prob = 0.5
a = _create_random_sparsity(torch.rand(B, M, L, device=device), prob)
b = torch.rand(B, L, K, device=device)
b.requires_grad_(True)
a_csr = xformers.components.attention.core.SparseCS(a, device)
fn = xformers.components.attention.core.bmm
a = a.to_sparse()
a.requires_grad_(True)
a_csr.values.requires_grad_(True)
fn(a_csr, b).sum().backward()
grad_a = a_csr.values.grad.clone()
grad_b = b.grad.clone()
b.grad = None
fn(a, b).sum().backward()
assert torch.allclose(
grad_a, a.grad.coalesce().values().reshape_as(grad_a), atol=1e-7
)
assert torch.allclose(grad_b, b.grad, atol=1e-7)
@cuda_only
def test_csr_transpose():
B, L, K = 8, 30, 40
prob = 0.5
device = torch.device("cuda")
a = _create_random_sparsity(torch.rand(B, L, K, device=device), prob)
a_csr = xformers.components.attention.core.SparseCS(a, device)
res = a_csr.transpose()
res2 = res.transpose()
assert torch.allclose(res.to_dense(), a.transpose(-2, -1))
assert torch.allclose(res2.to_dense(), a)
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("prob", [0.95, 0.996]) # cover > 0.995
@pytest.mark.parametrize("N", [32, 64, 96]) # cover > 64
def test_sparse_bmm(device, contiguous, prob, N):
B, M = 8, 64
a = torch.rand(B, M, N, device=device)
a[a < prob] = 0
a = a.to_sparse()
b = torch.rand(B, N, M, device=device)
if not contiguous:
a = a + a
b = b.transpose(-2, -1).contiguous().transpose(-2, -1)
res = _sparse_bmm(a, b)
res_gt = _baseline_sparse_bmm(a, b)
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("device", _devices)
def test_sparse_bmm_backward(device, contiguous):
if device == "cuda":
# Skip test for now due to bug in torch 1.8
# See https://github.com/pytorch/pytorch/issues/54975
# Broken CUDA / torch 1.8 combination, awaiting an update
return
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(B, L, K, device=device)
a[a < prob] = 0
a = a.to_sparse()
b = torch.rand(B, K, L, device=device, requires_grad=True)
if not contiguous:
a = a + a
b = b.detach().transpose(-2, -1).contiguous().transpose(-2, -1).requires_grad_()
a.requires_grad_(True)
def compute_grads(f):
out = f(a, b)
out.sum().backward()
compute_grads(_sparse_bmm)
grad_a = a.grad.clone().coalesce()
grad_b = b.grad.clone()
a.grad = None
b.grad = None
compute_grads(_baseline_sparse_bmm)
new_grad_a = a.grad.coalesce()
assert torch.allclose(grad_a.indices(), new_grad_a.indices())
assert torch.allclose(grad_a.values(), new_grad_a.values())
assert torch.allclose(grad_b, b.grad)
@pytest.mark.parametrize("device", _devices)
def test_sparse_coo_broadcast(device):
B, L, K = 8, 10, 16
prob = 0.5
a = torch.rand(L, K, device=device)
a[a < prob] = 0
a_sparse = a.to_sparse()
res = _broadcast_batch(a_sparse, B)
res_gt = a[None, :, :].expand(B, L, K)
assert torch.allclose(res.to_dense(), res_gt)
| EXA-1-master | exa/libraries/xformers/tests/test_custom_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode
import xformers.profiler
from xformers.profiler.slow_ops_profiler import GemmOpComputeFlops, flop_mapping
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
# Not using the PyTorch profiler, as it causes segfaults
# in the CI ~30% of the time
TEST_SCHEDULE = tuple(
x
for x in xformers.profiler.api.DEFAULT_SCHEDULE
if x[0] is not xformers.profiler.PyTorchProfiler
)
class GEMMShapeDispatcher(TorchDispatchMode):
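# Dispatch mode that intercepts every op and records the (M, N, K) shape that the
# profiler's flop mapping infers for the last GEMM-like op it sees.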
def __init__(self) -> None:
super().__init__()
self.mnk = (0, 0, 0)
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
compute_flops = flop_mapping[func._overloadpacket]
if isinstance(compute_flops, GemmOpComputeFlops):
self.mnk = compute_flops._get_mnk(args)
return func(*args)
def test_gemm_flops() -> None:
M, N, K = 13, 17, 53
a = torch.empty([M, K])
b = torch.empty([K, N])
x = torch.empty([K])
with GEMMShapeDispatcher() as disp:
a @ b
assert disp.mnk == (M, N, K)
with GEMMShapeDispatcher() as disp:
a @ x
assert disp.mnk == (M, 1, K)
with GEMMShapeDispatcher() as disp:
torch.nn.functional.linear(a, b.transpose(0, 1))
assert disp.mnk == (M, N, K)
with GEMMShapeDispatcher() as disp:
torch.addmm(torch.empty([1, 1]), a, b)
assert disp.mnk == (M, N, K)
B = 3
ba = torch.empty([B, M, K])
bb = torch.empty([B, K, N])
with GEMMShapeDispatcher() as disp:
ba @ bb
assert disp.mnk == (B * M, N, K)
with GEMMShapeDispatcher() as disp:
ba @ bb[:1]
assert disp.mnk == (B * M, N, K)
with GEMMShapeDispatcher() as disp:
ba[:1] @ bb
assert disp.mnk == (B * M, N, K)
with GEMMShapeDispatcher() as disp:
ba @ bb[0]
assert disp.mnk == (B * M, N, K)
with GEMMShapeDispatcher() as disp:
torch.addbmm(torch.empty([1, 1]), ba, bb)
assert disp.mnk == (B * M, N, K)
@cuda_only
def test_profiler_dispatcher_stream_workaround() -> None:
x = torch.zeros([10, 10], device="cuda")
with xformers.profiler.profile(
"test_profiler_dispatcher_stream_workaround", schedule=TEST_SCHEDULE
):
for _ in range(20):
x.record_stream(torch.cuda.Stream()) # type: ignore
xformers.profiler.step()
@pytest.mark.parametrize(
"device_bs_mm",
[("cpu", 512, 1)]
+ (
[
# GPU bound
("cuda", 4096, 8),
# CPU bound on GPU
("cuda", 1, 1),
]
if torch.cuda.is_available()
else []
),
)
def test_profiler_overhead(device_bs_mm) -> None:
PROFILER_MAX_STEPS_OVERHEAD = 30
device, bs, model_mult = device_bs_mm
model = torch.nn.Sequential(
torch.nn.Linear(1024, 512 * model_mult),
torch.nn.Linear(512 * model_mult, 1024),
)
model.to(device)
inp = torch.randn([bs, 1024], device=device)
optim = torch.optim.Adam(model.parameters())
def one_step() -> None:
model(inp).sum().backward()
optim.step()
optim.zero_grad()
# Warmup
for _ in range(2):
one_step()
# Run with profiler
with xformers.profiler.profile(
"test_profiler_overhead", module=model, schedule=TEST_SCHEDULE
):
for _ in range(PROFILER_MAX_STEPS_OVERHEAD):
one_step()
assert not model._forward_hooks
assert not model._forward_pre_hooks
assert not model._backward_hooks
assert _get_current_dispatch_mode() is None
| EXA-1-master | exa/libraries/xformers/tests/test_profiler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
from typing import List, Optional, Sequence, Tuple, Type, TypeVar
import pytest
import torch
from scipy.stats import binom_test
from torch.utils.checkpoint import checkpoint
import xformers.ops
from xformers.ops import fmha
from xformers.ops.fmha.common import AttentionOpBase
from .utils import assert_allclose
torch.backends.cuda.matmul.allow_tf32 = False
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
compute_capability = (0, 0)
if torch.cuda.is_available():
compute_capability = torch.cuda.get_device_capability("cuda")
sm75_or_better_only = pytest.mark.skipif(
compute_capability < (7, 5), reason="requires sm75+"
)
_devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
ALL_FW_OPS: Sequence[Type[fmha.common.AttentionFwOpBase]] = [
fmha.cutlass.FwOp,
fmha.flash.FwOp,
fmha.triton.FwOp,
fmha.small_k.FwOp,
]
ALL_BW_OPS: Sequence[Type[fmha.common.AttentionBwOpBase]] = [
fmha.cutlass.BwOp,
fmha.flash.BwOp,
fmha.triton.BwOp,
fmha.small_k.BwOp,
]
T = TypeVar(
"T", Type[fmha.common.AttentionFwOpBase], Type[fmha.common.AttentionBwOpBase]
)
def _filter_unsupported_ops(ops: Sequence[T]) -> Sequence[T]:
return [
op
for op in ops
if (
"cpu" in op.SUPPORTED_DEVICES
or op.CUDA_MINIMUM_COMPUTE_CAPABILITY <= compute_capability
)
and op.is_available()
]
ALL_FW_OPS = _filter_unsupported_ops(ALL_FW_OPS)
ALL_BW_OPS = _filter_unsupported_ops(ALL_BW_OPS)
def sample_random_supported_fw(
inp: fmha.Inputs, seed: int
) -> Type[fmha.common.AttentionFwOpBase]:
r = random.Random(seed)
fw_ops = list(ALL_FW_OPS)
r.shuffle(fw_ops)
for op in fw_ops:
if op.supports(inp):
return op
raise NotImplementedError(f"Could not find a FW operator for: {inp}")
def generate_test_shapes_B_Mq_Mkv_H_K_Kv(op):
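# Enumerate (B, Mq, Mkv, H, K, Kv) test shapes for a given operator: regular grids,
# odd sizes, different K/Kv, several head counts and a few random shapes.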
shapes = []
for B in op._TEST_BATCH_SIZES:
for Mq in [32, 256]:
for Mkv in [32, 64, 256]:
for K in op._TEST_K:
shapes.append((B, Mq, Mkv, 1, K, K))
Mq = 256
Mkv = 128
K = 32
H = 1
# Weird values of parameters
for M in [2, 3, 15, 31, 32, 34, 68, 72, 90, 132, 136]:
shapes.append((B, M, Mkv, H, K, K))
shapes.append((B, Mq, M, H, K, K))
for _K in [1, 2, 3, 31, 34, 36, 38, 40, 64, 256 + 2, 256 + 8, 512]:
if _K <= op.SUPPORTED_MAX_K:
shapes.append((B, Mq, Mkv, H, _K, _K))
# Different value for K / Kv
if op.SUPPORTS_DIFFERENT_VALUE_EMBED:
for _K in [32, 36, 64, 256 + 8]:
shapes.append((B, Mq, Mkv, H, K, _K))
shapes.append((B, Mq, Mkv, H, _K, K))
# Exotic sizes
for K in op._TEST_K:
shapes.append((B, 16, 1024, H, K, K))
shapes.append((B, 1024, 16, H, K, K))
# Some number of heads
for H in [3, 5, 12]:
shapes.append((max(1, B // H), Mq, Mkv, H, K, K))
# Add some random shapes
if op in [
fmha.cutlass.FwOp,
fmha.cutlass.BwOp,
fmha.flash.BwOp,
]:
K_CHOICES = [8 * i for i in range(1, 256 // 8)]
r = random.Random(0)
for _ in range(20):
B = r.randint(1, 400)
Mq = r.randint(1, 500)
Mkv = r.randint(1, 500)
H = r.randint(2, 11)
B = max(B // H, 1)
K = r.choice(K_CHOICES)
Kv = r.choice(K_CHOICES)
if not op.SUPPORTS_DIFFERENT_VALUE_EMBED:
Kv = K
shapes.append((B, Mq, Mkv, H, K, Kv))
return shapes
def _generate_op_device_dtype_biasT_B_Mq_Mkv_H_K_Kv(
ops_list: Sequence[Type[fmha.AttentionOpBase]], max_shapes_per_op: int = 65000
):
r = random.Random(0)
combination = []
ids = []
for op in ops_list:
op_count = 0
for shape in generate_test_shapes_B_Mq_Mkv_H_K_Kv(op):
has_one = False
for device in _devices:
if device not in op.SUPPORTED_DEVICES:
continue
for dtype in op.SUPPORTED_DTYPES:
bias_type = r.choice(list(op.SUPPORTED_ATTN_BIAS_TYPES))
# Avoid using too much memory
if bias_type not in [
type(None),
fmha.attn_bias.LowerTriangularMask,
]:
B, Mq, Mkv, H, K, Kv = shape
B = min(B, 12)
if (
bias_type
is fmha.attn_bias.BlockDiagonalCausalFromBottomRightMask
):
Mq, Mkv = min(Mkv, Mq), max(Mkv, Mq) + 2
shape = (B, Mq, Mkv, H, K, Kv)
combination.append((op, device, dtype, bias_type, *shape))
ids.append(
f"{op.NAME}-{device}-{str(dtype)}-{bias_type.__name__}"
f"-{'-'.join([str(s) for s in shape])}"
)
has_one = True
if has_one:
op_count += 1
if op_count > max_shapes_per_op:
break
# Some specific shapes for which we want to run without any mask
bias_type = type(None)
for shape in (
# Some strides/dims don't fit on an uint16
(1, 128, 128, 300, 128, 128),
(13, 1, 67, 200, 8, 8),
(1, 1 + 2**16, 4, 1, 8, 8),
(1, 4, 1 + 2**16, 1, 8, 8),
# TODO: Some strides don't fit on an uint32
# Crashes on Flash, Errors on Cutlass
# (1, 1, 64000, 300, 128, 128)
):
for device in _devices:
if device not in op.SUPPORTED_DEVICES:
continue
for dtype in op.SUPPORTED_DTYPES:
combination.append((op, device, dtype, bias_type, *shape))
ids.append(
f"{op.NAME}-{device}-{str(dtype)}-{bias_type.__name__}"
f"-{'-'.join([str(s) for s in shape])}"
)
return {
"argvalues": combination,
"ids": ids,
}
parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv = pytest.mark.parametrize(
"opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv",
**_generate_op_device_dtype_biasT_B_Mq_Mkv_H_K_Kv(ALL_FW_OPS),
)
parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs = pytest.mark.parametrize(
"opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv",
**_generate_op_device_dtype_biasT_B_Mq_Mkv_H_K_Kv(ALL_FW_OPS, max_shapes_per_op=1),
)
parametrize_opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv = pytest.mark.parametrize(
"opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv",
**_generate_op_device_dtype_biasT_B_Mq_Mkv_H_K_Kv(ALL_BW_OPS),
)
parametrize_opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs = pytest.mark.parametrize(
"opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv",
**_generate_op_device_dtype_biasT_B_Mq_Mkv_H_K_Kv(ALL_BW_OPS, max_shapes_per_op=1),
)
def ref_attention(q, k, v, attn_bias=None, drop_mask=None, p=0.0, scale=None):
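# Reference attention in float32: scale the queries, add the (materialized) bias,
# softmax, apply the optional dropout mask, then weight the values.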
if q.ndim == 4:
assert p == 0.0
return ref_attention_bmhk(q, k, v, attn_bias=attn_bias)
q = q.float()
k = k.float()
v = v.float()
scale = scale if scale is not None else (1 / q.shape[-1] ** 0.5)
q = q * scale
attn = q @ k.transpose(-2, -1)
if attn_bias is not None:
if isinstance(attn_bias, xformers.ops.AttentionBias):
# Always create in B,H,Mq,Mk format
attn_bias_tensor = attn_bias.materialize(
(q.shape[0], 1, q.shape[1], k.shape[1]),
device=q.device,
dtype=torch.float32,
)
else:
attn_bias_tensor = attn_bias
if attn_bias_tensor.ndim == 4:
assert q.shape[0] == attn_bias_tensor.shape[0] * attn_bias_tensor.shape[1]
attn_bias_tensor = attn_bias_tensor.reshape(
[-1, *attn_bias_tensor.shape[2:]]
)
attn = attn + attn_bias_tensor.float()
attn = attn.softmax(-1)
if drop_mask is not None:
attn = attn * (drop_mask / (1 - p))
return attn @ v
def ref_attention_bmhk(q, k, v, attn_bias, scale=None) -> torch.Tensor:
assert q.ndim == 4
def T(t):
return t.permute((0, 2, 1, 3)).reshape(
[t.shape[0] * t.shape[2], t.shape[1], t.shape[3]]
)
if isinstance(attn_bias, xformers.ops.AttentionBias):
attn_bias = attn_bias.materialize(
(q.shape[0], q.shape[2], q.shape[1], k.shape[1]),
device=q.device,
dtype=torch.float32,
).reshape([q.shape[0] * q.shape[2], q.shape[1], k.shape[1]])
out = ref_attention(T(q), T(k), T(v), attn_bias, scale=scale)
out = out.reshape([q.shape[0], q.shape[2], q.shape[1], v.shape[3]])
return out.permute((0, 2, 1, 3))
def _rand_seqlens(
r: random.Random,
bs: int,
q_len: int,
kv_len: int,
more_keys_than_queries_per_block: bool,
) -> Tuple[Sequence[int], Sequence[int]]:
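# Draw random per-block query/key sequence lengths summing to bs * q_len and
# bs * kv_len; with more_keys_than_queries_per_block, every block gets at least
# as many keys as queries.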
if more_keys_than_queries_per_block:
assert kv_len >= q_len
q_len *= bs
kv_len *= bs
seqlens_q: List[int] = []
seqlens_k: List[int] = []
step_q = [max(1, q_len // 10), max(2, q_len // 2)]
step_k = [max(1, kv_len // 10), max(2, kv_len // 2)]
while sum(seqlens_q) < q_len and sum(seqlens_k) < kv_len:
num_queries = r.randrange(*step_q)
seqlens_q.append(num_queries)
if more_keys_than_queries_per_block:
# Must select at least `num_queries` keys
# But also leave enough keys for later
keys_left = kv_len - sum(seqlens_k, 0)
queries_left = q_len - sum(seqlens_q[:-1], 0)
assert keys_left >= queries_left
seqlens_k.append(num_queries + r.randrange(0, keys_left - queries_left))
else:
seqlens_k.append(r.randrange(*step_k))
seqlens_q[-1] = q_len - sum(seqlens_q[:-1])
seqlens_k[-1] = kv_len - sum(seqlens_k[:-1])
return seqlens_q, seqlens_k
def _rand_seqlens_padded_k(
r: random.Random, bs: int, q_len: int, kv_len: int
) -> Tuple[Sequence[int], Sequence[int]]:
# we need q_seqlens and k_seqlens to each have bs entries; k_seqlens values must be <= kv_len
# no other constraint on q_seqlens, but they must still sum to the total query length
k_seqlens = [r.randint(1, kv_len - 1) for _ in range(bs)]
q_len *= bs
q_idx = {0, q_len}
while len(q_idx) < bs + 1:
q_idx.add(r.randint(1, q_len - 1))
s = sorted(q_idx)
q_seqlens = [e - b for b, e in zip(s[:-1], s[1:])]
return q_seqlens, k_seqlens
def _create_aligned_bias(B: int, H: int, Mq: int, Mkv: int, **kwargs) -> torch.Tensor:
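# Allocate the bias with the last dimension padded to a multiple of 8, then slice
# back to Mkv so the underlying storage keeps an aligned stride.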
align_to = 8
return (
torch.randn(
(
B,
H,
Mq,
align_to * ((Mkv + align_to - 1) // align_to),
),
**kwargs,
)
* 3
)[:, :, :, :Mkv]
def create_attn_bias(
bias_type,
batch_size: int,
num_heads: int,
q_len: int,
kv_len: int,
device,
dtype,
requires_grad: bool,
fmt: str,
op: Type[AttentionOpBase],
):
if bias_type is None or isinstance(None, bias_type):
return None
r = random.Random("-".join(map(str, [batch_size, q_len, kv_len, dtype, fmt])))
if bias_type is torch.Tensor:
if fmt == "BMK":
batch_size *= num_heads
num_heads = 1
# `small_k` only supports an expanded 1d bias
if op in [fmha.small_k.FwOp, fmha.small_k.BwOp]:
attn_bias = (
torch.randn(
(batch_size, num_heads, 1, kv_len), device=device, dtype=dtype
)
* 3
)
attn_bias = attn_bias.expand(batch_size, num_heads, q_len, kv_len)
else:
attn_bias = _create_aligned_bias(
batch_size,
num_heads,
q_len,
kv_len,
device=device,
dtype=dtype,
)
# make sure it also works if the first columns are partially masked out
attn_bias[0, 0, q_len - 1 :, : num_heads - 2] = -math.inf
if requires_grad:
attn_bias.requires_grad_(True)
return attn_bias
if bias_type is fmha.attn_bias.LowerTriangularMask:
return fmha.attn_bias.LowerTriangularMask()
if bias_type is fmha.attn_bias.LowerTriangularMaskWithTensorBias:
attn_bias = _create_aligned_bias(
batch_size,
num_heads,
q_len,
kv_len,
device=device,
dtype=dtype,
)
if requires_grad:
attn_bias.requires_grad_(True)
return fmha.attn_bias.LowerTriangularMaskWithTensorBias(attn_bias)
if bias_type in [
fmha.attn_bias.BlockDiagonalMask,
fmha.attn_bias.BlockDiagonalCausalMask,
fmha.attn_bias.BlockDiagonalCausalFromBottomRightMask,
]:
# This bias is not supported in BMK format
assert fmt == "BMHK"
block_diag = fmha.attn_bias.BlockDiagonalMask.from_seqlens(
*_rand_seqlens(
r,
batch_size,
q_len,
kv_len,
more_keys_than_queries_per_block=bias_type
is fmha.attn_bias.BlockDiagonalCausalFromBottomRightMask,
)
)
if bias_type is fmha.attn_bias.BlockDiagonalCausalMask:
block_diag = block_diag.make_causal()
if bias_type is fmha.attn_bias.BlockDiagonalCausalFromBottomRightMask:
block_diag = block_diag.make_causal_from_bottomright()
return block_diag
if bias_type == fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask:
assert fmt == "BMHK"
q, k = _rand_seqlens_padded_k(r, batch_size, q_len, kv_len)
g_block_diag = (
fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
q_seqlen=q,
kv_padding=kv_len,
kv_seqlen=k,
causal_diagonal=torch.tensor(
[r.randint(0, kk) for kk in k], dtype=torch.int32
),
)
)
return g_block_diag
assert False, f"Unsupported bias type: {bias_type}"
def get_bias_grad(attn_bias, clear: bool = False) -> Optional[torch.Tensor]:
tensor_with_grad: Optional[torch.Tensor] = None
if isinstance(attn_bias, torch.Tensor):
tensor_with_grad = attn_bias
if isinstance(attn_bias, fmha.attn_bias.LowerTriangularMaskWithTensorBias):
tensor_with_grad = attn_bias._bias
if tensor_with_grad is not None:
grad = tensor_with_grad.grad
if clear:
tensor_with_grad.grad = None
return grad
return None
def create_tensors(
op: Type[AttentionOpBase],
device,
dtype,
attn_bias_type,
B,
q_len,
kv_len,
h,
k,
kv,
*,
attn_bias_requires_grad: bool = False,
fmt: str = "BMK",
):
torch.manual_seed(B * q_len + kv_len * k + kv)
scale = 3
if fmt == "BMK":
query = torch.randn((B * h, q_len, k), device=device, dtype=dtype).mul_(scale)
key = torch.randn((B * h, kv_len, k), device=device, dtype=dtype).mul_(scale)
value = torch.randn((B * h, kv_len, kv), device=device, dtype=dtype).mul_(scale)
else:
assert fmt == "BMHK"
query = torch.randn((B, q_len, h, k), device=device, dtype=dtype).mul_(scale)
key = torch.randn((B, kv_len, h, k), device=device, dtype=dtype).mul_(scale)
value = torch.randn((B, kv_len, h, kv), device=device, dtype=dtype).mul_(scale)
if fmt == "BMK" and not fmha.common._is_bias_type_supported_in_BMK(attn_bias_type):
attn_bias_type = None
attn_bias = None
if attn_bias_type is not None:
attn_bias = create_attn_bias(
attn_bias_type,
batch_size=B,
num_heads=h,
q_len=q_len,
kv_len=kv_len,
dtype=dtype,
device=device,
requires_grad=attn_bias_requires_grad,
fmt=fmt,
op=op,
)
if isinstance(
attn_bias,
(
fmha.attn_bias.BlockDiagonalMask,
fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask,
),
):
query, key, value = [
x.reshape([1, -1, *x.shape[2:]]) for x in [query, key, value]
]
inputs = fmha.Inputs(query=query, key=key, value=value, attn_bias=attn_bias)
reasons = op.not_supported_reasons(inputs)
if reasons:
err_msg = f"{op.NAME}: unsupported ({'/'.join(reasons)})"
# Ensure we free memory to avoid OOMs
del query, key, value, attn_bias, inputs
pytest.skip(err_msg)
return query, key, value, attn_bias
def bmhk2bmk(tensor) -> torch.Tensor:
return (
tensor.permute((0, 2, 1, 3))
.contiguous()
.view([tensor.shape[0] * tensor.shape[2], tensor.shape[1], tensor.shape[3]])
)
def bmk2bmhk(tensor, num_heads: int) -> torch.Tensor:
return tensor.reshape([-1, num_heads, tensor.shape[1], tensor.shape[2]]).permute(
(0, 2, 1, 3)
)
@pytest.mark.parametrize("fmt", ["BMK", "BMHK"])
@pytest.mark.parametrize("packed", [False, True])
@parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
def test_forward(
opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
packed,
fmt,
):
(
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
) = opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
if packed and not (k == kv and q_len == kv_len):
pytest.skip(
f"packed incompatible with `k ({k}) != kv ({kv})` or `q_len ({q_len}) != kv_len ({kv_len})`"
)
if fmt == "BMK" and not fmha.common._is_bias_type_supported_in_BMK(bias_type):
pytest.skip("BMK incompatible with this bias")
query, key, value, attn_bias = create_tensors(
*opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt="BMHK" if packed else fmt
)
if packed:
c = torch.stack([query, key, value], 2)
if fmt == "BMK":
# bm3hk -> 3bhmk -> 3Bmk
c = c.permute(2, 0, 3, 1, 4).view([3, -1, q_len, k])
query, key, value = c[0], c[1], c[2]
# Re-create bias in the right format
attn_bias = create_attn_bias(
bias_type=bias_type,
batch_size=batch_size,
num_heads=h,
q_len=q_len,
kv_len=kv_len,
device=device,
dtype=dtype,
requires_grad=False,
fmt=fmt,
op=op,
)
else:
# bm3hk -> 3 x bmhk
query, key, value = xformers.ops.unbind(c, 2)
assert not query.is_contiguous()
out = xformers.ops.memory_efficient_attention_forward(
query, key, value, attn_bias, op=op
)
assert not out.isnan().any(), "Output has NaNs"
out2 = xformers.ops.memory_efficient_attention_forward(
query, key, value, attn_bias, op=op
)
assert torch.allclose(out, out2, atol=0.0, rtol=0.0), "Non-deterministic behavior"
ref = ref_attention(query, key, value, attn_bias)
assert out.shape == ref.shape, out.shape
assert_allclose(
out.float(),
ref,
atol=op.ERROR_ATOL[dtype],
rtol=op.ERROR_RTOL.get(dtype, 1e-5),
)
@pytest.mark.parametrize("k_len", [5, 6, 32])
@pytest.mark.parametrize("batch_size", [1, 4])
@pytest.mark.parametrize("kv_len", [128, 512])
@pytest.mark.parametrize("q_len", [128, 512])
@pytest.mark.parametrize("device", _devices)
def test_key_query_all_ones(device, q_len, kv_len, batch_size, k_len):
scale = 3
query = torch.ones((batch_size, q_len, k_len), device=device)
key = torch.ones((batch_size, kv_len, k_len), device=device)
value = torch.randn((batch_size, kv_len, k_len), device=device) * scale
out = xformers.ops.memory_efficient_attention(query, key, value)
# this should be equivalent to the average over value
ref = value.mean(1, keepdim=True).expand_as(query)
assert_allclose(out, ref, atol=1e-5)
def _block_diag_reshape_lse(
lse: torch.Tensor, q_seqinfo: fmha.attn_bias._SeqLenInfo
) -> torch.Tensor:
"""LSE can be padded, let's remove the padding"""
parts = []
for slice, (start, end) in zip(lse.unbind(0), q_seqinfo.intervals()):
parts.append(slice[:, : end - start])
return torch.cat(parts, dim=1).unsqueeze(1)
@parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
def test_logsumexp(opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv):
(
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
) = opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
query, key, value, attn_bias = create_tensors(
*opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt="BMK"
)
_out, lse = xformers.ops.memory_efficient_attention_forward_requires_grad(
query,
key,
value,
op=op,
attn_bias=attn_bias,
)
attn = (query.float() / k**0.5) @ key.float().transpose(-2, -1)
if attn_bias is not None:
if isinstance(attn_bias, xformers.ops.AttentionBias):
tensor_bias = attn_bias.materialize(
(query.shape[0], 1, query.shape[1], key.shape[1]),
device=query.device,
dtype=torch.float32,
)
else:
assert isinstance(attn_bias, torch.Tensor)
tensor_bias = attn_bias
if tensor_bias.ndim == 4:
tensor_bias = tensor_bias.reshape([-1, *tensor_bias.shape[2:]])
attn = attn + tensor_bias.float()
ref_lse = attn.logsumexp(-1)
if isinstance(attn_bias, fmha.attn_bias.BlockDiagonalMask):
lse = _block_diag_reshape_lse(lse, attn_bias.q_seqinfo)
assert_allclose(lse[:, 0, : ref_lse.shape[1]], ref_lse, atol=2e-4)
@pytest.mark.parametrize("fmt", ["BMK", "BMHK"])
@pytest.mark.parametrize("grad_out_contiguous", [False, True])
@parametrize_opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
def test_backward(
opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
grad_out_contiguous,
fmt,
):
(
op_bw,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
) = opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
attn_bias_requires_grad = (
random.Random(q_len + kv_len * batch_size).randint(0, 1) > 0
)
query, key, value, attn_bias = create_tensors(
*opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
attn_bias_requires_grad=attn_bias_requires_grad,
fmt=fmt,
)
op_fw = (
sample_random_supported_fw(
fmha.Inputs(query=query, key=key, value=value, attn_bias=attn_bias),
seed=q_len * kv + kv_len * k,
)
if op_bw != fmha.cutlass.BwOp
else fmha.cutlass.FwOp
)
qkv = None
if (
fmt == "BMHK"
and query.shape[3] == value.shape[3]
and query.shape[1] == value.shape[1]
):
qkv = torch.stack([query, key, value], 2)
qkv.requires_grad_(True)
# bm3hk -> 3 x bmhk
query, key, value = xformers.ops.unbind(qkv, 2)
assert not query.is_contiguous()
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
if not op_bw.supports(fmha.Inputs(query, key, value, attn_bias)):
pytest.skip("inputs not supported")
out = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias, op=(op_fw, op_bw)
)
grad_out = torch.ones_like(out)
if grad_out_contiguous is False:
grad_out = torch.tensor([1.0], dtype=query.dtype, device=device)[
None, None, :
].expand_as(out)
out.backward(grad_out)
if qkv is None and op_bw == fmha.cutlass.BwOp:
assert query.stride() == query.grad.stride()
grads = []
if qkv is None:
grads = [query.grad, key.grad, value.grad]
query.grad = None
key.grad = None
value.grad = None
else:
grads = [qkv.grad]
qkv.grad = None
if attn_bias_requires_grad:
attn_bias_grad = get_bias_grad(attn_bias, clear=True)
if attn_bias_grad is not None:
grads.append(attn_bias_grad)
ref = ref_attention(query, key, value, attn_bias)
ref.backward(grad_out)
assert_allclose(
out.float(),
ref.float(),
"fw pass",
atol=op_fw.ERROR_ATOL[dtype],
rtol=op_fw.ERROR_RTOL.get(dtype, 1e-5),
)
del out
del grad_out
del ref
atol = op_bw.ERROR_ATOL[dtype]
rtol = op_bw.ERROR_RTOL[dtype]
grads_ref = []
grads_name = []
if qkv is None:
assert isinstance(query.grad, torch.Tensor)
assert isinstance(key.grad, torch.Tensor)
assert isinstance(value.grad, torch.Tensor)
grads_ref = [query.grad, key.grad, value.grad]
grads_name = ["query", "key", "value"]
else:
assert isinstance(qkv.grad, torch.Tensor)
grads_ref = [qkv.grad]
grads_name = ["qkv"]
if attn_bias_requires_grad:
attn_bias_grad = get_bias_grad(attn_bias)
if attn_bias_grad is not None:
grads_ref.append(attn_bias.grad)
grads_name.append("bias")
del query
del key
del value
del qkv
assert len(grads_ref) == len(
grads
), "Wrong number of gradients (maybe bias grad didn't backprop?)"
for name, calc_grad, ref_grad in zip(grads_name, grads, grads_ref):
assert_allclose(
calc_grad,
ref_grad,
msg=f"{op_fw.NAME}+{op_bw.NAME}:{name}",
atol=atol,
rtol=rtol,
)
def _vec_binom_test(x, n, p):
"""
vectorized implementation of scipy.stats.binom_test
this makes our tests much faster
reference: https://github.com/scipy/scipy/blob/v1.8.0/scipy/stats/_morestats.py#L2609-L2702
"""
import numpy as np
from scipy.stats import distributions
x = np.atleast_1d(x)
d = distributions.binom.pmf(x, n, p)[:, None]
rerr = 1 + 1e-7
# x < p * n case
i = np.arange(np.ceil(p * n), n + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=1)
pval1 = distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)
# other case
i = np.arange(np.floor(p * n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=1)
pval2 = distributions.binom.cdf(y - 1, n, p) + distributions.binom.sf(x - 1, n, p)
pval = np.where(x < p * n, pval1, pval2)
pval = np.minimum(1.0, pval)
return pval
def _get_drop_mask(op, batch_size, q_len, kv_len, p, device):
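    # Recreate the dropout keep-mask the kernel is expected to draw for the
    # current RNG state (callers set torch.manual_seed before both the kernel
    # call and this helper so the two masks line up).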
if op == fmha.cutlass.FwOp:
mask = torch.empty((batch_size, 1, q_len, kv_len), device=device)
rand_uniform = torch.ops.xformers._cutlass_rand_uniform(p, mask)
mask = (rand_uniform > p).to(torch.float32)
mask = mask.reshape(batch_size, q_len, kv_len)
else:
mask = torch.empty((batch_size, q_len, kv_len), device=device)
mask = torch.ops.xformers._temp_dropout(mask, p)
return mask
@cuda_only
@pytest.mark.parametrize("seed", [42, 124])
@pytest.mark.parametrize("p", [0.3, 0.7])
@pytest.mark.parametrize("k_len", [32])
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("kv_len", [3, 15, 32, 33])
@pytest.mark.parametrize("q_len", [2, 33])
@pytest.mark.parametrize("op", ALL_FW_OPS, ids=list(map(lambda t: t.NAME, ALL_FW_OPS)))
def test_dropout(op, q_len, kv_len, batch_size, k_len, p, seed):
device = "cuda"
scale = 3
query = torch.randn((batch_size, q_len, k_len), device=device) * scale
key = torch.randn((batch_size, kv_len, k_len), device=device) * scale
value = torch.randn((batch_size, kv_len, k_len), device=device) * scale
attn_bias = None
inputs_for_support_check = fmha.Inputs(query, key, value, attn_bias, p, None)
if not op.supports(inputs_for_support_check):
del query, key, value, attn_bias
pytest.skip(f"{op.NAME}: unsupported input")
torch.manual_seed(seed)
out = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias, p, op=(op, None)
)
torch.manual_seed(seed)
out2 = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias, p, op=(op, None)
)
assert_allclose(out, out2, "dropout reproducibility")
torch.manual_seed(seed)
mask = _get_drop_mask(op, batch_size, q_len, kv_len, p, device)
ref = ref_attention(query, key, value, attn_bias, mask, p)
    assert_allclose(out, ref, f"dropout output (max diff {(out - ref).abs().max()})", atol=2e-4)
num_trials = 1000
p_val_tol = 1e-6
keep_prob = 1 - p
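    # Statistical check: each mask entry should be kept with probability
    # `keep_prob`. Draw `num_trials` independent masks, then binomial-test the
    # global keep rate and the per-position keep rates.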
masks = []
for i in range(num_trials):
mask = _get_drop_mask(op, batch_size, q_len, kv_len, p, device)
masks.append(mask.clone().cpu())
masks = torch.stack(masks, dim=0)
p_value = binom_test(masks.sum(), masks.numel(), p=keep_prob)
assert p_value > p_val_tol, p_value
masks = masks.sum(0).flatten()
p_values = _vec_binom_test(masks, num_trials, p=keep_prob)
assert all(p_values > p_val_tol)
def _test_dropout_backward(q_len, kv_len, batch_size, k, p, op, dtype):
if dtype is torch.bfloat16 and compute_capability < (8, 0):
pytest.skip("bf16 requires Sm80")
if not op.is_available():
pytest.skip()
scale = 3
device = "cuda"
query = torch.randn((batch_size, q_len, k), device=device, dtype=dtype) * scale
key = torch.randn((batch_size, kv_len, k), device=device, dtype=dtype) * scale
value = torch.randn((batch_size, kv_len, k), device=device, dtype=dtype) * scale
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
grad_out = torch.ones_like(query)
assert op.supports(fmha.Inputs(query=query, key=key, value=value, p=p))
seed = 42
torch.manual_seed(seed)
out = xformers.ops.memory_efficient_attention(query, key, value, p=p, op=(op, None))
out.backward(grad_out)
grad_q = query.grad
grad_k = key.grad
grad_v = value.grad
query.grad = None
key.grad = None
value.grad = None
torch.manual_seed(seed)
mask = _get_drop_mask(op, batch_size, q_len, kv_len, p, device)
ref = ref_attention(query, key, value, None, mask, p)
ref.backward(grad_out)
atol, rtol = (
fmha.AttentionBwOpBase.ERROR_ATOL[dtype],
fmha.AttentionBwOpBase.ERROR_RTOL[dtype],
)
assert_allclose(
grad_v,
value.grad,
"grad_v",
atol=atol,
rtol=rtol,
)
# TODO: Investigate why precision is worse
if dtype in [torch.float16, torch.bfloat16]:
atol = atol * 2 + 0.15
rtol = rtol * 2
assert_allclose(
grad_q,
query.grad,
"grad_q",
atol=atol,
rtol=rtol,
)
assert_allclose(
grad_k,
key.grad,
"grad_k",
atol=atol,
rtol=rtol,
)
@cuda_only
@pytest.mark.parametrize("p", [0.3, 0.7])
@pytest.mark.parametrize("k", [5, 6, 32])
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("kv_len", [3, 15, 32, 33])
@pytest.mark.parametrize("q_len", [2, 33])
def test_dropout_backward_small_k(q_len, kv_len, batch_size, k, p):
_test_dropout_backward(
q_len, kv_len, batch_size, k, p, op=fmha.small_k.FwOp, dtype=torch.float32
)
@cuda_only
@pytest.mark.parametrize("p", [0.000001, 0.3, 0.7])
@pytest.mark.parametrize("k", [16, 128, 256])
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("kv_len", [3, 248, 256])
@pytest.mark.parametrize("q_len", [3, 248, 256])
@pytest.mark.parametrize("dt", ["f16", "bf16", "f32"])
def test_dropout_backward_cutlass(dt, q_len, kv_len, batch_size, k, p):
_test_dropout_backward(
q_len,
kv_len,
batch_size,
k,
p,
op=fmha.cutlass.FwOp,
dtype={"f16": torch.float16, "bf16": torch.bfloat16, "f32": torch.float32}[dt],
)
@pytest.mark.parametrize("k_len", [32])
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize("kv_len", [3 * 32])
@pytest.mark.parametrize("q_len", [3 * 32])
@pytest.mark.parametrize("device", _devices)
def test_memory_efficient_attention_full_block_masked(
device, q_len, kv_len, batch_size, k_len
):
op_fw = fmha.small_k.FwOp
op_bw = fmha.small_k.BwOp
scale = 3
query = torch.randn((batch_size, q_len, k_len), device=device) * scale
key = torch.randn((batch_size, kv_len, k_len), device=device) * scale
value = torch.randn((batch_size, kv_len, k_len), device=device) * scale
# in this case, most of the blocks in a row get masked
attn_bias = torch.full((3, 32), float("-inf"), device=device)
attn_bias[:2, :4] = 0
attn_bias = attn_bias.flatten()[None, None, :].expand(1, q_len, -1)
out = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias, op=(op_fw, op_bw)
)
ref = ref_attention(query, key, value, attn_bias)
assert_allclose(
out, ref, atol=op_fw.ERROR_ATOL[query.dtype], rtol=op_fw.ERROR_RTOL[query.dtype]
)
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
grad_out = torch.ones_like(query)
out = xformers.ops.memory_efficient_attention(query, key, value, attn_bias)
out.backward(grad_out)
grad_q = query.grad
grad_k = key.grad
grad_v = value.grad
query.grad = None
key.grad = None
value.grad = None
ref = ref_attention(query, key, value, attn_bias)
ref.backward(grad_out)
atol = op_bw.ERROR_ATOL[query.dtype]
rtol = op_bw.ERROR_RTOL[query.dtype]
assert_allclose(grad_q, query.grad, "grad_q", atol=atol, rtol=rtol)
assert_allclose(grad_k, key.grad, "grad_k", atol=atol, rtol=rtol)
assert_allclose(grad_v, value.grad, "grad_v", atol=atol, rtol=rtol)
@pytest.mark.parametrize("fmt", ["BMK", "BMHK"])
@parametrize_opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs
def test_lowlevel_api_shapes(opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt):
query, key, value, attn_bias = create_tensors(
*opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt=fmt
)
grad_out = torch.ones_like(query)
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
out, lse = xformers.ops.memory_efficient_attention_forward_requires_grad(
query, key, value, attn_bias
)
assert out.ndim == query.ndim
dq, dk, dv = xformers.ops.memory_efficient_attention_backward(
grad_out, out, lse, query, key, value, attn_bias
)
assert dq.shape == query.shape
assert dk.shape == key.shape
assert dv.shape == value.shape
@parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs
def test_cuda_streams(
opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
):
(
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
) = opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
if device != "cuda":
pytest.skip("Not CUDA")
bias_type = None
opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv = [
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
]
s_hipri = torch.cuda.Stream(priority=-1)
s_lopri = torch.cuda.Stream(priority=0)
query, key, value, attn_bias = create_tensors(
*opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt="BMHK"
)
torch.cuda.synchronize()
with torch.cuda.stream(s_lopri):
torch.cuda._sleep(100_000_000) # wait 100m cycles
query *= 2
s_hipri.wait_stream(s_lopri)
with torch.cuda.stream(s_hipri):
        # If the kernel were scheduled on the default stream instead,
        # `query *= 2` would not have been executed yet
out = xformers.ops.memory_efficient_attention(query, key, value, op=(op, None))
# Test that `s_lopri` is still sleeping
# and that `query *= 2` has not been executed yet
query2_main_stream = query * 2
torch.cuda.synchronize()
assert torch.allclose(query2_main_stream, query), "Need to increase sleep time"
ref = ref_attention(query, key, value)
assert out.shape == ref.shape, out.shape
assert_allclose(
out.float(),
ref.float(),
atol=op.ERROR_ATOL[dtype],
rtol=op.ERROR_RTOL.get(dtype, 1e-5),
)
@parametrize_opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs
def test_custom_scale(opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv):
p = 0.0
scale = 1.0
(
op_bw,
device,
dtype,
_,
_,
q_len,
kv_len,
_,
k,
_,
) = opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
torch.manual_seed(q_len + kv_len + k)
if device != "cuda":
pytest.skip("Not CUDA")
query, key, value, attn_bias = create_tensors(
*opBW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv, fmt="BMK"
)
inputs = fmha.Inputs(
query=query, key=key, value=value, attn_bias=attn_bias, scale=scale
)
op_fw = sample_random_supported_fw(inputs, seed=q_len * k + kv_len * k)
grad_out = torch.ones_like(query)
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
reasons = op_fw.not_supported_reasons(inputs)
if reasons:
pytest.skip(f"{op_fw.NAME}: unsupported ({'/'.join(reasons)})")
reasons = op_bw.not_supported_reasons(inputs)
if reasons:
pytest.skip(f"{op_bw.NAME}: unsupported ({'/'.join(reasons)})")
    # NOTE: we still need to scale the inputs to keep the pre-softmax values
    # from blowing up (numerical stability)
s = k**-0.5
out = xformers.ops.memory_efficient_attention(
query * s, key, value, attn_bias, p, scale, op=(op_fw, op_bw)
)
out.backward(grad_out)
grad_q, grad_k, grad_v = query.grad, key.grad, value.grad
query.grad = key.grad = value.grad = None
ref = ref_attention(query * s, key, value, attn_bias, None, p, scale)
ref.backward(grad_out)
ref_grad_q, ref_grad_k, ref_grad_v = query.grad, key.grad, value.grad
query.grad = key.grad = value.grad = None
atol = op_fw.ERROR_ATOL[dtype]
rtol = op_fw.ERROR_RTOL[dtype]
assert_allclose(out.float(), ref.float(), "out", atol=atol, rtol=rtol)
atol = op_bw.ERROR_ATOL[dtype]
rtol = op_bw.ERROR_RTOL[dtype]
assert_allclose(grad_q, ref_grad_q, "grad_q", atol=atol, rtol=rtol)
assert_allclose(grad_k, ref_grad_k, "grad_k", atol=atol, rtol=rtol)
assert_allclose(grad_v, ref_grad_v, "grad_v", atol=atol, rtol=rtol)
def apply_attention(query, key, value, attn_bias, op_fw, proj):
x = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias=attn_bias, op=(op_fw, None)
)
x = proj(x)
return x
@pytest.mark.parametrize("use_reentrant", [False, True])
@parametrize_opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv__xs
def test_grad_checkpointing(
opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
use_reentrant,
):
fmt = "BMHK"
(
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
) = opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv
bias_type = None
opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv = (
op,
device,
dtype,
bias_type,
batch_size,
q_len,
kv_len,
h,
k,
kv,
)
query, key, value, attn_bias = create_tensors(
*opFW_device_dtype_biasT_B_Mq_Mkv_H_K_Kv,
fmt=fmt,
)
qkv = None
if (
fmt == "BMHK"
and query.shape[3] == value.shape[3]
and query.shape[1] == value.shape[1]
):
qkv = torch.stack([query, key, value], 2)
qkv.requires_grad_(True)
# bm3hk -> 3 x bmhk
query, key, value = xformers.ops.unbind(qkv, 2)
assert not query.is_contiguous()
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
proj = torch.nn.Linear(kv, k, device=device, dtype=dtype)
x = query
for _ in range(5):
x = checkpoint(
apply_attention,
x,
key,
value,
attn_bias,
op,
proj,
use_reentrant=use_reentrant,
)
x.mean().backward()
ALL_FW_OPS_NO_SMALLK = [op for op in ALL_FW_OPS if op is not fmha.small_k.FwOp]
@pytest.mark.parametrize(
"op", ALL_FW_OPS_NO_SMALLK, ids=[op.NAME for op in ALL_FW_OPS_NO_SMALLK]
)
def test_unsupported_cpu(op: Type[fmha.AttentionFwOpBase]):
q = torch.empty([1, 1, 1, 32])
with pytest.raises(ValueError):
fmha.memory_efficient_attention(q, q, q, op=(op, None))
@cuda_only
@pytest.mark.parametrize(
"op", ALL_FW_OPS_NO_SMALLK, ids=[op.NAME for op in ALL_FW_OPS_NO_SMALLK]
)
def test_unsupported_stride_lastdim(op: Type[fmha.AttentionFwOpBase]):
q = torch.empty([1, 1, 32, 4], device="cuda", dtype=torch.float16).permute(
0, 1, 3, 2
)
try:
fmha.memory_efficient_attention(q, q, q, op=(op, None))
except ValueError as e:
if "Only work on pre-MLIR triton for now" in str(e):
pytest.skip("Only work on pre-MLIR triton for now")
q = q.contiguous()
fmha.memory_efficient_attention(q, q, q, op=(op, None))
@cuda_only
@pytest.mark.parametrize(
"op", ALL_FW_OPS_NO_SMALLK, ids=[op.NAME for op in ALL_FW_OPS_NO_SMALLK]
)
def test_unsupported_stride_alignment(op: Type[fmha.AttentionFwOpBase]):
q = torch.empty([1, 2, 2, 33], device="cuda", dtype=torch.float16)[:, :, :, :32]
try:
fmha.memory_efficient_attention(q, q, q, op=(op, None))
except ValueError as e:
if "Only work on pre-MLIR triton for now" in str(e):
pytest.skip("Only work on pre-MLIR triton for now")
q = q.contiguous()
fmha.memory_efficient_attention(q, q, q, op=(op, None))
@sm75_or_better_only
def test_unsupported_dropout_combine_flash_cutlass() -> None:
q = torch.empty(
[1, 4, 1, 16], device="cuda", dtype=torch.float16, requires_grad=True
)
with pytest.raises(ValueError):
out = fmha.memory_efficient_attention(
q, q, q, p=0.1, op=(fmha.cutlass.FwOp, fmha.flash.BwOp)
)
out.backward(out)
with pytest.raises(ValueError):
out = fmha.memory_efficient_attention(
q, q, q, p=0.1, op=(fmha.flash.FwOp, fmha.cutlass.BwOp)
)
out.backward(out)
def test_attn_bias_causal() -> None:
m = -math.inf
causal_mask = torch.tensor([[0, m], [0, 0], [0, 0]])
tensor_bias = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
attn_bias = fmha.attn_bias.LowerTriangularMask()
assert_allclose(attn_bias.materialize(causal_mask.shape), causal_mask, "causal")
attn_bias = attn_bias.add_bias(tensor_bias)
assert_allclose(
attn_bias.materialize(causal_mask.shape),
tensor_bias + causal_mask,
"causal+tensor_bias",
)
def test_attn_bias_torch_tensor() -> None:
tensor_bias = torch.tensor([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]])
attn_bias = fmha.attn_bias.LowerTriangularMaskWithTensorBias(tensor_bias)
m = -math.inf
causal_bias = torch.tensor([[0, m, m], [0, 0, m]])
assert_allclose(
attn_bias.materialize((2, 3)), causal_bias + tensor_bias, "tensor_bias+causal"
)
def test_attn_bias_blockdiag() -> None:
queries = [
torch.randn([1, 3, 1, 8]),
torch.randn([1, 2, 1, 8]),
torch.randn([1, 5, 1, 8]),
]
attn_bias, q = fmha.BlockDiagonalMask.from_tensor_list(queries)
# Verify mask
as_tensor = attn_bias.materialize((10, 10))
assert int((as_tensor != -math.inf).sum().item()) == 3 * 3 + 2 * 2 + 5 * 5
assert_allclose(as_tensor[0:3, 0:3], torch.zeros([3, 3]), "batch0")
assert_allclose(as_tensor[3:5, 3:5], torch.zeros([2, 2]), "batch1")
assert_allclose(as_tensor[5:, 5:], torch.zeros([5, 5]), "batch2")
# Verify we can split it back
queries2 = attn_bias.split(q)
assert len(queries) == len(queries2)
for q1, q2 in zip(queries, queries2):
assert_allclose(q1, q2)
def test_attn_bias_blockdiag_batched() -> None:
queries = [
torch.randn([1, 3, 1, 8]),
torch.randn([3, 2, 1, 8]),
torch.randn([1, 5, 1, 8]),
]
attn_bias, q = fmha.BlockDiagonalMask.from_tensor_list(queries)
# Verify mask
as_tensor = attn_bias.materialize((14, 14))
assert int((as_tensor != -math.inf).sum().item()) == 3 * 3 + 3 * 2 * 2 + 5 * 5
assert_allclose(as_tensor[0:3, 0:3], torch.zeros([3, 3]), "batch0")
assert_allclose(as_tensor[3:5, 3:5], torch.zeros([2, 2]), "batch1.0")
assert_allclose(as_tensor[5:7, 5:7], torch.zeros([2, 2]), "batch1.1")
assert_allclose(as_tensor[7:9, 7:9], torch.zeros([2, 2]), "batch1.2")
assert_allclose(as_tensor[9:, 9:], torch.zeros([5, 5]), "batch2")
# Verify we can split it back
queries2 = attn_bias.split(q)
assert len(queries) == len(queries2)
for q1, q2 in zip(queries, queries2):
assert_allclose(q1, q2)
def test_attn_bias_blockdiag_crossattn_causal() -> None:
# Q / KV have different seqlen
list_q = [
torch.randn([1, 3, 1, 8]),
torch.randn([2, 1, 1, 8]),
]
list_k = [
torch.randn([1, 2, 1, 8]),
torch.randn([2, 3, 1, 8]),
]
attn_bias, q, k, _ = fmha.attn_bias.BlockDiagonalMask.from_tensor_lists_qkv(
list_q, list_k
)
# Verify mask
as_tensor = attn_bias.materialize((q.shape[1], k.shape[1]))
assert int((as_tensor != -math.inf).sum().item()) == 3 * 2 + 2 * 3 * 1
assert_allclose(as_tensor[0:3, 0:2], torch.zeros([3, 2]), "batch0")
assert_allclose(as_tensor[3:4, 2:5], torch.zeros([1, 3]), "batch1.0")
assert_allclose(as_tensor[4:, 5:], torch.zeros([1, 3]), "batch1.1")
# Also test causal version
as_tensor = attn_bias.make_causal().materialize((q.shape[1], k.shape[1]))
assert_allclose(
as_tensor[3:4, 2:5],
fmha.attn_bias.LowerTriangularMask().materialize((1, 3)),
"batch1.0[causal]",
)
# Verify we can split it back
list_q2 = attn_bias.split_queries(q)
assert len(list_q) == len(list_q2)
for q1, q2 in zip(list_q, list_q2):
assert_allclose(q1, q2)
with pytest.raises(ValueError):
attn_bias.split_queries(k)
list_k2 = attn_bias.split_kv(k)
assert len(list_k) == len(list_k2)
for k1, k2 in zip(list_k, list_k2):
assert_allclose(k1, k2)
def test_attn_bias_blockdiag_crossattn_causal_with_prefix_qk_cond() -> None:
list_q = [
torch.randn([1, 3, 1, 8]),
]
list_k = [
torch.randn([1, 2, 1, 8]),
]
attn_bias, q, k, _ = fmha.attn_bias.BlockDiagonalMask.from_tensor_lists_qkv(
list_q, list_k
)
with pytest.raises(ValueError):
attn_bias.make_causal_from_bottomright()
def test_attn_bias_blockdiag_crossattn_causal_with_prefix() -> None:
# Q / KV have different seqlen
list_q = [
torch.randn([1, 2, 1, 8]),
torch.randn([2, 2, 1, 8]),
]
list_k = [
torch.randn([1, 2, 1, 8]),
torch.randn([2, 5, 1, 8]),
]
attn_bias, q, k, _ = fmha.attn_bias.BlockDiagonalMask.from_tensor_lists_qkv(
list_q, list_k
)
as_tensor = attn_bias.make_causal_from_bottomright().materialize(
(q.shape[1], k.shape[1])
)
m = -math.inf
assert_allclose(
as_tensor[0:2, 0:2],
torch.tensor([[0, m], [0, 0]], dtype=torch.float32),
"batch1.1[causal_with_prefix]",
)
assert_allclose(
as_tensor[2:4, 2:7],
torch.tensor([[0, 0, 0, 0, m], [0, 0, 0, 0, 0]], dtype=torch.float32),
"batch2.1[causal_with_prefix]",
)
assert_allclose(
as_tensor[4:6, 7:12],
torch.tensor([[0, 0, 0, 0, m], [0, 0, 0, 0, 0]], dtype=torch.float32),
"batch2.2[causal_with_prefix]",
)
@cuda_only
def test_attn_bias_padded() -> None:
bsize, n_heads, d, padding = 8, 3, 8, 32
# Q / KV have different seqlen
k = torch.randn((bsize, padding, n_heads, d)).cuda().half()
k_seqlen = [5, 8, 7, 1, 9, 3, 12, 32]
other = bsize - 1
v = torch.randn((bsize, padding, n_heads, d)).cuda().half()
n_q_first = 4
q = [
torch.randn((1, n_q_first, n_heads, d)).cuda().half(),
torch.randn((1, other, n_heads, d)).cuda().half(),
]
q_cat = torch.cat([x.view(1, -1, n_heads, d) for x in q], dim=1)
causal_diagonal = torch.tensor(
[0] + [i - 1 for i in k_seqlen[1:]], dtype=torch.int32
).cuda()
q_seqlen = [n_q_first] + [1] * other
attn_bias = fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
q_seqlen=q_seqlen,
kv_seqlen=k_seqlen,
causal_diagonal=causal_diagonal,
kv_padding=padding,
)
v = v.view(1, -1, n_heads, d)
k = k.view(1, -1, n_heads, d)
scores = (q_cat.transpose(1, 2) @ k.transpose(1, 2).transpose(2, 3)).float()
assert not scores.isnan().any()
mask = torch.full_like(scores, -float("inf"))
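    # Reference mask: batch element i's keys occupy the padded slot
    # [i * padding, i * padding + k_seqlen[i]); within that window apply a
    # causal mask shifted by causal_diagonal[i], everything else stays -inf.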
for i, (slen, spos, qlen) in enumerate(
zip(k_seqlen, causal_diagonal.tolist(), q_seqlen)
):
kseq_start = i * padding
qstart = sum(q_seqlen[:i])
mask[:, :, qstart : qstart + qlen, kseq_start : kseq_start + slen] = torch.triu(
mask[:, :, qstart : qstart + qlen, kseq_start : kseq_start + slen].float(),
diagonal=spos + 1,
).float()
scores += mask
assert not scores.isnan().any()
    # scores: [1, 3, 11, 256] attention weights after the softmax
    scores = torch.nn.functional.softmax(scores, -1).half()
    # [1, 3, 11, 256] @ [1, 3, 256, 8] -> [1, 3, 11, 8]
    output = scores @ v.transpose(1, 2)
output = output.transpose(1, 2).contiguous()
fmha_output = fmha.memory_efficient_attention_forward(
q_cat, k, v, attn_bias, scale=1.0
)
# assert torch.allclose(output, fmha_output)
assert_allclose(
output,
fmha_output,
atol=fmha.cutlass.FwOp.ERROR_ATOL[torch.float16],
rtol=fmha.cutlass.FwOp.ERROR_RTOL[torch.float16],
)
def test_attn_bias_from_seqlens() -> None:
bias = fmha.attn_bias.BlockDiagonalMask.from_seqlens([3, 5, 1])
out = bias.split(torch.randn([1, 3 + 5 + 1, 16]))
assert len(out) == 3
assert tuple(out[0].shape) == (1, 3, 16)
@cuda_only
def test_attn_bias_blockdiag_doc() -> None:
"""IMPORTANT:
This is the example in the doc for `BlockDiagonalMask`.
If this example needs to be updated, please also update the doc
"""
import torch
from xformers.ops import fmha
K = 16
dtype = torch.float16
device = "cuda"
list_x = [
torch.randn([1, 3, 1, K], dtype=dtype, device=device),
torch.randn([1, 6, 1, K], dtype=dtype, device=device),
torch.randn([1, 2, 1, K], dtype=dtype, device=device),
]
attn_bias, x = fmha.BlockDiagonalMask.from_tensor_list(list_x)
linear = torch.nn.Linear(K, K * 3).to(device=device, dtype=dtype) # type: ignore
q, k, v = linear(x).reshape([1, -1, 1, 3, K]).unbind(-2)
out = fmha.memory_efficient_attention(q, k, v, attn_bias=attn_bias)
list_out = attn_bias.split(out)
print(list_out[0].shape) # [1, 3, 1, K]
assert tuple(list_out[0].shape) == (1, 3, 1, K)
@cuda_only
class TestAttnBias:
@staticmethod
def create_tensors(
dtype,
B: int = 2,
Mq: int = 32,
Mkv: int = 32,
H: int = 3,
K: int = 16,
Kv: int = 16,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
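        # Returns (q, k, v, bias): q/k/v in BMHK layout and a dense
        # [B, H, Mq, Mkv] bias, all on CUDA in the requested dtype.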
return (
torch.randn([B, Mq, H, K], device="cuda", dtype=dtype) * 3,
torch.randn([B, Mkv, H, K], device="cuda", dtype=dtype) * 3,
torch.randn([B, Mkv, H, Kv], device="cuda", dtype=dtype) * 3,
torch.randn([B, H, Mq, Mkv], device="cuda", dtype=dtype) * 3,
)
@staticmethod
def pad_bias(bias: torch.Tensor) -> torch.Tensor:
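        # Pad the last dim up to a multiple of 16, then slice back to the
        # original size: the logical shape is unchanged, but the underlying
        # row stride of the storage becomes 16-aligned.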
align_to = 16
if (bias.shape[-1] % align_to) == 0:
return bias
pad_count = align_to - (bias.shape[-1] % align_to)
return torch.nn.functional.pad(bias, [0, pad_count])[:, :, :, : bias.shape[-1]]
def test_f16_biasf32(self) -> None:
q, k, v, bias = self.create_tensors(torch.float16)
fmha.memory_efficient_attention(q, k, v, attn_bias=bias)
bias = bias.to(torch.float32)
with pytest.raises((ValueError, RuntimeError)):
fmha.memory_efficient_attention(q, k, v, attn_bias=bias)
def test_f32_biasf16(self) -> None:
q, k, v, bias = self.create_tensors(torch.float32)
fmha.memory_efficient_attention(q, k, v, attn_bias=bias)
bias = bias.to(torch.float16)
with pytest.raises((ValueError, RuntimeError)):
fmha.memory_efficient_attention(q, k, v, attn_bias=bias)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16])
def test_wrong_alignment(self, dtype) -> None:
op = fmha.cutlass.FwOp
q, k, v, bias = self.create_tensors(dtype, Mq=7, Mkv=5)
try:
fmha.memory_efficient_attention(q, k, v, attn_bias=bias, op=(op, None))
return
except (ValueError, RuntimeError):
pass
# This case is not supported, likely due to padding issues
# Let's make sure it works with padding
assert bias.ndim == 4, bias.shape
bias_padded = self.pad_bias(bias)
out = fmha.memory_efficient_attention(
q, k, v, attn_bias=bias_padded, op=(op, None)
).float()
ref_out = ref_attention_bmhk(q, k, v, bias)
assert_allclose(
out, ref_out, atol=op.ERROR_ATOL[dtype], rtol=op.ERROR_RTOL[dtype]
)
def test_permuted_attn_bias(self) -> None:
op = fmha.cutlass.FwOp
dtype = torch.float16
q, k, v, bias = self.create_tensors(dtype, Mq=7, Mkv=7)
bias = bias.transpose(-1, -2) # now `stride(-1) != 1`
# Either it works, or it raises an exception
# but we should never get a CUDA error
try:
out = fmha.memory_efficient_attention(
q, k, v, attn_bias=bias, op=(op, None)
).float()
ref_out = ref_attention_bmhk(q, k, v, bias)
assert_allclose(
out, ref_out, atol=op.ERROR_ATOL[dtype], rtol=op.ERROR_RTOL[dtype]
)
except (ValueError, RuntimeError):
pass
SM_AND_SHMEM_KBYTES = [
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications-technical-specifications-per-compute-capability
(50, 64),
(60, 64),
(70, 96),
(75, 64),
(80, 163),
(86, 99),
(89, 99),
# (90, 227),
]
@cuda_only
@pytest.mark.parametrize("dtype_str", ["f32", "f16", "bf16"])
@pytest.mark.parametrize(
"sm_shmem",
SM_AND_SHMEM_KBYTES,
ids=[f"cc{sm}_shmem{shmem}kb" for sm, shmem in SM_AND_SHMEM_KBYTES],
)
def test_has_kernel_for(sm_shmem: Tuple[int, int], dtype_str: str) -> None:
dtype = {"f32": torch.float, "f16": torch.half, "bf16": torch.bfloat16}[dtype_str]
sm, shmem_kbytes = sm_shmem
if sm < 80 and dtype_str == "bf16":
return
for k in [16, 32, 64, 128, 256]:
assert torch.ops.xformers._has_cutlassF_kernel_for(
dtype, sm, shmem_kbytes * 1024, k
), f"k={k}"
assert torch.ops.xformers._has_cutlassB_kernel_for(
dtype, sm, shmem_kbytes * 1024, k
), f"k={k}"
| EXA-1-master | exa/libraries/xformers/tests/test_mem_eff_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# Automatically fetch all registered attentions and Feedforwards
from xformers.components import Activation
from xformers.components.attention import ATTENTION_REGISTRY, AttentionMask
from xformers.components.feedforward import FEEDFORWARD_REGISTRY
from xformers.factory import (
xFormerDecoderBlock,
xFormerDecoderConfig,
xFormerEncoderBlock,
xFormerEncoderConfig,
)
from xformers.helpers.test_utils import init_torch_distributed_local
BATCH = 2
SEQ = 64
MODEL = 64
DROPOUT = 0.5
GLOBAL_ATTENTION_RATIO = 0.1 # 10% of the tokens have a global view
DEVICES = [torch.device("cuda")]
VOCAB_SIZE = 64
@pytest.mark.parametrize("attn_dropout", [0.1])
@pytest.mark.parametrize("residual_dropout", [0.1])
@pytest.mark.parametrize("heads", [1, 2])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("residual_norm_style", ["pre", "post", "deepnorm"])
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("reversible", [True, False])
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_xformer_encoder_block(
attention_name: str,
feedforward_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
activation: Activation,
residual_norm_style: str,
device: torch.device,
reversible: bool,
):
block_size = 16
attention_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": False,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": heads,
"dim_head": MODEL // heads,
"layout": torch.eye(SEQ // block_size, SEQ // block_size, dtype=torch.long),
"block_size": block_size,
"num_rules": 2, # Compositional Attention
}
multi_head_config = {
"num_heads": heads,
"dim_model": MODEL,
"residual_dropout": residual_dropout,
"attention": attention_config,
}
feedforward_config = {
"name": feedforward_name,
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"gate": "top_2",
}
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
position_encoding_config = {
"name": "sine",
"dim_model": MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style=residual_norm_style,
reversible=reversible,
)
# Test that the whole block can be instantiated
block = xFormerEncoderBlock.from_config(block_config).to(device)
    # Check that the dimensions make sense by doing a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
_ = block(inputs)
# Check that we support attention masking, at least interface wise (do not check correctness yet)
att_mask_tensor = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
att_mask = AttentionMask.from_bool(att_mask_tensor)
if block.supports_attention_mask:
_ = block(inputs, att_mask=att_mask)
_ = block(inputs, att_mask=att_mask_tensor)
else:
with pytest.raises(AssertionError):
# Check that passing an attention mask to a mechanism which does not support it raises
# an exception
_ = block(inputs, att_mask=att_mask)
# Check that we support input masking, at least interface wise (do not check correctness yet)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
_ = block(inputs, input_mask=input_mask)
@pytest.mark.parametrize("attn_dropout", [0.1])
@pytest.mark.parametrize("residual_dropout", [0.1])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("heads", [1, 2])
@pytest.mark.parametrize("activation", [a.value for a in Activation])
@pytest.mark.parametrize("rotary_embeddings", [False, True])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("feedforward_name", FEEDFORWARD_REGISTRY.keys())
@pytest.mark.parametrize("residual_norm_style", ["pre", "post", "deepnorm"])
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_xformer_decoder_block(
attention_name: str,
rotary_embeddings: bool,
feedforward_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
causal: bool,
activation: Activation,
residual_norm_style: str,
device: torch.device,
):
block_size = 16
attention_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": causal,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"dim_head": MODEL // heads,
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"layout": torch.eye(SEQ // block_size, SEQ // block_size, dtype=torch.long),
"block_size": block_size,
"num_rules": 2, # Compositional Attention
}
multi_head_config = {
"num_heads": heads,
"dim_model": MODEL,
"residual_dropout": residual_dropout,
"attention": attention_config,
"use_rotary_embeddings": rotary_embeddings,
}
feedforward_config = {
"name": feedforward_name,
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": activation,
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"gate": "top_2",
}
if feedforward_name == "MixtureOfExperts":
init_torch_distributed_local()
position_encoding_config = {
"name": "sine",
"dim_model": MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
encoder_block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style=residual_norm_style,
)
decoder_block_config = xFormerDecoderConfig(
dim_model=MODEL,
multi_head_config_masked=multi_head_config,
multi_head_config_cross=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style=residual_norm_style,
)
# Test that the whole block can be instantiated
encoder_block = xFormerEncoderBlock.from_config(encoder_block_config).to(device)
decoder_block = xFormerDecoderBlock.from_config(decoder_block_config).to(device)
    # Check that the dimensions make sense by doing a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
encoded = encoder_block(inputs)
_ = decoder_block(
inputs, encoded
) # NOTE: does not make a lot of sense, just checking dimensions
# Check that we support masking, at least interface wise (do not check correctness yet)
att_mask_tensor = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
att_mask = AttentionMask.from_bool(att_mask_tensor)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
encoded = encoder_block(inputs)
if decoder_block.supports_attention_mask:
_ = decoder_block(
inputs, encoded, encoder_att_mask=att_mask, input_mask=input_mask
)
_ = decoder_block(
inputs, encoded, encoder_att_mask=att_mask_tensor, input_mask=input_mask
)
# Test different sequence lengths when encoding and decoding
if (
not decoder_block.requires_same_k_q_dimensions
and not decoder_block.requires_squared_context_length
):
if not causal or not decoder_block.causal_attention:
_ = decoder_block(inputs[:, :-16], encoded)
else:
# Check that we assert properly
with pytest.raises(AssertionError):
_ = decoder_block(inputs[:, :-16], encoded)
else:
# Check that we assert properly
with pytest.raises(AssertionError):
_ = decoder_block(inputs[:, :-16], encoded)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_embedding_projection():
block_size = 16
attention_config = {
"name": "scaled_dot_product",
"dropout": 0.1,
"causal": False,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": 2,
"dim_head": MODEL // 2,
"layout": torch.eye(SEQ // block_size, SEQ // block_size, dtype=torch.long),
"block_size": block_size,
"num_rules": 2, # Compositional Attention
}
multi_head_config = {
"num_heads": 2,
"dim_model": MODEL,
"residual_dropout": 0.1,
"attention": attention_config,
}
feedforward_config = {
"name": "MLP",
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": "relu",
"hidden_layer_multiplier": 4,
"number_of_experts": 4,
"gate": "top_2",
}
position_encoding_config = {
"name": "sine",
"dim_model": 2 * MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style="pre",
reversible=False,
)
device = torch.device("cuda")
# Test that the whole block can be instantiated
block = xFormerEncoderBlock.from_config(block_config).to(device)
    # Check that the dimensions make sense by doing a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
_ = block(inputs)
# Check that we support attention masking, at least interface wise (do not check correctness yet)
if block.supports_attention_mask:
att_mask = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
_ = block(inputs, att_mask=att_mask)
# Check that we support input masking, at least interface wise (do not check correctness yet)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
_ = block(inputs, input_mask=input_mask)
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a CUDA device"
)
def test_simplicial_embedding(
device: torch.device,
):
attention_config = {
"name": "scaled_dot_product",
"dropout": 0.1,
"causal": False,
"window_size": SEQ // 8 + 1,
"seq_len": SEQ,
"dim_model": MODEL,
"num_heads": 4,
}
multi_head_config = {
"num_heads": 4,
"dim_model": MODEL,
"residual_dropout": 0.1,
"attention": attention_config,
}
feedforward_config = {
"name": "MLP",
"dim_model": MODEL,
"dropout": DROPOUT,
"activation": "relu",
"hidden_layer_multiplier": 4,
}
position_encoding_config = {
"name": "sine",
"dim_model": MODEL,
"seq_len": SEQ,
"vocab_size": VOCAB_SIZE,
}
block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style="pre",
reversible=False,
simplicial_embeddings={"L": 4},
)
# Test that the whole block can be instantiated
block = xFormerEncoderBlock.from_config(block_config).to(device)
    # Check that the dimensions make sense by doing a FW pass
inputs = torch.rand(BATCH, SEQ, device=device)
_ = block(inputs)
# Check that we support attention masking, at least interface wise (do not check correctness yet)
att_mask = torch.ones(SEQ, SEQ, dtype=torch.bool, device=device)
_ = block(inputs, att_mask=att_mask)
# Check that we support input masking, at least interface wise (do not check correctness yet)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
_ = block(inputs, input_mask=input_mask)
# Check that a faulty L is caught
block_config = xFormerEncoderConfig(
dim_model=MODEL,
multi_head_config=multi_head_config,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style="pre",
reversible=False,
simplicial_embeddings={"L": 3},
)
# Test that the whole block can be instantiated
with pytest.raises(AssertionError):
block = xFormerEncoderBlock.from_config(block_config).to(device)
_ = block(inputs)
| EXA-1-master | exa/libraries/xformers/tests/test_block_factory.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
def assert_allclose(
out: torch.Tensor,
ref: torch.Tensor,
msg: str = "failed",
atol: float = 1e-8,
rtol: float = 1e-5,
) -> None:
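    # Reports the worst-offending element and the fraction of failing elements
    # on mismatch. Example usage (illustrative values):
    #   assert_allclose(out.float(), ref.float(), "fw", atol=1e-4, rtol=1e-5)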
assert out.shape == ref.shape, f"Shape: {out.shape} (expected: {ref.shape})"
flatten_diff = ((out - ref).abs() - atol - ref.abs() * rtol).flatten()
max_pos = flatten_diff.argmax()
max_diff = flatten_diff[max_pos]
num_different = torch.count_nonzero(flatten_diff > 0)
percentage = num_different / flatten_diff.numel()
del flatten_diff
assert torch.allclose(out, ref, rtol=rtol, atol=atol), (
f"{msg}: "
f"out={out.flatten()[max_pos]} and ref={ref.flatten()[max_pos]} (diff={max_diff} > 0)"
f"/ atol={atol}, rtol={rtol}"
f"/ total failing elements: {num_different}, percentage={percentage}"
)
| EXA-1-master | exa/libraries/xformers/tests/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.attention import maybe_sparsify
from xformers.components.attention._sputnik_sparse import _dense_to_sparse
from xformers.components.attention.core import SparseCS, _create_random_sparsity
B = 2
M = 16 # not a nice round number, on purpose
_devices_list = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
_devices = [torch.device(d) for d in _devices_list]
@pytest.mark.parametrize("device", _devices)
def test_logical_and(device):
mask = _create_random_sparsity(torch.ones(B, M, M, dtype=torch.bool), 0.1)
mask_cs = SparseCS(mask, device)
# Check that we cannot & two sparse matrices (for now)
with pytest.raises(Exception):
_ = mask_cs & mask_cs
# Check that & ones returns the same values
mask_ones = mask_cs & torch.ones_like(mask, dtype=torch.bool, device=device)
assert torch.allclose(mask_cs.to_dense().long(), mask_ones.to_dense().long())
# Check that & the inverse returns 0 all around
mask_not = ~mask.to(device)
assert (mask_cs & mask_not).values.numel() == 0
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("seq", [12, 32, 128])
def test_dense_sparse(seq, device):
# Check that we can .to_dense() without crashing
mask = torch.rand(seq, seq, device=device) > 0.1
mask_cs = SparseCS(mask, device)
mask_back_forth = SparseCS(mask_cs.to_dense(), device)
assert torch.allclose(mask_cs.to_dense().long(), mask_back_forth.to_dense().long())
@pytest.mark.parametrize("device", _devices)
def test_device(device):
mask = _create_random_sparsity(
torch.ones(B, M, M, dtype=torch.bool, device=device), 0.1
)
assert mask.device.type == device.type
sparse_mask = maybe_sparsify(mask)
assert sparse_mask.device.type == device.type
def _baseline_dense_to_sparse(matrix):
import numpy as np
# Extract the nonzero values.
values = matrix.compress((matrix != 0).flatten())
# Calculate the offset of each row.
mask = (matrix != 0).astype(np.int32)
row_offsets = np.concatenate(([0], np.cumsum(np.add.reduce(mask, axis=1))), axis=0)
# Create the row indices and sort them.
    # note: use torch.argsort so the result matches the PyTorch implementation,
    # since PyTorch's sort is not stable
row_indices = torch.argsort(-1 * torch.as_tensor(np.diff(row_offsets))).numpy()
# Extract the column indices for the nonzero values.
x = mask * (np.arange(matrix.shape[1]) + 1)
column_indices = x.compress((x != 0).flatten())
column_indices = column_indices - 1
    # Cast to the desired precision.
values = torch.as_tensor(values.astype(np.float32))
row_indices, row_offsets, column_indices = [
torch.as_tensor(x.astype(np.int32))
for x in [row_indices, row_offsets, column_indices]
]
return values, row_indices, row_offsets, column_indices
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("seq", [12, 32, 128])
def test_dense_to_sparse(seq, device):
matrix = torch.rand(seq, seq, device=device)
matrix[matrix > 0.9] = 0
baseline_res = _baseline_dense_to_sparse(matrix.cpu().numpy())
res = _dense_to_sparse(matrix, device=device)
_idx_to_name = ["values", "row_indices", "row_offsets", "column_indices"]
for idx, (bi, i) in enumerate(zip(baseline_res, res)):
if idx != 1:
# row_indices is the result of an argsort, which is not stable
# for same number of elements
assert torch.allclose(bi.to(device), i), f"error in {_idx_to_name[idx]}"
assert bi.dtype == i.dtype
assert i.device == device
| EXA-1-master | exa/libraries/xformers/tests/test_sparsecs.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
# needed to register custom ops
import xformers # noqa: F401
from xformers.ops import masked_matmul
from xformers.sparse import BlockSparseTensor, SparseCSRTensor
cuda_only = pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
_devices = ["cpu", "cuda:0"] if torch.cuda.is_available() else ["cpu"]
_tensor_types = [BlockSparseTensor, SparseCSRTensor]
def _create_blocksparse_tensor(
device, block_size=32, Z=8, C=2, H=64, W=64, dtype=torch.float32
):
layout = torch.randint(2, (C, H // block_size, W // block_size), device=device)
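    # Force the first block-row and block-column to be present so the random
    # layout never has a completely empty row/column (assumption: this keeps
    # the layout valid for the blocksparse kernels exercised below).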
layout[:, :, 0] = 1
layout[:, 0, :] = 1
values = torch.randn(Z, layout.sum(), block_size, block_size, device=device).to(
dtype
)
return BlockSparseTensor(values, layout)
def _create_csr_tensor(device, dtype, shape, sparsity, divisible_by=4):
matrix = torch.rand(shape, dtype=torch.float32, device=device).to(dtype)
assert matrix.ndim == 3
keep = torch.rand_like(matrix[0], dtype=torch.float32) > sparsity
nonzero = torch.nonzero(keep)
nnz = nonzero.shape[0]
# NOTE: need to make it a multiple of 4 for sputnik
nonzero = nonzero[: (nnz - nnz % divisible_by)]
i, j = nonzero.unbind(1)
output = torch.zeros_like(matrix)
bdim = torch.arange(matrix.shape[0], device=matrix.device)[:, None]
output[bdim, i, j] = matrix[bdim, i, j]
return SparseCSRTensor.from_dense(output)
def _create_tensor(tensor_type, device, dtype, shape, sparsity):
if tensor_type == BlockSparseTensor:
block_size = 16
return _create_blocksparse_tensor(
device=device, dtype=dtype, block_size=block_size
)
elif tensor_type == SparseCSRTensor:
return _create_csr_tensor(
device=device, dtype=dtype, shape=shape, sparsity=sparsity
)
def _seed():
torch.random.manual_seed(42)
torch.cuda.manual_seed_all(42)
def _get_dtype_atol(tensor_type, device: str):
_seed()
if tensor_type == BlockSparseTensor and "cuda" in device:
# Upstream GPU blocksparse (Triton op) uses TF32 by default for all internal computations
# TF32 has the precision of fp16 but the range of fp32
# See https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True # type: ignore
return torch.float32, 1e-1
# Force pytorch to keep its computations as float32 (will default to tf32 with recent cuda and ampere+ GPU)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False # type: ignore
return torch.float32, 1e-5
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("func", [torch.add, torch.mul])
def test_sparse_binary_ops(func, device):
# TODO: add for BlockSparseTensor as well
N, H, W = 8, 64, 64
sparsity = 0.5
shape = (N, H, W)
a_sparse = _create_tensor(
SparseCSRTensor, device, dtype=torch.float32, shape=shape, sparsity=sparsity
)
a = a_sparse.to_dense()
b = a
b_sparse = a_sparse
res = func(a_sparse, b_sparse).to_dense()
res_gt = func(a, b)
assert torch.allclose(res, res_gt)
@pytest.mark.parametrize("tensor_type", _tensor_types)
@pytest.mark.parametrize("device", _devices)
def test_masked_matmul(tensor_type, device):
N, C, H, W, L = 8, 2, 64, 64, 32
sparsity = 0.7
dtype, atol = _get_dtype_atol(tensor_type, device)
shape0 = (N, C, H, W)
shape1 = (N, C, H, L)
shape2 = (N, C, W, L)
if tensor_type != BlockSparseTensor:
shape0 = shape0[1:]
shape1 = shape1[1:]
shape2 = shape2[1:]
mask_sparse = _create_tensor(
tensor_type, device, dtype=torch.bool, shape=shape0, sparsity=sparsity
)
mask = mask_sparse.to_dense()
a = torch.randn(shape1, device=device, dtype=dtype)
b = torch.randn(shape2, device=device, dtype=dtype)
aa = a.clone()
bb = b.clone()
a.requires_grad_(True)
b.requires_grad_(True)
aa.requires_grad_(True)
bb.requires_grad_(True)
bt = b.transpose(-2, -1)
bbt = bb.transpose(-2, -1)
res_gt = masked_matmul(a, bt, mask)
res = masked_matmul(aa, bbt, mask_sparse)
res_dense = res.to_dense()
res_dense = torch.where(mask, res_dense, torch.full_like(res_dense, float("-inf")))
assert res.dtype == res_gt.dtype
assert torch.allclose(res_dense, res_gt, atol=atol)
    # try to work around non-contiguous issues with triton for now
res_gt.backward(torch.ones_like(res_gt))
res.values().backward(torch.ones_like(res.values()))
assert torch.allclose(a.grad, aa.grad, atol=atol)
assert torch.allclose(b.grad, bb.grad, atol=atol)
@pytest.mark.parametrize("tensor_type", _tensor_types)
@pytest.mark.parametrize("device", _devices)
def test_bmm(tensor_type, device):
N, C, H, W, L = 8, 2, 64, 64, 32
dtype, atol = _get_dtype_atol(tensor_type, device)
sparsity = 0.8
shape0 = (N, C, H, W)
shape1 = (N, C, W, L)
if tensor_type != BlockSparseTensor:
shape0 = shape0[1:]
shape1 = shape1[1:]
a_sparse = _create_tensor(
tensor_type, device, dtype=dtype, shape=shape0, sparsity=sparsity
)
a = a_sparse.to_dense()
mask = a != 0
a_sparse.requires_grad_(True)
a.requires_grad_(True)
b = torch.randn(shape1, device=device, dtype=dtype)
b2 = b.clone()
b.requires_grad_(True)
b2.requires_grad_(True)
res_gt = a @ b
res = a_sparse @ b2
assert res.dtype == res_gt.dtype
assert torch.allclose(
res, res_gt, atol=atol
), f"{torch.max(torch.abs(res-res_gt))} - tolerance: {atol}"
res_gt.sum().backward()
res.sum().backward()
a_grad = a.grad.clone().detach()
a_grad[~mask] = 0
assert torch.allclose(b.grad, b2.grad, atol=atol)
assert torch.allclose(
a_grad, a_sparse.grad.to_dense(), atol=atol
), f"{torch.max(torch.abs(a_grad-a_sparse.grad.to_dense()))}"
@pytest.mark.parametrize("tensor_type", _tensor_types)
@pytest.mark.parametrize("device", _devices)
def test_sparse_softmax(tensor_type, device):
N, C, H, W = 8, 2, 64, 64
dtype, atol = _get_dtype_atol(tensor_type, device)
sparsity = 0.8
shape0 = (N, C, H, W)
if tensor_type != BlockSparseTensor:
shape0 = shape0[1:]
a_sparse = _create_tensor(
tensor_type, device, dtype=dtype, shape=shape0, sparsity=sparsity
)
a = a_sparse.to_dense()
mask = a != 0
a[~mask] = float("-inf")
a_sparse.requires_grad_(True)
a.requires_grad_(True)
res_gt = torch.softmax(a, dim=-1)
res_sparse = torch.softmax(a_sparse, dim=-1)
res = res_sparse.to_dense()
assert res.dtype == res_gt.dtype
assert torch.allclose(
res, res_gt, atol=atol
), f"{torch.max(torch.abs(res- res_gt))}"
# WARNING: gradients are modified in-place!
res_sparse.values().backward(torch.ones_like(res_sparse.values()))
res_gt.backward(torch.ones_like(res_gt))
a_grad = a.grad.clone()
a_grad[~mask] = 0
assert torch.allclose(
a_grad, a_sparse.grad.to_dense(), atol=atol
), f"{torch.max(torch.abs(a_grad- a_sparse.grad.to_dense()))}"
@pytest.mark.parametrize("tensor_type", _tensor_types)
@pytest.mark.parametrize("device", _devices)
def test_deepcopy(tensor_type, device):
import copy
N, C, H, W = 8, 2, 64, 64
dtype = torch.float32
sparsity = 0.8
shape0 = (N, C, H, W)
if tensor_type != BlockSparseTensor:
shape0 = shape0[1:]
a_sparse = _create_tensor(
tensor_type, device, dtype=dtype, shape=shape0, sparsity=sparsity
)
b_sparse = copy.deepcopy(a_sparse)
assert torch.equal(a_sparse, b_sparse)
@pytest.mark.parametrize("tensor_type", _tensor_types)
@pytest.mark.parametrize("device", _devices)
def test_module_buffer(tensor_type, device):
N, C, H, W = 8, 2, 64, 64
dtype = torch.float32
sparsity = 0.8
shape0 = (N, C, H, W)
if tensor_type != BlockSparseTensor:
shape0 = shape0[1:]
a_sparse = _create_tensor(
tensor_type, device, dtype=dtype, shape=shape0, sparsity=sparsity
)
b_sparse = _create_tensor(
tensor_type, device, dtype=dtype, shape=shape0, sparsity=sparsity
)
module = torch.nn.Module()
# test that register_buffer works
module.register_buffer("a_sparse", a_sparse)
assert module.a_sparse is a_sparse
module.to(device)
assert module.a_sparse.device == torch.device(device)
state_dict = module.state_dict()
assert "a_sparse" in state_dict
assert torch.equal(a_sparse.to(device), state_dict["a_sparse"])
module.load_state_dict(state_dict)
module.load_state_dict({"a_sparse": b_sparse})
assert torch.equal(module.a_sparse, b_sparse.to(device))
| EXA-1-master | exa/libraries/xformers/tests/test_sparse_tensors.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Initially suggested by Jason Ramapuram, see
# https://github.com/facebookresearch/xformers/issues/203
import pickle
from copy import deepcopy
import pytest
from torch import nn
from xformers import _is_triton_available
from xformers.factory import xFormer, xFormerConfig
test_config = [
{
"reversible": False,
"block_type": "encoder",
"num_layers": 2,
"dim_model": 768,
"residual_norm_style": "pre",
"multi_head_config": {
"num_heads": 12,
"residual_dropout": 0.1,
"use_rotary_embeddings": True,
"attention": {
"name": "scaled_dot_product",
"dropout": 0.1,
"causal": False,
},
},
"feedforward_config": {
"name": "FusedMLP",
"dropout": 0.1,
"activation": "gelu",
"hidden_layer_multiplier": 4,
},
}
]
class ViT(nn.Module):
def __init__(self, mlp):
super().__init__()
test_config[0]["feedforward_config"]["name"] = mlp
xformer_config = xFormerConfig(test_config)
self.xformer = xFormer.from_config(xformer_config)
MLPs = ["MLP"]
if _is_triton_available():
MLPs.append("FusedMLP")
@pytest.mark.parametrize("mlp", MLPs)
def test_pickling(mlp):
test = ViT(mlp)
_ = pickle.dumps(test)
_ = deepcopy(test)
| EXA-1-master | exa/libraries/xformers/tests/test_pickling.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from xformers.components.positional_embedding import RotaryEmbedding
from xformers.components.positional_embedding.rotary import (
apply_rotary_pos_emb,
rotate_half,
)
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else [
torch.device("cuda")
    ] # save a bit on CI for now, we have separate cpu and gpu jobs
)
BATCH = 2
SEQ = 32
HEADS = 2
EMB = 32
def test_helper_methods():
# rotate_half
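    # rotate_half splits the last dimension into two halves (x1, x2) and returns
    # their rotation (-x2, x1), e.g. [0, 1, 2, 3] -> [-2, -3, 0, 1] as checked below.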
tens = torch.tensor([[0, 1, 2, 3], [3, 1, 2, 0], [0, 1, 0, 1], [1, 0, 1, 0]])
tens_rotated = rotate_half(tens)
assert torch.equal(
tens_rotated,
torch.tensor([[-2, -3, 0, 1], [-2, 0, 3, 1], [0, -1, 0, 1], [-1, 0, 1, 0]]),
)
# apply_rotary_pos_emb
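    # apply_rotary_pos_emb computes q * cos + rotate_half(q) * sin; with cos = sin = 1
    # and q = 3 everywhere, the rotated half cancels the first half (3 - 3 = 0) and
    # doubles the second one (3 + 3 = 6), which is the expected tensor below.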
cos_test = torch.ones((1, 1, 4, 4))
sin_test = cos_test.clone()
q_test = 3 * torch.ones((2, 2, 3, 4))
q_applied = apply_rotary_pos_emb(q_test, cos_test, sin_test)
assert torch.equal(
q_applied,
torch.concat(
(
torch.zeros((2, 2, 3, 2), dtype=torch.float),
6 * torch.ones((2, 2, 3, 2), dtype=torch.float),
),
dim=-1,
),
)
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_rotary_embeddings(device, dtype):
rotary = RotaryEmbedding(EMB).to(device)
# Generate dummy inputs
q = torch.ones(
(BATCH, HEADS, SEQ, EMB), device=device, dtype=dtype
) # uniform on purpose
k = q.clone()
q_rot, k_rot = rotary(q, k)
assert q_rot.dtype == q.dtype
assert k_rot.dtype == k.dtype
# Check that the sequences now encode relative position information
q, k = q.float(), k.float()
q_rot, k_rot = q_rot.float(), k_rot.float()
att = torch.einsum("bhne,bhme->bhnm", q, k)
att_rot = torch.einsum("bhne,bhme->bhnm", q_rot, k_rot)
# - the attention for the same positions is not meaningfully changed
assert torch.allclose(
torch.diag(att[0, 0, :, :]), torch.diag(att_rot[0, 0, :, :]), rtol=0.1
)
# - the post-rotary attention is more focused on the diagonal
diag_max = torch.max(torch.diag(att_rot[0, 0, :, :]))
att_rot -= diag_max
att_rot = (
att_rot <= 1e-4
) # all non diagonal elements had lower attention than diagonal (+ float tolerance)
assert torch.all(att_rot)
# Test that different sequence lengths is ok
_, _ = rotary(q[:, :, :-16, :], k)
| EXA-1-master | exa/libraries/xformers/tests/test_rotary_embeddings.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Tuple
import pytest
import torch
from xformers.components import (
InputProjection,
InputProjectionConfig,
MultiHeadDispatch,
)
# Automatically test all the registered attentions
from xformers.components.attention import (
_DENSITY_THRESHOLD,
ATTENTION_REGISTRY,
build_attention,
)
DEVICES = (
[torch.device("cpu")] if not torch.cuda.is_available() else [torch.device("cuda")]
)
BATCH = 2
SEQ = 128 if torch.cuda.is_available() else 36
MODEL = 128 if torch.cuda.is_available() else 16
GLOBAL_ATTENTION_RATIO = (
_DENSITY_THRESHOLD * 0.9
) # Make sure that we test the sparse implementation, no matter the threshold
assert ATTENTION_REGISTRY.keys(), "Attention layers should have been registered"
_non_order_invariant_attentions = ["visual", "pooling"]
def _get_multihead(
attention_name,
attn_dropout,
res_dropout,
causal,
heads,
device,
skip_output_projection=False,
    use_separate_proj_weights=True,
):
test_config = {
"name": attention_name,
"dropout": attn_dropout,
"causal": causal,
"seq_len": SEQ,
"window_size": SEQ // 8 + 1, # local attention
"attention_query_mask": torch.rand((SEQ, 1)) < GLOBAL_ATTENTION_RATIO,
"dim_model": MODEL,
"num_heads": heads,
"dim_head": MODEL / heads,
"num_rules": 2, # Compositional Attention
"r": 0.5, # random attention, ratio of tokens that the attention can attend to
}
if skip_output_projection:
def noop(x):
return x
test_config["out_proj"] = noop
# Add some blocksparse layout to test the corresponding attention
block_size = 16
test_config["layout"] = torch.eye(
SEQ // block_size, SEQ // block_size, dtype=torch.long
)
test_config["block_size"] = block_size
attention = build_attention(test_config)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
residual_dropout=res_dropout,
num_heads=heads,
attention=attention,
        use_separate_proj_weight=use_separate_proj_weights,
).to(device)
return multi_head
@pytest.mark.parametrize("attn_dropout", [0.0, 0.3])
@pytest.mark.parametrize("residual_dropout", [0.0, 0.1])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize(
"attention_name", ATTENTION_REGISTRY.keys() - _non_order_invariant_attentions
)
@pytest.mark.parametrize("device", DEVICES)
def test_order_invariance(
attention_name: str,
heads: int,
attn_dropout: float,
residual_dropout: float,
causal: bool,
device: torch.device,
):
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
multi_head = _get_multihead(
attention_name,
attn_dropout,
residual_dropout,
causal,
heads,
device,
        use_separate_proj_weights=False,
)
if (
int(math.sqrt(SEQ)) ** 2 != SEQ
and multi_head.attention.requires_squared_context
):
pytest.skip(f"{attention_name} requires squared sequence lengths")
# Check that we can pass a smaller sequence
seqs = (
[SEQ, SEQ // 2]
if not multi_head.attention.requires_same_k_q_dimensions
else [SEQ]
)
for seq in seqs:
# Check that the attention is invariant to a permutation of K, V
inputs = torch.rand(BATCH, seq, MODEL, device=device)
shuffle = torch.randperm(inputs.shape[1])
inputs_shuffled = inputs[:, shuffle, :].clone()
results = multi_head(inputs, inputs, inputs)
results_shuffled = multi_head(inputs, inputs_shuffled, inputs_shuffled)
torch.allclose(results, results_shuffled)
# Check that the attention is equivariant to a permutation of Q,
# meaning that the result is permuted in the same way
results_shuffled = multi_head(inputs_shuffled, inputs, inputs)
torch.allclose(results[:, shuffle, :], results_shuffled)
# Check that dropout actually drops some values
if attn_dropout > 0:
att_1 = multi_head(inputs, inputs_shuffled, inputs)
att_2 = multi_head(inputs, inputs_shuffled, inputs)
assert (att_1 != att_2).any()
# Test AMP, if available
if device.type == "cuda":
with torch.cuda.amp.autocast(enabled=True):
_ = multi_head(inputs, inputs_shuffled, inputs)
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ["scaled_dot_product"])
@pytest.mark.parametrize("device", DEVICES)
def test_kqv_ordering(
attention_name: str,
heads: int,
device: torch.device,
):
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
# Check kqv are not flipped
# this will not catch all issues, but would catch a V being misplaced
    # make k and q complementary, so that QKt is all zero and attention is uniform
q = torch.cat(
(
torch.rand((1, MODEL // 2), device=device),
torch.zeros((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
k = torch.cat(
(
torch.zeros((1, MODEL // 2), device=device),
torch.rand((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
v = torch.rand(BATCH, SEQ, MODEL, device=device)
# Normal call
res = multi_head(query=q, key=k, value=v)
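    # With uniform attention every query position sees the same average of v, so all
    # rows within a batch element should be identical (checked below), while different
    # batch elements still differ because v is sampled independently per batch element.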
for i in range(BATCH):
assert torch.allclose(res[i, :, :], res[i, 0, :].unsqueeze(-2))
assert not torch.allclose(res[0, :, :], res[1, :, :])
# Flip qkv, and check that we invert the above check properly
res_false = multi_head(query=v, key=k, value=q)
assert torch.allclose(res_false[0, :, :], res_false[1, :, :])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ["scaled_dot_product"])
@pytest.mark.parametrize("device", DEVICES)
def test_different_seqlen(
attention_name: str,
heads: int,
device: torch.device,
):
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
# Check kqv are not flipped
# this will not catch all issues, but would catch a V being misplaced
    # make k and q complementary, so that QKt is all zero and attention is uniform
q = torch.cat(
(
torch.rand((1, MODEL // 2), device=device),
torch.zeros((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
k = torch.cat(
(
torch.zeros((1, MODEL // 2), device=device),
torch.rand((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
v = torch.rand(BATCH, SEQ, MODEL, device=device)
# Normal call
res = multi_head(query=q, key=k, value=v)
# Changing sequence length by dividing by two to simulate differing sequence length
q2 = torch.cat(
(
torch.rand((1, MODEL // 2), device=device),
torch.zeros((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ // 2, MODEL))
k2 = torch.cat(
(
torch.zeros((1, MODEL // 2), device=device),
torch.rand((1, MODEL // 2), device=device),
),
dim=1,
).expand((BATCH, SEQ // 2, MODEL))
v2 = torch.rand(BATCH, SEQ // 2, MODEL, device=device)
res2 = multi_head(query=q2, key=k2, value=v2)
assert res.shape != res2.shape
@pytest.mark.parametrize("proj_bias", [False, True])
@pytest.mark.parametrize("same_sizes", [False, True])
@pytest.mark.parametrize("same_settings", [False, True])
def test_inproj(proj_bias: bool, same_sizes: bool, same_settings: bool):
test_config = {
"name": "scaled_dot_product",
"dropout": 0.1,
"causal": False,
"seq_len": SEQ,
"window_size": SEQ // 8 + 1,
"num_heads": 1,
"dim_head": MODEL,
}
attention = build_attention(test_config)
# Construct the initial projection, test different options
in_params = InputProjectionConfig(MODEL, MODEL, proj_bias)
if same_settings:
in_proj = InputProjection(in_params, None, None)
out_features = MODEL
else:
out_features = MODEL if same_sizes else MODEL // 2
in_params_flip = InputProjectionConfig(MODEL, out_features, proj_bias)
in_proj = InputProjection(
in_params_flip, # Q proj
in_params_flip, # K proj
in_params, # V proj
)
# build a multi head dispatch to test this attention mechanism
multi_head = MultiHeadDispatch(
seq_len=SEQ,
dim_model=MODEL,
residual_dropout=0.1,
num_heads=1,
attention=attention,
in_proj_container=in_proj,
dim_key=out_features,
dim_value=MODEL,
)
# Check kqv are not flipped
# this will not catch all issues, but would catch a V being misplaced
    # make k and q complementary, so that QKt is all zero and attention is uniform
q = torch.cat(
(
torch.rand((1, MODEL // 2)),
torch.zeros((1, MODEL // 2)),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
k = torch.cat(
(
torch.zeros((1, MODEL // 2)),
torch.rand((1, MODEL // 2)),
),
dim=1,
).expand((BATCH, SEQ, MODEL))
v = torch.rand(BATCH, SEQ, MODEL)
# just check that a FW does not assert out
_ = multi_head(query=q, key=k, value=v)
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("device", DEVICES)
def test_different_kq_dimensions(
attention_name: str,
heads: int,
device: torch.device,
):
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
if multi_head.attention.requires_same_k_q_dimensions:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support different k, q dimensions yet.")
seq_q = SEQ // 2
q = torch.rand((BATCH, seq_q, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
res = multi_head(query=q, key=k, value=v)
assert res.shape == torch.Size([BATCH, seq_q, MODEL])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize(
"batch_sizes",
[
(1, BATCH, BATCH),
(BATCH, 1, BATCH),
(BATCH, BATCH, 1),
(1, 1, BATCH),
(BATCH, 1, 1),
(1, BATCH, 1),
],
)
def test_broadcast_batch_dimension(
attention_name: str,
heads: int,
device: torch.device,
batch_sizes: Tuple[int, int, int],
):
Q_BATCH, K_BATCH, V_BATCH = batch_sizes
multi_head = _get_multihead(attention_name, 0.0, 0.0, False, heads, device)
if (
int(math.sqrt(SEQ)) ** 2 != SEQ
and multi_head.attention.requires_squared_context
):
pytest.skip(f"{attention_name} requires squared sequence lengths")
if multi_head.attention.requires_same_k_q_dimensions:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support different k, q dimensions yet.")
q = torch.rand((Q_BATCH, SEQ, MODEL), device=device)
k = torch.rand((K_BATCH, SEQ, MODEL), device=device)
v = torch.rand((V_BATCH, SEQ, MODEL), device=device)
res = multi_head(query=q, key=k, value=v)
assert res.shape == torch.Size([BATCH, SEQ, MODEL])
@pytest.mark.parametrize("heads", [1, 4])
@pytest.mark.parametrize("attention_name", ["scaled_dot_product", "favor"])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires a CUDA gpu")
def test_causal(
attention_name: str,
heads: int,
):
"""
Make sure that the causal flag is respected.
The input data is orthogonal by design if causal is respected, but if the attention looks ahead this will fail
"""
torch.random.manual_seed(42)
device = torch.device("cuda")
multi_head = _get_multihead(
attention_name,
0.0,
0.0,
causal=True,
heads=heads,
device=device,
skip_output_projection=True,
)
k = (
torch.tril(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
q = (
torch.triu(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
v = (
torch.arange(SEQ, device=device)
.float()
.unsqueeze(0)
.unsqueeze(-1)
.expand(1, -1, SEQ)
)
    # Make sure that we don't project, to keep the embeddings orthogonal
multi_head.attention.requires_input_projection = False
res = multi_head(query=q, key=k, value=v).squeeze(0)
    # Consolidate along the embedding; if causal was respected, the amplitude should already be sorted
res_sum = torch.sum(res, dim=1).cpu()
assert torch.allclose(torch.sort(res_sum)[1], torch.arange(SEQ)) or torch.allclose(
torch.sort(res_sum, descending=True)[1], torch.arange(SEQ)
), res_sum
@pytest.mark.parametrize("attn_dropout", [0.0, 0.1])
@pytest.mark.parametrize("heads", [2])
@pytest.mark.parametrize("attention_name", ATTENTION_REGISTRY.keys())
@pytest.mark.skipif(torch.cuda.is_available(), reason="CUDA gpu not supported yet")
def test_torch_script_ability(
attention_name: str,
heads: int,
attn_dropout: float,
):
if attention_name in {"favor", "global", "local", "random"}:
# pyre-fixme[29]: The library function `pytest.skip` is not supported by Pyre.
pytest.skip(f"{attention_name} does not support scripting yet.")
device = torch.device("cpu")
multi_head = _get_multihead(attention_name, attn_dropout, 0.0, False, heads, device)
if (
int(math.sqrt(SEQ)) ** 2 != SEQ
and multi_head.attention.requires_squared_context
):
pytest.skip(f"{attention_name} requires squared sequence lengths")
# input for tracing the function
q = torch.rand((BATCH, SEQ, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
# to make sure dropout behaves deterministically
torch.random.manual_seed(42)
# tracing the attention module
traced_multi_head = torch.jit.trace(multi_head, (q, k, v))
# create new random inputs for testing the eager model and traced model
q = torch.rand((BATCH, SEQ, MODEL), device=device)
k = torch.rand((BATCH, SEQ, MODEL), device=device)
v = torch.rand((BATCH, SEQ, MODEL), device=device)
# to make sure dropout behaves deterministically need to set the seed again
torch.random.manual_seed(42)
res = multi_head(query=q, key=k, value=v)
# to make sure dropout behaves deterministically need to set the seed again
torch.random.manual_seed(42)
res_traced = traced_multi_head(query=q, key=k, value=v)
assert torch.allclose(res, res_traced)
# TODO: way more unit tests..
| EXA-1-master | exa/libraries/xformers/tests/test_attentions.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention import GlobalAttention, ScaledDotProduct
def test_global_attention():
b, s, d = 2, 90, 40
torch.cuda.manual_seed(42)
torch.manual_seed(42)
def test_ratio(global_attention_ratio: float):
# Make sure that Global and Normal attention get the same results for the corresponding tokens
a = torch.rand(b, s, d)
config = {
"name": "global",
"dropout": 0.0,
"causal": False,
"max_seq_len": s,
"attention_query_mask": torch.rand((s, 1)) < global_attention_ratio,
}
global_attention = GlobalAttention(**config)
sdp_attention = ScaledDotProduct(**config)
r_global = global_attention(a, a, a)
r_dense = sdp_attention(a, a, a)
# Check that the tokens which have access to the full attention give the same
# results as the monolithic dense scaled_dot_product
mask = config["attention_query_mask"][:, 0]
assert torch.allclose(r_global[:, mask, :], r_dense[:, mask, :])
# Test with different levels of sparsity, to make sure that all the paths are covered
test_ratio(0.02)
test_ratio(0.5)
test_ratio(1.0) # All queries allowed
| EXA-1-master | exa/libraries/xformers/tests/test_global_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from xformers.factory.hydra_helper import import_xformer_config_schema
def test_import_schema():
import_xformer_config_schema()
cs = ConfigStore.instance()
groups = cs.list("xformers")
# check all groups registered
assert groups == ["attention", "ff", "pe"]
# check the attention is registered
attentions = cs.list("xformers/attention")
assert "favor_schema.yaml" in attentions
| EXA-1-master | exa/libraries/xformers/tests/test_hydra_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
import xformers.ops
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("dim", [0, 1, 2, 3, 4])
def test_unbind(dim: int, contiguous: bool):
x = torch.randn([10, 20, 4, 10, 3])
x2 = x.clone()
if not contiguous:
perm = list(range(x.ndim))
random.Random(dim).shuffle(perm)
# Let's hope we didn't pick identity
x = x.permute(perm)
x2 = x2.permute(perm)
assert contiguous == x.is_contiguous()
x.requires_grad_(True)
x2.requires_grad_(True)
# FW
tensors = xformers.ops.unbind(x, dim)
tensors2 = torch.unbind(x2, dim)
assert len(tensors) == len(tensors2)
for t1, t2 in zip(tensors, tensors2):
assert torch.allclose(t1, t2)
# BW
grads = torch.unbind(torch.randn(x.shape), dim)
zero = torch.zeros_like(tensors[0])
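    # Accumulate g * t over every unbound slice so that each tensor contributes to the
    # loss and receives a gradient through both the xformers and the torch.unbind path.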
loss1 = sum(((g * t) for (g, t) in zip(grads, tensors)), zero)
loss2 = sum(((g * t) for (g, t) in zip(grads, tensors2)), zero)
assert torch.allclose(loss1, loss2)
g = torch.randn_like(loss1)
loss1.backward(g)
loss2.backward(g)
assert x.grad is not None
assert x2.grad is not None
assert torch.allclose(x.grad, x2.grad)
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("dim", [0, 1, 2, 3, 4])
def test_unbind_get_stack_strides(dim: int, contiguous: bool):
def not_stacked(t, d):
return xformers.ops.get_stack_strides(t, d) is None
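    # get_stack_strides returns the strides a stacked view would have when the slices
    # share storage and can be re-viewed without a copy, and None otherwise (e.g. for
    # different storages or cloned tensors), which is what not_stacked checks below.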
x = torch.randn([10, 20, 4, 4, 3])
ndim = x.ndim
# Non-contiguous tensors
if not contiguous:
x = x.transpose(dim, (dim + 1) % ndim)
assert contiguous == x.is_contiguous()
tensors = xformers.ops.unbind(x, dim)
tensors2 = torch.unbind(x.clone(), dim)
for cat_dim in range(ndim):
permute = list(range(ndim))
permute.pop(dim)
permute.insert(cat_dim, dim)
x_permuted = x.permute(permute)
assert not_stacked([tensors2[0], tensors[1]], cat_dim), "different storage"
assert not_stacked(
[tensors[0], tensors[1].clone()], cat_dim
), "different storage"
def test_slice(s):
slices = [slice(None) for _ in range(ndim)]
slices[cat_dim] = s
reference = x_permuted[tuple(slices)]
stacked = xformers.ops.stack_or_none(tensors[s], cat_dim)
assert stacked is not None
assert (
xformers.ops.get_stack_strides(tensors[s], cat_dim)
== reference.stride()
)
assert torch.allclose(stacked, torch.stack(tensors2[s], cat_dim))
assert stacked.storage().data_ptr() == tensors[0].storage().data_ptr()
# tensors
test_slice(slice(None))
# tensors[1:]
test_slice(slice(1, None))
# tensors[:2]
test_slice(slice(None, 2))
# tensors[::2]
test_slice(slice(None, None, 2))
| EXA-1-master | exa/libraries/xformers/tests/test_unbind.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.components.attention import OrthoFormerAttention, ScaledDotProduct
from xformers.components.attention.utils import maybe_merge_masks
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
@pytest.mark.parametrize(
"landmark_selection", ["orthogonal", "kmeans", "kmeans_spherical", "random"]
)
@pytest.mark.parametrize("num_landmarks", [30, 33, 905])
@pytest.mark.parametrize("subsample_fraction", [1.0, 0.3])
def test_ortho_attention(
landmark_selection: str, num_landmarks: int, subsample_fraction: float
):
    # TODO: this test fails when the conv_kernel_size parameter is not set to None. Investigate.
b, s, d = 8, 900, 32
num_heads = 2
seed = 42
torch.random.manual_seed(seed)
random.seed(seed)
ortho_config = {
"name": "orthoformer",
"dropout": 0.0,
"num_landmarks": num_landmarks,
"num_heads": num_heads,
"landmark_selection": landmark_selection,
"subsample_fraction": subsample_fraction,
}
sdp_config = {
"name": "scaled_dot_product",
"dropout": 0.0,
}
a = torch.rand(b, s, d, device=torch.device("cuda"))
def test_close_to_sdp():
# Make sure that Ortho and Normal attention are not too far off.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
r_ortho = ortho_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
# Make sure that OrthoFormerAttention and Normal attention are not too far off.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
r_ortho = ortho_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
def test_att_mask_ignored():
# If an sxs attention mask is passed in, it should be ignored.
# Results should be the same as if no mask was passed in.
ortho_attention = OrthoFormerAttention(**ortho_config).cuda()
sdp_attention = ScaledDotProduct(**sdp_config).cuda()
key_padding_mask = None
att_mask = torch.randint(0, 2, (s, s), device=torch.device("cuda")).to(
dtype=torch.bool
)
sdp_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_ortho = ortho_attention(
a, a, a, att_mask=att_mask, key_padding_mask=key_padding_mask
)
r_sdp = sdp_attention(a, a, a, att_mask=sdp_mask)
assert torch.allclose(r_ortho, r_sdp, rtol=0.02, atol=1e-1)
test_close_to_sdp()
test_att_mask_ignored()
| EXA-1-master | exa/libraries/xformers/tests/test_ortho_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers import _is_triton_available
if _is_triton_available():
from xformers.benchmarks.benchmark_pytorch_transformer import evaluate, train
from xformers.factory.model_factory import xFormer, xFormerConfig
BATCH = 20
SEQ = 32
EMB = 8
VOCAB = 8
HEADS = 4
DROP = 0.1
LAYERS = 2
ACTIVATION = "relu"
_test_config_encoder = {
"block_type": "encoder",
"dim_model": EMB,
"num_layers": LAYERS,
"residual_norm_style": "post",
"multi_head_config": {
"num_heads": HEADS,
"residual_dropout": DROP,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"seq_len": SEQ,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": DROP,
"activation": ACTIVATION,
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config_decoder = {
"block_type": "decoder",
"dim_model": EMB,
"num_layers": LAYERS,
"residual_norm_style": "post",
"multi_head_config_masked": {
"num_heads": HEADS,
"residual_dropout": DROP,
"dim_model": EMB,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"seq_len": SEQ,
},
},
"multi_head_config_cross": {
"num_heads": HEADS,
"residual_dropout": DROP,
"dim_model": EMB,
"bias": True,
"attention": {
"name": "scaled_dot_product",
"dropout": DROP,
"seq_len": SEQ,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": DROP,
"activation": ACTIVATION,
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config = [_test_config_encoder, _test_config_decoder]
def reset_seeds():
torch.manual_seed(42)
torch.cuda.manual_seed(42)
random.seed(42)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a gpu"
)
def test_pytorch_encoder_parity(device=torch.device("cuda")):
# Build both a xFormers and Pytorch model
reset_seeds()
model_xformers = xFormer.from_config(xFormerConfig([_test_config_encoder])).to(
device
)
print(model_xformers)
model_pytorch = torch.nn.TransformerEncoder(
torch.nn.TransformerEncoderLayer(
d_model=EMB,
nhead=HEADS,
dim_feedforward=4 * EMB,
dropout=DROP,
activation=ACTIVATION,
batch_first=True, # (batch, seq, feature)
device=device,
),
num_layers=LAYERS,
)
print(model_pytorch)
optim_xformers = torch.optim.SGD(
model_xformers.parameters(), lr=1e-3, momentum=0.9
)
optim_pytorch = torch.optim.SGD(
model_pytorch.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_start_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("starting point: ", eval_start_pytorch, eval_start_xformer)
train(model_pytorch, optim_pytorch, "pytorch", 500, BATCH, SEQ, EMB, device)
train(model_xformers, optim_xformers, "xformers", 500, BATCH, SEQ, EMB, device)
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_stop_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("end point: ", eval_stop_pytorch, eval_stop_xformer)
fit_ratio_xformer = eval_start_xformer / eval_stop_xformer
fit_ratio_pytorch = eval_start_pytorch / eval_stop_pytorch
print("fit ratios: ", fit_ratio_pytorch, fit_ratio_xformer)
# Catch a broken training
assert fit_ratio_xformer > 120
assert fit_ratio_pytorch > 120
# Catch a significant difference in between the two
assert (
abs(eval_start_xformer - eval_start_pytorch) < 1e-6
) # initial eval is about 25, arbitrary limits
assert (
abs(eval_stop_xformer - eval_stop_pytorch) < 1e-1
) # final eval is about 0.2, arbitrary limits
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="This test requires a gpu"
)
    def test_pytorch_transformer_parity(device=torch.device("cuda")):
# Build both a xFormers and Pytorch model
reset_seeds()
model_xformers = xFormer.from_config(xFormerConfig(_test_config)).to(device)
print(model_xformers)
model_pytorch = torch.nn.Transformer(
d_model=EMB,
nhead=HEADS,
num_encoder_layers=LAYERS,
num_decoder_layers=LAYERS,
dim_feedforward=4 * EMB,
dropout=DROP,
activation=ACTIVATION,
batch_first=True, # (batch, seq, feature)
device=device,
)
print(model_pytorch)
optim_xformers = torch.optim.SGD(
model_xformers.parameters(), lr=1e-3, momentum=0.9
)
optim_pytorch = torch.optim.SGD(
model_pytorch.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_start_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("starting point: ", eval_start_pytorch, eval_start_xformer)
train(model_xformers, optim_xformers, "xformers", 100, BATCH, SEQ, EMB, device)
train(model_pytorch, optim_pytorch, "pytorch", 100, BATCH, SEQ, EMB, device)
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_xformer = evaluate(model_xformers, BATCH, SEQ, EMB, device)
eval_stop_pytorch = evaluate(model_pytorch, BATCH, SEQ, EMB, device)
print("end point: ", eval_stop_pytorch, eval_stop_xformer)
fit_ratio_xformer = eval_start_xformer / eval_stop_xformer
fit_ratio_pytorch = eval_start_pytorch / eval_stop_pytorch
print(fit_ratio_pytorch, fit_ratio_xformer)
assert fit_ratio_xformer > 50
assert fit_ratio_pytorch > 50
| EXA-1-master | exa/libraries/xformers/tests/test_pytorch_transformer_parity.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import xformers
SHAPES = [
(384, 128),
(8 * 384, 128),
(34, 128),
(16, 128),
(16, 512),
(8, 384),
(8, 1024),
(8, 2048),
(8, 4096),
(8, 4096),
(4, 12288),
]
_triton_available = xformers._is_triton_available()
if _triton_available:
try:
import triton
import triton.language as tl
from xformers.triton.sum_strided import sum_2d_dim_0
except (ImportError, ModuleNotFoundError):
_triton_available = False
if _triton_available:
@triton.jit
def k_mean(X, Mean, Var, stride, N, BLOCK_SIZE_N: tl.constexpr):
# fmt: on
"""
        Fused mean/variance kernel over a 2D tensor.
        Each program instance reduces one row of X (length N) and writes the
        per-row mean and variance (normalized by N) to Mean and Var.
"""
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
# Move to this row
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
# Compute variance
x_mean = tl.sum(x, axis=0) / N
x_zm = x - x_mean
x_zm = tl.where(cols < N, x_zm, 0.0)
x_var = tl.sum(x_zm * x_zm, axis=0) / N
tl.store(Mean + row, x_mean)
tl.store(Var + row, x_var)
def stats(x: torch.Tensor):
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE_N:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps.
num_warps = min(max(BLOCK_SIZE_N // 256, 1), 8)
mean = torch.zeros((M,)).cuda()
var = torch.zeros((M,)).cuda()
# enqueue kernel
# fmt: off
k_mean[(M,)](
x_arg, mean, var,
x_arg.stride(0),
N,
num_warps=num_warps,
BLOCK_SIZE_N=BLOCK_SIZE_N
)
# fmt: on
return mean.reshape(x.shape[:-1]), var.reshape(x.shape[:-1])
def test_mean():
torch.random.manual_seed(0)
a = torch.rand((4, 2048, 384), device=torch.device("cuda"))
mean, var = stats(a)
t_mean = torch.mean(a, dim=-1)
t_var = torch.var(a, dim=-1)
print(mean)
print(t_mean)
print(var)
print(t_var)
assert torch.allclose(mean, t_mean, rtol=1e-1)
assert torch.allclose(var, t_var, rtol=1e-1)
@pytest.mark.parametrize("shape", SHAPES)
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_sum_strided(shape, dtype):
torch.random.manual_seed(0)
a = torch.rand(shape, device=torch.device("cuda"), dtype=dtype)
torch_sum = torch.sum(a, dim=0)
triton_sum = sum_2d_dim_0(a)
assert torch.allclose(
torch_sum, triton_sum, rtol=0.01
), f"{torch_sum}\n{triton_sum}"
def test_sum_strided_asserts():
torch.random.manual_seed(0)
a = torch.rand((128, 256), device=torch.device("cuda"), dtype=torch.float16)
with pytest.raises(AssertionError):
# This kernel is not useful in that case, assert to prevent misuse
sum_2d_dim_0(a.transpose(1, 0))
a = torch.rand((3, 128, 256), device=torch.device("cuda"), dtype=torch.float16)
with pytest.raises(AssertionError):
# This kernel expects 2D tensors, assert to prevent misuse
sum_2d_dim_0(a)
@triton.jit
def k_rand(X, Y, SEED_X, SEED_Y, stride_x, stride_y, N: tl.constexpr):
# fmt: on
"""
Check the random number generation
"""
row = tl.program_id(0)
# Generate random numbers with seed A
rand_offsets = tl.arange(0, N)
seed_x = tl.load(SEED_X + row)
randx, _, _, _ = tl.randint4x(seed_x, rand_offsets)
rand_offsets = tl.arange(0, N)
seed_y = tl.load(SEED_Y + row)
randy, _, _, _ = tl.randint4x(seed_y, rand_offsets)
# Move to this row
tl.store(X + row * stride_x + tl.arange(0, N), randx)
tl.store(Y + row * stride_y + tl.arange(0, N), randy)
def test_rand():
# Check that the random generator used in triton works fine
torch.random.manual_seed(0)
x = torch.zeros((512, 32), device=torch.device("cuda"), dtype=torch.int32)
y = torch.zeros((512, 32), device=torch.device("cuda"), dtype=torch.int32)
M, N = x.shape
seeds_x = torch.randint(65536, (M,), device=x.device)
seeds_y = torch.randint(65536, (M,), device=x.device)
assert not torch.allclose(seeds_x, seeds_y)
# enqueue kernels, one per line
# fmt: off
k_rand[(M,)](
x, y,
seeds_x, seeds_y,
x.stride(0), y.stride(0),
N,
)
# fmt: on
assert not torch.allclose(x, y)
| EXA-1-master | exa/libraries/xformers/tests/test_triton_basics.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import nullcontext
import pytest
import torch
import xformers.factory.weight_init as xformers_weight_init
from xformers.factory import xFormer, xFormerConfig, xFormerWeightInit
BATCH = 2
SEQ = 16
EMB = 16
VOC = 16
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else [
torch.device("cuda")
    ] # save a bit on CI for now, we have separate cpu and gpu jobs
)
encoder_configs = {
"reversible": False,
"block_type": "encoder",
"dim_model": EMB,
"residual_norm_style": "pre",
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOC,
"dim_model": EMB,
},
"num_layers": 3,
"multi_head_config": {
"num_heads": 4,
"residual_dropout": 0,
"attention": {
"name": "scaled_dot_product",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
"number_of_experts": 4,
"gate_config": "top_2",
},
}
decoder_configs = {
"block_type": "decoder",
"dim_model": EMB,
"residual_norm_style": "pre",
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOC,
"dim_model": EMB,
},
"num_layers": 2,
"multi_head_config_masked": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "scaled_dot_product",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"multi_head_config_cross": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "scaled_dot_product",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
test_configs_list = [encoder_configs, decoder_configs]
test_configs_dict = {"encoder": encoder_configs, "decoder": decoder_configs}
""" Test all the model configurations saved in model_presets. """
@pytest.mark.parametrize("config", [test_configs_list, test_configs_dict])
@pytest.mark.parametrize("reversible", [True, False])
@pytest.mark.parametrize("tie_embedding_weights", [True, False])
@pytest.mark.parametrize("residual_norm_style", ["pre", "post", "deepnorm"])
@pytest.mark.parametrize("device", DEVICES)
def test_presets(
config, reversible, tie_embedding_weights, residual_norm_style, device
):
torch.cuda.manual_seed(42)
torch.manual_seed(42)
# Build the model
if isinstance(config, list):
# Only the encoder can be reversible
config[0]["reversible"] = reversible
config[0]["residual_norm_style"] = residual_norm_style
config[1]["residual_norm_style"] = residual_norm_style
else:
config["encoder"]["reversible"] = reversible
config["encoder"]["residual_norm_style"] = residual_norm_style
config["decoder"]["residual_norm_style"] = residual_norm_style
modelConfig = xFormerConfig(config, tie_embedding_weights)
if isinstance(modelConfig.stack_configs, dict):
for _, blockConfig in modelConfig.stack_configs.items():
assert blockConfig.layer_position
else:
for blockConfig in modelConfig.stack_configs:
assert blockConfig.layer_position
context = (
pytest.raises(AssertionError)
if reversible and (tie_embedding_weights or residual_norm_style == "deepnorm")
else nullcontext()
)
with context:
model = xFormer.from_config(modelConfig).to(device)
def check_against_default(p):
# check that a different gain than 1 was used
vanilla = p.clone()
torch.nn.init.xavier_normal_(p, gain=1)
change = torch.abs((torch.std(vanilla) - torch.std(p)) / torch.std(p))
assert change > 0.1
# Check deepnorm init, if applicable
if residual_norm_style == "deepnorm":
for n, p in model.encoders.named_parameters():
# Check the MHA
if "in_proj_weight" in n:
# self attention projection, check that the value projection has been changed
M, _ = p.shape
K = M // 3
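                    # in_proj_weight stacks the projection weights along dim 0 (assuming
                    # the usual q, k, v ordering, with the value block last as the check
                    # below implies), so p[:K] / p[K:-K] / p[-K:] address q / k / v.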
value_rel_std = torch.abs(
torch.std(p[:K, :]) - torch.std(p[-K:, :])
)
qp_rel_std = torch.abs(torch.std(p[:K, :]) - torch.std(p[K:-K, :]))
# Check that the value proj init has been changed by more than the noise
assert (
value_rel_std / qp_rel_std > 2
), f"{(value_rel_std/qp_rel_std)}"
if "v_proj_weight" in n:
check_against_default(p)
if "mha.proj" in n and "weight" in n:
check_against_default(p)
# Check the feedforward
if "feedforward" in n and "weight" in n:
check_against_default(p)
# Dummy inputs, test a forward
inputs = (torch.rand((BATCH, SEQ), device=device) * 10).abs().to(torch.int)
input_mask = torch.randn(SEQ, dtype=torch.float, device=device)
input_mask[input_mask < 0.0] = -float("inf")
outputs = model(
inputs, encoder_input_mask=input_mask, decoder_input_mask=input_mask
)
# Test a BW
loss = torch.sum(torch.abs(outputs))
loss.backward()
# If we requested tied embedding weights, check that this is the case indeed
if tie_embedding_weights and not reversible:
assert model.encoders[0].pose_encoding == model.decoders[0].pose_encoding
@pytest.mark.parametrize("weight_init", [w.value for w in xFormerWeightInit])
@pytest.mark.parametrize("feedforward", ["MLP", "Conv2DFeedforward"])
@pytest.mark.parametrize("deepnorm", [False, True])
@pytest.mark.parametrize("device", DEVICES)
def test_weight_init(weight_init, feedforward, deepnorm, device):
torch.cuda.manual_seed(42)
torch.manual_seed(42)
config = test_configs_dict
if deepnorm:
config["encoder"]["residual_norm_style"] = "deepnorm"
config["encoder"]["feedforward_config"]["name"] = feedforward
config["decoder"]["residual_norm_style"] = "deepnorm"
# Make sure that all the init methods catch all the weights
xformers_weight_init._assert_if_not_initialized = True
# Build the model
config_instance = xFormerConfig( # noqa
config, tie_embedding_weights=False, weight_init=weight_init
)
_ = xFormer.from_config(config_instance).to(device)
| EXA-1-master | exa/libraries/xformers/tests/test_model_factory.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.components.attention import NystromAttention, ScaledDotProduct
from xformers.components.attention.utils import maybe_merge_masks
@pytest.mark.parametrize("pinverse_original_init", [True, False])
@pytest.mark.parametrize("use_razavi_pinverse", [True, False])
@pytest.mark.parametrize("num_landmarks", [30, 33, 905])
def test_nystrom_attention_close_to_sdp(
pinverse_original_init: bool,
use_razavi_pinverse: bool,
num_landmarks: int,
):
    # TODO: this test fails when the conv_kernel_size parameter is not set to None. Investigate.
b, s, d = 2, 900, 40
num_heads = 2
seed = 42
torch.random.manual_seed(seed)
random.seed(seed)
nystrom_config = {
"name": "nystrom",
"dropout": 0.0,
"num_landmarks": num_landmarks,
"num_heads": num_heads,
"pinverse_original_init": pinverse_original_init,
"use_razavi_pinverse": use_razavi_pinverse,
}
sdp_config = {
"name": "scaled_dot_product",
"dropout": 0.0,
}
a = torch.rand(b, s, d)
def test_close_to_sdp():
# Make sure that Nystrom and Normal attention are not too far off.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
r_nystrom = nystrom_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
# Make sure that Nystrom and Normal attention are not too far off.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
r_nystrom = nystrom_attention(a, a, a, att_mask=None)
r_sdp = sdp_attention(a, a, a, att_mask=None)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
test_close_to_sdp()
@pytest.mark.parametrize("pinverse_original_init", [True])
@pytest.mark.parametrize("use_razavi_pinverse", [True])
@pytest.mark.parametrize("num_landmarks", [30])
def test_nystrom_attention(
pinverse_original_init: bool,
use_razavi_pinverse: bool,
num_landmarks: int,
):
    # TODO: this test fails when the conv_kernel_size parameter is not set to None. Investigate.
b, s, d = 2, 900, 40
num_heads = 2
seed = 42
torch.random.manual_seed(seed)
random.seed(seed)
nystrom_config = {
"name": "nystrom",
"dropout": 0.0,
"num_landmarks": num_landmarks,
"num_heads": num_heads,
"pinverse_original_init": pinverse_original_init,
"use_razavi_pinverse": use_razavi_pinverse,
}
sdp_config = {
"name": "scaled_dot_product",
"dropout": 0.0,
}
a = torch.rand(b, s, d)
def test_att_mask_ignored():
# If an sxs attention mask is passed in, it should be ignored.
# Results should be the same as if no mask was passed in.
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
key_padding_mask = None
att_mask = torch.randint(0, 2, (s, s)).to(dtype=torch.bool)
sdp_mask = maybe_merge_masks(
att_mask=None,
key_padding_mask=key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_nystrom = nystrom_attention(
a, a, a, att_mask=att_mask, key_padding_mask=key_padding_mask
)
r_sdp = sdp_attention(a, a, a, att_mask=sdp_mask)
assert torch.allclose(r_nystrom, r_sdp, rtol=0.005, atol=1e-2)
def test_masking():
# FIXME
# nystrom_config["causal"] = True
# sdp_config["causal"] = True
nystrom_attention = NystromAttention(**nystrom_config)
sdp_attention = ScaledDotProduct(**sdp_config)
key_padding_mask = torch.rand((b // num_heads, s)) > 0.1
att_mask = None
mask = maybe_merge_masks(
att_mask,
key_padding_mask,
batch_size=b // num_heads,
src_len=s,
num_heads=num_heads,
)
r_nystrom = nystrom_attention(a, a, a, key_padding_mask=key_padding_mask)
r_sdp = sdp_attention(a, a, a, att_mask=mask)
# Not very close, but more so testing functionality.
assert torch.allclose(
r_nystrom, r_sdp, rtol=0.1, atol=0.5
), f"max diff {torch.max(torch.abs(r_nystrom-r_sdp))}"
# Error when key padding mask doesn't have expected dimensions.
key_padding_mask = torch.randint(0, 2, (s, b)).to(dtype=torch.bool)
with pytest.raises(AssertionError):
nystrom_attention(a, a, a, key_padding_mask=key_padding_mask)
test_att_mask_ignored()
test_masking()
| EXA-1-master | exa/libraries/xformers/tests/test_nystrom_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from xformers.factory.model_factory import xFormer, xFormerConfig
BATCH = 2
SEQ = 64
EMB = 48
VOCAB = 16
DEVICES = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else [torch.device("cuda")] # save a bit on CI, we have seperate cpu and gpu jobs
)
_test_config_encoder = {
"reversible": False,
"block_type": "encoder",
"dim_model": EMB,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOCAB,
"dim_model": EMB,
},
"num_layers": 3,
"multi_head_config": {
"num_heads": 4,
"residual_dropout": 0,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
"dim_model": EMB,
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
_test_config_decoder = {
"block_type": "decoder",
"dim_model": EMB,
"position_encoding_config": {
"name": "vocab",
"seq_len": SEQ,
"vocab_size": VOCAB,
"dim_model": EMB,
},
"num_layers": 2,
"multi_head_config_masked": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"multi_head_config_cross": {
"num_heads": 4,
"residual_dropout": 0,
"dim_model": EMB,
"attention": {
"name": "linformer",
"dropout": 0,
"causal": True,
"seq_len": SEQ,
},
},
"feedforward_config": {
"name": "MLP",
"dropout": 0,
"activation": "relu",
"hidden_layer_multiplier": 4,
"dim_model": EMB,
},
}
# Test a pure encoder, a pure decoder, an encoder/decoder stack
_test_configs = [
[_test_config_encoder, _test_config_decoder],
[_test_config_encoder],
]
def _rev_config(config, flag: bool):
for c in filter(
lambda x: x["block_type"] == "encoder",
config,
):
c["reversible"] = flag
return config
@pytest.mark.parametrize("config", _test_configs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_runs(config, device):
# Build both a reversible and non-reversible model
model_non_reversible = xFormer.from_config(
xFormerConfig(_rev_config(config, False))
).to(device)
model_reversible = xFormer.from_config(xFormerConfig(_rev_config(config, True))).to(
device
)
# Dummy inputs, test a forward
inputs = (torch.rand((BATCH, SEQ), device=device) * 10).abs().to(torch.int)
_ = model_non_reversible(inputs)
_ = model_reversible(inputs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_no_alternate(device):
# Check that we cannot build a non-coherent stack
with pytest.raises(AssertionError):
rev = dict(_test_config_encoder) # we need to make a copy
rev["reversible"] = True
non_rev = dict(_test_config_encoder)
non_rev["reversible"] = False
_ = xFormer.from_config(xFormerConfig([rev, non_rev])).to(device)
@pytest.mark.parametrize("config", _test_configs)
@pytest.mark.parametrize("device", DEVICES)
def test_reversible_train(config, device):
torch.manual_seed(0)
random.seed(0)
    # Dummy inputs, test some training to make sure that both models can approximate the same thing to some extent
# This is not super scientific, more of a foolproof catch
def data():
input_a = torch.zeros((BATCH, SEQ), device=device).to(torch.int)
input_b = (torch.rand((BATCH, SEQ), device=device) * VOCAB).abs().to(torch.int)
target_a = torch.zeros((BATCH, SEQ), device=device)
target_b = torch.ones((BATCH, SEQ), device=device)
if random.random() > 0.5:
return torch.cat([input_a, input_b], dim=0), torch.cat(
[target_a, target_b], dim=0
)
return torch.cat([input_b, input_a], dim=0), torch.cat(
[target_b, target_a], dim=0
)
def step(model: torch.nn.Module, optim: torch.optim.Optimizer):
batch, target = data()
model.train()
optim.zero_grad()
outputs = model(batch)
loss = torch.norm(torch.mean(outputs, dim=-1) - target)
loss.backward()
# Clip grad and error out if we're producing NaNs, part of the unit test
torch.nn.utils.clip_grad_norm_(
model.parameters(), 10.0, norm_type=2.0, error_if_nonfinite=True
)
optim.step()
return loss.item()
def evaluate(model: torch.nn.Module):
batch, target = data()
model.eval()
outputs = model(batch)
return torch.norm(torch.mean(outputs, dim=-1) - target).item()
# Build both a reversible and non-reversible model
model_non_reversible = xFormer.from_config(
xFormerConfig(_rev_config(config, False))
).to(device)
model_reversible = xFormer.from_config(xFormerConfig(_rev_config(config, True))).to(
device
)
optim_rev = torch.optim.SGD(model_reversible.parameters(), lr=1e-3, momentum=0.9)
optim_non_rev = torch.optim.SGD(
model_non_reversible.parameters(), lr=1e-3, momentum=0.9
)
# Check that both models can be trained to comparable results
eval_start_rev = evaluate(model_reversible)
eval_start_non_rev = evaluate(model_non_reversible)
for i in range(100):
print(i, " reversible: ", step(model_reversible, optim_rev))
print(i, " non reversible: ", step(model_non_reversible, optim_non_rev))
# Check that we can classify this dummy example
# Arbitrary threshold
eval_stop_rev = evaluate(model_reversible)
eval_stop_non_rev = evaluate(model_non_reversible)
if len(config) < 2: # only check the encoder case
train_ratio_rev = eval_start_rev / eval_stop_rev
train_ratio_non_rev = eval_start_non_rev / eval_stop_non_rev
# Assert that train ratio > 1 (we trained),
# and reversible is not much worse than non-reversible (it's actually better on this dummy test)
assert train_ratio_rev > 1
assert train_ratio_non_rev > 1
assert train_ratio_rev > train_ratio_non_rev
| EXA-1-master | exa/libraries/xformers/tests/test_reversible.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# type: ignore
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
from typing import Any, List
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "xFormers"
copyright = "Copyright © 2021 Meta Platforms, Inc"
author = "Facebook AI Research"
root_dir = Path(__file__).resolve().parent.parent.parent
# The full version, including alpha/beta/rc tags
release = (root_dir / "version.txt").read_text().strip()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon", # support NumPy and Google style docstrings
"recommonmark",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
]
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"torch": ("https://pytorch.org/docs/master", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[Any] = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
html_theme = "pytorch_sphinx_theme"
templates_path = ["_templates"]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"includehidden": True,
"canonical_url": "https://facebookresearch.github.io/xformers",
"pytorch_project": "docs",
"logo_only": True, # default = False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# setting custom stylesheets https://stackoverflow.com/a/34420612
html_context = {"css_files": ["_static/css/customize.css"]}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "xformersdocs"
github_doc_root = "https://github.com/facebookresearch/xformers/tree/main/docs/"
# Over-ride PyTorch Sphinx css
def setup(app):
app.add_config_value(
"recommonmark_config",
{
"url_resolver": lambda url: github_doc_root + url,
"auto_toc_tree_section": "Contents",
"enable_math": True,
"enable_inline_math": True,
"enable_eval_rst": True,
"enable_auto_toc_tree": True,
},
True,
)
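    # AutoStructify applies the recommonmark options above (eval_rst, math, auto toc trees) to the
    # Markdown sources; url_resolver rewrites relative links against github_doc_root.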
app.add_transform(AutoStructify)
app.add_css_file("css/customize.css")
| EXA-1-master | exa/libraries/xformers/docs/source/conf.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytorch_lightning as pl
import torch
from pl_bolts.datamodules import CIFAR10DataModule
from torch import nn
from torchmetrics import Accuracy
from examples.cifar_ViT import Classifier, VisionTransformer
from xformers.factory import xFormer, xFormerConfig
from xformers.helpers.hierarchical_configs import (
BasicLayerConfig,
get_hierarchical_configuration,
)
# This is very close to the cifar_ViT example and reuses a lot of the training code; only the model part is different.
# There are many ways to write down a MetaFormer with xformers, for instance by
# picking up the parts from `xformers.components` and implementing the model explicitly,
# or by patching another existing ViT-like implementation.
# This example takes another approach: the whole model configuration is defined in one go (as a dict structure),
# and the xformers factory then generates the model. This hides a lot of the model building
# (though you can inspect the resulting implementation), but makes it trivial to run a hyperparameter search.
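# A rough sketch of that factory flow (illustration only, mirroring the class below):
#
#   layer_cfgs = [BasicLayerConfig(embedding=64, attention_mechanism="scaled_dot_product",
#                                  patch_size=3, stride=2, padding=1,
#                                  seq_len=32 * 32 // 4, feedforward="MLP", repeat_layer=1), ...]
#   cfg = get_hierarchical_configuration(layer_cfgs, residual_norm_style="pre",
#                                        use_rotary_embeddings=True, mlp_multiplier=4, dim_head=32)
#   trunk = xFormer.from_config(xFormerConfig(cfg))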
class MetaVisionTransformer(VisionTransformer):
def __init__(
self,
steps,
learning_rate=5e-3,
betas=(0.9, 0.99),
weight_decay=0.03,
image_size=32,
num_classes=10,
dim=384,
attention="scaled_dot_product",
feedforward="MLP",
residual_norm_style="pre",
use_rotary_embeddings=True,
linear_warmup_ratio=0.1,
classifier=Classifier.GAP,
):
super(VisionTransformer, self).__init__()
# all the inputs are saved under self.hparams (hyperparams)
self.save_hyperparameters()
# Generate the skeleton of our hierarchical Transformer
# - This is a small poolformer configuration, adapted to the small CIFAR10 pictures (32x32)
        # - Note that this does not match the L1 configuration in the paper, which would require repeated
        #   layers; CIFAR pictures are too small for that config to be directly meaningful (although it would run)
# - Any other related config would work, and the attention mechanisms don't have to be the same across layers
base_hierarchical_configs = [
BasicLayerConfig(
embedding=64,
attention_mechanism=attention,
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 4,
feedforward=feedforward,
repeat_layer=1,
),
BasicLayerConfig(
embedding=128,
attention_mechanism=attention,
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 16,
feedforward=feedforward,
repeat_layer=1,
),
BasicLayerConfig(
embedding=320,
attention_mechanism=attention,
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 64,
feedforward=feedforward,
repeat_layer=1,
),
BasicLayerConfig(
embedding=512,
attention_mechanism=attention,
patch_size=3,
stride=2,
padding=1,
seq_len=image_size * image_size // 256,
feedforward=feedforward,
repeat_layer=1,
),
]
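        # Each stage uses a stride-2 patch embedding, so the spatial resolution is halved per stage
        # and the sequence length shrinks by 4x: image_size**2 // 4, // 16, // 64, // 256.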
# Fill in the gaps in the config
xformer_config = get_hierarchical_configuration(
base_hierarchical_configs,
residual_norm_style=residual_norm_style,
use_rotary_embeddings=use_rotary_embeddings,
mlp_multiplier=4,
dim_head=32,
)
# Now instantiate the metaformer trunk
config = xFormerConfig(xformer_config)
config.weight_init = "moco"
print(config)
self.trunk = xFormer.from_config(config)
print(self.trunk)
# The classifier head
dim = base_hierarchical_configs[-1].embedding
self.ln = nn.LayerNorm(dim)
self.head = nn.Linear(dim, num_classes)
self.criterion = torch.nn.CrossEntropyLoss()
self.val_accuracy = Accuracy()
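    # Rough shape walk-through, assuming the hierarchical trunk returns a (batch, sequence, embedding)
    # tensor: (B, 3, 32, 32) -> trunk -> (B, seq_len, 512) -> LayerNorm -> mean over dim=1 -> (B, 512)
    # -> head -> (B, num_classes).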
def forward(self, x):
x = self.trunk(x)
x = self.ln(x)
x = x.mean(dim=1) # mean over sequence len
x = self.head(x)
return x
if __name__ == "__main__":
pl.seed_everything(42)
    # Adjust the batch size depending on the memory available on your machine.
    # You can also use reversible layers to save memory.
REF_BATCH = 768
BATCH = 256 # lower if not enough GPU memory
MAX_EPOCHS = 50
NUM_WORKERS = 4
GPUS = 1
torch.cuda.manual_seed_all(42)
torch.manual_seed(42)
# We'll use a datamodule here, which already handles dataset/dataloader/sampler
# - See https://pytorchlightning.github.io/lightning-tutorials/notebooks/lightning_examples/cifar10-baseline.html
# for a full tutorial
# - Please note that default transforms are being used
dm = CIFAR10DataModule(
data_dir="data",
batch_size=BATCH,
num_workers=NUM_WORKERS,
pin_memory=True,
)
image_size = dm.size(-1) # 32 for CIFAR
num_classes = dm.num_classes # 10 for CIFAR
# compute total number of steps
batch_size = BATCH * GPUS
steps = dm.num_samples // REF_BATCH * MAX_EPOCHS
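    # REF_BATCH is the effective batch size reached through gradient accumulation (see the Trainer below),
    # so the scheduler's total step count is derived from it rather than from the per-GPU BATCH.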
lm = MetaVisionTransformer(
steps=steps,
image_size=image_size,
num_classes=num_classes,
attention="scaled_dot_product",
residual_norm_style="pre",
feedforward="MLP",
use_rotary_embeddings=True,
)
trainer = pl.Trainer(
gpus=GPUS,
max_epochs=MAX_EPOCHS,
precision=16,
accumulate_grad_batches=REF_BATCH // BATCH,
)
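    # accumulate_grad_batches scales the effective batch back up to REF_BATCH, so a smaller per-GPU
    # BATCH (chosen for memory reasons) keeps the optimization roughly equivalent.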
trainer.fit(lm, dm)
    # Check the training by evaluating on the test set
trainer.test(lm, datamodule=dm)
| EXA-1-master | exa/libraries/xformers/examples/cifar_MetaFormer.py |