python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
from pathlib import Path
from typing import Any, Dict
if __name__ == "__main__":
# Get the user requests
parser = argparse.ArgumentParser(
"Collect results from a given batch of distributed results"
)
parser.add_argument("-ck", "--checkpoint_path", required=True)
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
# Go through all the data in the given checkpoint folder, try to find the end results
root = Path(args.checkpoint_path)
# - list all the mechanisms being benchmarked
results: Dict[str, Any] = {}
for attention in filter(lambda x: x.is_dir(), root.iterdir()):
logging.info(f"\nFound results for {attention.stem}")
task_jsons = attention.glob("*/test_eval_summary.json")
results[attention.stem] = {}
for task in task_jsons:
task_name = task.stem.split("__")[0]
logging.info(f"Logs found for task: {task_name}")
results[attention.stem][task_name] = -1
found_result = False
# - collect the individual results
with open(task, "r") as result_file:
dct = json.load(result_file)
if "test_accu_mean" in dct:
found_result = True
results[attention.stem][task_name] = dct["test_accu_mean"]
logging.info(
f"Final result found for {task_name} at epoch {dct['train_step_idx']}: "
f"{results[attention.stem][task_name]}"
)
else:
break
# - report an error if no result was found
if not found_result:
ERR_TAIL = 30
logging.warning(
f"No result found for {task_name}, showing the error log in {task.parent}"
)
err_log = Path(task.parent).glob("*.err")
print("*****************************************************")
with open(next(err_log), "r") as err_file:
for i, line in enumerate(reversed(err_file.readlines())):
print(line, end="")
if i > ERR_TAIL:
break
print("*****************************************************")
logging.info(f"\nCollected results: {json.dumps(results, indent=2)}")
# - reduction: compute the average
tasks = set(t for v in results.values() for t in v.keys())
# -- fill in the possible gaps
for att in results.keys():
for t in tasks:
if t not in results[att].keys():
results[att][t] = 0.0
# -- add the average value
for att in results.keys():
results[att]["AVG"] = round(sum(results[att][t] for t in tasks) / len(tasks), 2)
# - Format as an array, markdown style
tasks_sort = sorted(
set(t for v in results.values() for t in v.keys()), reverse=True
)
print(
"{0:<20}".format("") + "".join("{0:<20} ".format(t[:10]) for t in tasks_sort)
)
for att in results.keys():
print(
"{0:<20}".format(att)
+ "".join("{0:<20} ".format(results[att][t]) for t in tasks_sort)
)
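# On-disk layout this script walks (a sketch inferred from the globbing above;
# the directory names are placeholders, not prescribed by this script):
#   <checkpoint_path>/
#     <attention_name>/                 # one sub-folder per attention mechanism
#       <run_name>/
#         test_eval_summary.json        # expected to contain "test_accu_mean" and "train_step_idx"
#         *.err                         # its tail is printed when no result was found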
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/batch_fetch_results.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
A script to run multinode training with submitit.
Almost a straight copy-paste from https://github.com/facebookresearch/deit/blob/main/run_with_submitit.py
"""
import argparse
import os
import uuid
from pathlib import Path
import submitit
from xformers.benchmarks.LRA.run_tasks import benchmark, get_arg_parser
def parse_args():
parser = argparse.ArgumentParser(
"Submitit for LRA", parents=[get_arg_parser()], add_help=False
)
parser.add_argument(
"--ngpus", default=1, type=int, help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes", default=1, type=int, help="Number of nodes to request"
)
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument(
"--partition", default="a100", type=str, help="Partition where to submit"
)
parser.add_argument(
"--use_volta32", action="store_true", help="Big models? Use this"
)
parser.add_argument(
"--enforce_host_memory", action="store_true", help="Use if the host OOMs"
)
parser.add_argument(
"--comment",
default="",
type=str,
help="Comment to pass to scheduler, e.g. priority message",
)
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
checkpoint_paths = ["/checkpoint", "/checkpoints"]
for checkpoint_path in checkpoint_paths:
if Path(checkpoint_path).is_dir():
p = Path(f"{checkpoint_path}/{user}/xformers/submitit")
p.mkdir(exist_ok=True, parents=True)
return p
raise RuntimeError(f"No shared folder available - considering {checkpoint_paths}")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
self._setup_gpu_args()
benchmark(self.args)
def checkpoint(self):
self.args.dist_url = get_init_file().as_uri()
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
job_env = submitit.JobEnvironment()
self.args.checkpoint_dir = Path(
str(self.args.checkpoint_dir).replace("%j", str(job_env.job_id))
)
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.checkpoint_dir == "":
args.checkpoint_dir = get_shared_folder() / "%j"
Path(args.checkpoint_dir).mkdir(parents=True, exist_ok=True)
executor = submitit.AutoExecutor(
folder=args.checkpoint_dir, slurm_max_num_timeout=30
)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
args.world_size = args.nodes * args.ngpus
partition = args.partition
kwargs = {
"gpus_per_node": num_gpus_per_node,
"tasks_per_node": num_gpus_per_node, # one task per GPU
"cpus_per_task": 10,
"nodes": nodes,
"timeout_min": timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
"slurm_partition": partition,
"slurm_signal_delay_s": 120,
}
if args.enforce_host_memory:
kwargs["mem_gb"] = (40 * num_gpus_per_node,)
if args.use_volta32:
kwargs["slurm_constraint"] = "volta32gb"
if args.comment:
kwargs["slurm_comment"] = args.comment
executor.update_parameters(
**kwargs,
)
executor.update_parameters(name="lra")
args.dist_url = get_init_file().as_uri()
args.temp_file = str(get_init_file())
trainer = Trainer(args)
job = executor.submit(trainer)
print(f"Submitted job_id: {job.job_id}")
print(f"Logs and checkpoints will be saved at: {args.checkpoint_dir}")
with open(Path(f"{args.checkpoint_dir}") / Path("jobs.txt"), "a") as jobfile:
jobfile.write(f"{job.job_id}\n")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/run_with_submitit.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Dict, Tuple, cast
import pytorch_lightning as pl
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis, flop_count_str
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.strategies import DDPStrategy
from torch.utils.data import DataLoader
from xformers.benchmarks.LRA.code.dataset import LRADataset
from xformers.benchmarks.LRA.code.model_wrapper import ModelForSC, ModelForSCDual
from xformers.components.attention import ATTENTION_REGISTRY
class Task(str, Enum):
Retrieval = "retrieval"
ListOps = "listops"
Image = "image"
PathfinderBaseline = "pathfinder32-curv_baseline"
PathfinderContour9 = "pathfinder32-curv_contour_length_9"
PathfinderContour14 = "pathfinder32-curv_contour_length_14"
Text = "text"
def load_config(path: str) -> Dict:
with open(Path(path).absolute(), "r") as fileio:
config = json.load(fileio)
# Duplicate the pathfinder configs
config["pathfinder32-curv_baseline"] = config["pathfinder32"]
config["pathfinder32-curv_contour_length_9"] = config["pathfinder32"]
config["pathfinder32-curv_contour_length_14"] = config["pathfinder32"]
return config
def build_model(args: argparse.Namespace, config: Dict) -> nn.Module:
task = args.task
attention_name = args.attention
model = cast(
pl.LightningModule,
ModelForSCDual(config[f"{task}"], attention_name)
if task == Task.Retrieval
else ModelForSC(config[f"{task}"], attention_name),
)
logging.info(model)
summary = pl.utilities.model_summary.LayerSummary(model)
logging.info(f"num_parameter: {summary.num_parameters // 1e3 / 1e3}M")
with torch.no_grad():
# Check the flops
seq_len = config[f"{task}"]["model"]["common"]["seq_len"]
x = torch.rand(1, seq_len).long()
mask = torch.rand(1, seq_len).long()
indices = torch.rand(1, seq_len).long()
flops = FlopCountAnalysis(model.model, (x, mask, indices))
logging.info(f"complexity: {round(flops.total()/1e9, 3)} GFlops")
logging.info(flop_count_str(flops))
return model
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--attention",
type=str,
help=f"Attention mechanism to chose, among {list(ATTENTION_REGISTRY.keys())}. \
A list can be passed to test several mechanisms in sequence",
dest="attention",
required=True,
)
parser.add_argument(
"--task",
type=Task,
help=f"Task to chose, among {[t.value for t in Task]}.",
dest="task",
required=True,
)
parser.add_argument(
"--skip_train",
type=bool,
help="Whether to skip training, and test an existing model",
dest="skip_train",
default=False,
)
parser.add_argument(
"--config",
type=str,
help="Path to the config being used",
dest="config",
default="./config.json",
)
parser.add_argument(
"--checkpoint_dir",
type=str,
help="Path to the checkpoint directory",
dest="checkpoint_dir",
default=f"/checkpoints/{os.getenv('USER')}/xformers",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Path to checkpoint",
)
parser.add_argument(
"--debug",
help="Make it easier to debug a possible issue",
dest="debug",
default=False,
action="store_true",
)
parser.add_argument(
"--world_size",
help="Number of GPUs used",
dest="world_size",
type=int,
default=1,
)
parser.add_argument(
"--sweep_parameters",
help="Rewrite some hyperparameters in the config",
dest="sweep_parameters",
type=dict,
default=None,
)
return parser
def setup_log(args, attention_name, task) -> Tuple[str, TensorBoardLogger]:
experiment_name = f"{task}__{attention_name}"
logger = TensorBoardLogger(
save_dir=args.checkpoint_dir,
name="", # remove lightning_logs subdirectory
version=experiment_name,
)
log_dir = os.path.join(logger._save_dir, experiment_name)
return log_dir, logger
def rewrite_hyper(config, rewrites):
def replace(config_dict, k, v):
if len(k.split(":")) == 1:
config_dict[k] = v
return
first_key = k.split(":")[0]
assert first_key in config_dict, first_key
k = k[len(first_key) + 1 :]
replace(config_dict[first_key], k, v)
for k, v in rewrites.items():
replace(config, k, v)
return config
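# Illustrative sketch (not part of the original file): the sweep scripts pass
# colon-separated keys such as "training:learning_rate", which `replace` walks
# down the nested config. The config dict below is hypothetical.
def _rewrite_hyper_example():
    config = {"training": {"learning_rate": 1e-4, "warmup": 3000}}
    rewrite_hyper(config, {"training:learning_rate": 3e-4})
    assert config["training"]["learning_rate"] == 3e-4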
def build_dataloaders(
args: argparse.Namespace,
config_training: Dict,
num_workers: int = 4,
) -> Dict[str, DataLoader]:
datasets = {}
for component in ("train", "dev", "test"):
datasets[component] = LRADataset(
file_path=f"datasets/{args.task}.{component}.pickle",
seq_len=config_training["seq_len"],
)
# Gradient accumulation
accumu_steps = config_training["gradient_accumulation"]
logging.info(f"accumu_steps={accumu_steps}")
# Batch size
per_gpu_batch_size = (
config_training["batch_size"] // args.world_size // accumu_steps
)
logging.warning(
f"Requested batch size: {config_training['batch_size']}. Given world\
size and grad accumulation, per-gpu batch is\
{per_gpu_batch_size}"
)
dataloaders = {
k: DataLoader(
v,
batch_size=per_gpu_batch_size,
shuffle=False,
pin_memory=True,
num_workers=num_workers,
)
for k, v in datasets.items()
}
return dataloaders
def get_eval_summary(trainer: pl.Trainer) -> Dict[str, float]:
eval_summary: Dict[str, float] = {"train_step_idx": trainer.global_step}
for k, v in trainer.callback_metrics.items():
eval_summary[k] = v.item()
return eval_summary
class BasicProgressBar(TQDMProgressBar):
def get_metrics(self, trainer, model):
items = super().get_metrics(trainer, model)
items.pop("v_num", None)
return items
def benchmark(args):
log_dir, logger = setup_log(args, f"{args.attention}", f"{args.task}")
args.logger = logger
config = load_config(args.config)
config_task = config[f"{args.task}"]
if args.sweep_parameters is not None:
logging.info("Replacing hyperparameters")
rewrite_hyper(config_task, args.sweep_parameters)
config_training = config_task["training"]
config_training["seq_len"] = config_task["model"]["common"]["seq_len"]
logging.info(f"Learning rate: {config_training['learning_rate']}")
pl.seed_everything(config_training.get("seed", 0))
dataloaders = build_dataloaders(args, config_training)
model = build_model(args, config)
progress_bar = BasicProgressBar()
checkpoint_callback = ModelCheckpoint(
monitor="val_accu",
mode="max",
dirpath=args.checkpoint_dir,
filename="{epoch}-{val_accu:.2f}",
every_n_train_steps=config_training["eval_frequency"],
)
trainer = pl.Trainer(
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=args.debug)
if not args.skip_train
else None,
accumulate_grad_batches=config_training["gradient_accumulation"],
callbacks=[progress_bar, checkpoint_callback],
detect_anomaly=args.debug,
deterministic=True,
gpus=args.world_size,
limit_val_batches=config_training["num_eval_steps"],
logger=logger,
max_steps=config_training["num_train_steps"],
num_sanity_val_steps=int(not args.skip_train),
precision=16 if config_training["mixed_precision"] else 32,
val_check_interval=config_training["eval_frequency"]
/ float(len(dataloaders["train"])),
)
if not args.skip_train:
trainer.fit(
model,
train_dataloaders=dataloaders["train"],
val_dataloaders=dataloaders["dev"],
)
ckpt_path = checkpoint_callback.best_model_path
else:
ckpt_path = args.checkpoint_path
trainer.test(
model,
dataloaders=dataloaders["test"],
ckpt_path=ckpt_path,
)
eval_summary = get_eval_summary(trainer)
with open(os.path.join(log_dir, "test_eval_summary.json"), "w") as f:
logging.info(f"Saving test results at {f.name}")
json.dump(eval_summary, f)
if __name__ == "__main__":
parser = get_arg_parser()
args = parser.parse_args()
if args.skip_train and args.checkpoint_path is None:
raise parser.error("Must provide --checkpoint_path if --skip_train=True")
benchmark(args)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/run_tasks.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import uuid
from datetime import date
from pathlib import Path
from typing import Dict, Iterable
import submitit
from xformers.benchmarks.LRA.run_with_submitit import (
Trainer,
get_init_file,
get_shared_folder,
parse_args,
)
def grid_parameters(grid: Dict):
"""
Yield all combinations of parameters in the grid (as a dict)
"""
grid_copy = dict(grid)
# Turn single value in an Iterable
for k in grid_copy:
if not isinstance(grid_copy[k], Iterable):
grid_copy[k] = [grid_copy[k]]
for p in itertools.product(*grid_copy.values()):
yield dict(zip(grid.keys(), p))
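# Illustrative sketch (not part of the original file): a single non-iterable value
# is treated as a one-element axis, so the hypothetical grid below expands to two
# configurations.
def _grid_parameters_example():
    grid = {"training:learning_rate": [1e-4, 3e-4], "training:seed": 1234}
    combos = list(grid_parameters(grid))
    assert combos == [
        {"training:learning_rate": 1e-4, "training:seed": 1234},
        {"training:learning_rate": 3e-4, "training:seed": 1234},
    ]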
def grid_search(args):
if args.checkpoint_dir == "":
args.checkpoint_dir = get_shared_folder() / "%j"
date_curr = date.today().strftime("%m-%d-%Y")
orig_check_dir = os.path.join(args.checkpoint_dir, date_curr)
# Create the executor
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(
folder=get_shared_folder() / "%j", slurm_max_num_timeout=30
)
num_gpus_per_node = args.ngpus
nodes = args.nodes
args.world_size = args.nodes * args.ngpus
partition = args.partition
executor.update_parameters(
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=60 * 72,
slurm_signal_delay_s=120,
slurm_partition=partition,
)
executor.update_parameters(name="lra")
if args.task == "text":
grid_meta = {
"training:learning_rate": (
[1e-4, 2e-4, 3e-4, 5e-5],
lambda val: f"lr{val}",
),
"training:warmup": ([3000, 8000], lambda val: f"warmup{val}"),
"training:seed": ([1234, 32, 1994], lambda val: f"seed{val}"),
"training:weight_decay": ([0.02, 0.05, 0.01], lambda val: f"wd{val}"),
"model:pooling_model": (["cls"], lambda val: f"pool-{val}"),
"model:common:dropout": ([0, 0.05], lambda val: f"drop{val}"),
}
elif args.task == "retrieval":
grid_meta = {
"training:learning_rate": ([1e-4, 3e-4], lambda val: f"lr{val}"),
"training:warmup": ([2000, 8000], lambda val: f"warmup{val}"),
"training:seed": ([4096, 1234, 3, 15, 5], lambda val: f"seed{val}"),
"training:weight_decay": ([0.01, 0], lambda val: f"wd{val}"),
"model:pooling_model": (["cls"], lambda val: f"pool-{val}"),
"model:common:dropout": ([0], lambda val: f"drop{val}"),
}
elif args.task == "listops":
grid_meta = {
"training:learning_rate": (
[1e-4, 2e-4, 3e-4, 5e-5],
lambda val: f"lr{val}",
),
"training:warmup": ([3000, 2000], lambda val: f"warmup{val}"),
"training:seed": (
[
1234,
],
lambda val: f"seed{val}",
),
"training:weight_decay": ([0.02, 0.05, 0, 1], lambda val: f"wd{val}"),
"model:pooling_model": (["cls"], lambda val: f"pool-{val}"),
"model:common:dropout": ([0], lambda val: f"drop{val}"),
}
else:
grid_meta = {
"training:learning_rate": ([1e-4, 5e-5], lambda val: f"lr{val}"),
"training:warmup": ([8000], lambda val: f"warmup{val}"),
"training:seed": ([1234, 4321, 3], lambda val: f"seed{val}"),
"training:weight_decay": ([0.01], lambda val: f"wd{val}"),
"model:pooling_model": (["cls"], lambda val: f"pool-{val}"),
"model:common:dropout": ([0.1], lambda val: f"drop{val}"),
}
grid = {k: v[0] for k, v in grid_meta.items()}
save_key = {k: v[1] for k, v in grid_meta.items()}
hyper_parameters = list(grid_parameters(grid))
jobs = []
for i, grid_data in enumerate(hyper_parameters):
args.sweep_parameters = grid_data
run_name = f"{args.attention}"
# run_name = "paper_config"
for k, v in grid_data.items():
run_name += "prenorm-" + save_key[k](v)
args.checkpoint_dir = os.path.join(
orig_check_dir, f"{args.task}", "logs", run_name
)
Path(args.checkpoint_dir).mkdir(parents=True, exist_ok=True)
args.tb_dir = os.path.join(orig_check_dir, f"{args.task}", "tb", run_name)
Path(args.tb_dir).mkdir(parents=True, exist_ok=True)
# Chronos needs a different job name each time
executor.update_parameters(name=f"lra_{args.task}_{i:02d}_{uuid.uuid4().hex}")
args.dist_url = get_init_file().as_uri()
args.temp_file = str(get_init_file())
trainer = Trainer(args)
job = executor.submit(trainer)
jobs.append(job)
print(f"Run {i:02d} submitted with train cfg: {args}")
print(f"Submitted jobs ids: {','.join([str(job.job_id) for job in jobs])}")
if __name__ == "__main__":
args = parse_args()
grid_search(args)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/run_grid_search.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Adapted from https://github.com/mlpen/Nystromformer
import logging
import os
import pickle
import random
import numpy as np
import tensorflow as tf
logging.getLogger().setLevel(logging.INFO)
root_dir = "./datasets/lra_release/lra_release/"
subdir = "pathfinder32"
for diff_level in ["curv_baseline", "curv_contour_length_9", "curv_contour_length_14"]:
data_dir = os.path.join(root_dir, subdir, diff_level)
metadata_list = [
os.path.join(data_dir, "metadata", file)
for file in os.listdir(os.path.join(data_dir, "metadata"))
if file.endswith(".npy")
]
ds_list = []
for idx, metadata_file in enumerate(metadata_list):
logging.info(f"{idx} / {len(metadata_list)}: {metadata_file}")
for inst_meta in (
tf.io.read_file(metadata_file).numpy().decode("utf-8").split("\n")[:-1]
):
metadata = inst_meta.split(" ")
img_path = os.path.join(data_dir, metadata[0], metadata[1])
img_bin = tf.io.read_file(img_path)
if len(img_bin.numpy()) == 0:
logging.warning("detected empty image")
continue
img = tf.image.decode_png(img_bin)
seq = img.numpy().reshape(-1).astype(np.int32)
label = int(metadata[3])
ds_list.append({"input_ids_0": seq, "label": label})
random.shuffle(ds_list)
bp80 = int(len(ds_list) * 0.8)
bp90 = int(len(ds_list) * 0.9)
train = ds_list[:bp80]
dev = ds_list[bp80:bp90]
test = ds_list[bp90:]
with open(f"./datasets/{subdir}-{diff_level}.train.pickle", "wb") as f:
pickle.dump(train, f)
with open(f"./datasets/{subdir}-{diff_level}.dev.pickle", "wb") as f:
pickle.dump(dev, f)
with open(f"./datasets/{subdir}-{diff_level}.test.pickle", "wb") as f:
pickle.dump(test, f)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/setup/pathfinder.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Adapted from https://github.com/mlpen/Nystromformer
import logging
import pickle
import sys
import numpy as np
sys.path.append("./datasets/long-range-arena")
sys.path.append("./datasets/long-range-arena/lra_benchmarks/listops/")
import input_pipeline # type: ignore # noqa
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_datasets( # type: ignore
n_devices=1,
task_name="basic",
data_dir="./datasets/lra_release/lra_release/listops-1000/",
batch_size=1,
max_length=2000,
)
logging.getLogger().setLevel(logging.INFO)
mapping = {"train": train_ds, "dev": eval_ds, "test": test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append(
{
"input_ids_0": np.concatenate(
[inst["inputs"].numpy()[0], np.zeros(48, dtype=np.int32)]
),
"label": inst["targets"].numpy()[0],
}
)
if idx % 100 == 0:
logging.info(f"{idx}\t\t")
with open(f"./datasets/listops.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/setup/listops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Adapted from https://github.com/mlpen/Nystromformer
import logging
import pickle
import sys
import numpy as np
sys.path.append("./datasets/long-range-arena")
sys.path.append("./datasets/long-range-arena/lra_benchmarks/text_classification/")
import input_pipeline # type: ignore # noqa
logging.getLogger().setLevel(logging.INFO)
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_tc_datasets( # type: ignore
n_devices=1,
task_name="imdb_reviews",
data_dir=None,
batch_size=1,
fixed_vocab=None,
max_length=4000,
)
mapping = {"train": train_ds, "dev": eval_ds, "test": test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append(
{
"input_ids_0": np.concatenate(
[inst["inputs"].numpy()[0], np.zeros(96, dtype=np.int32)]
),
"label": inst["targets"].numpy()[0],
}
)
if idx % 100 == 0:
logging.info(f"{idx}\t\t")
with open(f"./datasets/text.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/setup/text.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Adapted from https://github.com/mlpen/Nystromformer
import logging
import pickle
import sys
import numpy as np
sys.path.append("./datasets/long-range-arena")
sys.path.append("./datasets/long-range-arena/lra_benchmarks/matching/")
import input_pipeline # type: ignore # noqa
logging.getLogger().setLevel(logging.INFO)
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_matching_datasets( # type: ignore
n_devices=1,
task_name=None,
data_dir="./datasets/lra_release/lra_release/tsv_data/",
batch_size=1,
fixed_vocab=None,
max_length=4096,
tokenizer="char",
vocab_file_path=None,
)
mapping = {"train": train_ds, "dev": eval_ds, "test": test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append(
{
"input_ids_0": np.concatenate([inst["inputs1"].numpy()[0]]),
"input_ids_1": np.concatenate([inst["inputs2"].numpy()[0]]),
"label": inst["targets"].numpy()[0],
}
)
if idx % 100 == 0:
logging.info(f"{idx}\t\t")
with open(f"./datasets/retrieval.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/setup/retrieval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Adapted from https://github.com/mlpen/Nystromformer
import logging
import pickle
import sys
sys.path.append("./datasets/long-range-arena")
sys.path.append("./datasets/long-range-arena/lra_benchmarks/image/")
import input_pipeline # type: ignore # noqa
(
train_ds,
eval_ds,
test_ds,
num_classes,
vocab_size,
input_shape,
) = input_pipeline.get_cifar10_datasets( # type: ignore
n_devices=1, batch_size=1, normalize=False
)
logging.getLogger().setLevel(logging.INFO)
mapping = {"train": train_ds, "dev": eval_ds, "test": test_ds}
max_iter = {"train": 45000, "dev": 5000, "test": 10000}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append(
{
"input_ids_0": inst["inputs"].numpy()[0].reshape(-1),
"label": inst["targets"].numpy()[0],
}
)
if idx % 1000 == 0:
logging.info(f"{idx}")
# The dataset from LRA repeats
if idx > max_iter[component]:
break
logging.info(f"{component} dataset processed")
with open(f"./datasets/image.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/setup/cifar10.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/code/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: adapted from the Nystromformer repo
# https://github.com/mlpen/Nystromformer
from enum import Enum
from typing import Dict, Union
import pytorch_lightning as pl
import torch
import torch.nn as nn
from xformers.components import build_attention
from xformers.components.multi_head_dispatch import MultiHeadDispatchConfig
from xformers.factory import xFormer, xFormerConfig, xFormerEncoderConfig
from xformers.utils import generate_matching_config
PLOutput = Dict[str, Union[float, torch.Tensor]]
class Pooling(str, Enum):
MEAN = "mean"
CLS = "cls"
def pooling(mode: Pooling):
def pool_cls(inp):
return inp[:, 0, :]
def pool_mean(inp):
return inp.mean(dim=1)
return {Pooling.MEAN: pool_mean, Pooling.CLS: pool_cls}[mode]
def append_cls(inp, mask, vocab_size):
batch_size = inp.size(0)
cls_id = (
(vocab_size - 1) * torch.ones(batch_size, dtype=torch.long, device=inp.device)
).long()
cls_mask = torch.ones(batch_size, dtype=torch.float, device=mask.device)
inp = torch.cat([cls_id[:, None], inp[:, :-1]], dim=-1)
mask = torch.cat([cls_mask[:, None], mask[:, :-1]], dim=-1)
return inp, mask
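# Behaviour sketch (not in the original file): the id (vocab_size - 1) is reserved
# as the CLS token, prepended to the sequence, and the last position is dropped so
# that the sequence length stays constant. For instance, with vocab_size=16:
#   inp  = [[3, 5, 7]], mask = [[1., 1., 1.]]
#   -> inp  = [[15, 3, 5]], mask = [[1., 1., 1.]]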
def patch_model_config(config, attention_name):
# Rebuild a specific config out of generic + extra params
commons = config["common"]
try:
extra_attention_settings = config["extra_settings"]["attention"][attention_name]
except KeyError:
extra_attention_settings = None
for bc in config["xformer"]:
bc["dim_model"] = commons["dim_model"]
bc["position_encoding_config"].update(commons)
bc["feedforward_config"].update(commons)
bc["multi_head_config"].update(commons)
bc["multi_head_config"]["attention"].update(commons)
bc["multi_head_config"]["attention"]["name"] = attention_name
bc["multi_head_config"]["attention"]["dim_head"] = (
commons["dim_model"] / commons["num_heads"]
)
if extra_attention_settings is not None:
bc["multi_head_config"]["attention"].update(extra_attention_settings)
bc["multi_head_config"] = generate_matching_config(
bc["multi_head_config"], MultiHeadDispatchConfig
)
bc["multi_head_config"].attention = build_attention(
bc["multi_head_config"].attention
)
bc = generate_matching_config(bc, xFormerEncoderConfig)
return config
class SCHead(nn.Module):
def __init__(self, config, dim_embedding, dim_mlp):
super().__init__()
self.pooling = pooling(Pooling(config["pooling_mode"]))
self.mlpblock = nn.Sequential(
nn.Linear(dim_embedding, dim_mlp),
nn.ReLU(),
nn.Linear(dim_mlp, config["common"]["num_classes"]),
)
def forward(self, inp: torch.Tensor):
seq_score = self.mlpblock(self.pooling(inp))
return seq_score
class SCHeadDual(nn.Module):
def __init__(self, config, dim_embedding, dim_mlp):
super().__init__()
self.pooling = pooling(Pooling(config["pooling_mode"]))
self.mlpblock = nn.Sequential(
nn.Linear(
dim_embedding * 4,
dim_mlp,
),
nn.ReLU(),
nn.Linear(dim_mlp, config["common"]["num_classes"]),
)
def forward(self, inp_0: torch.Tensor, inp_1: torch.Tensor):
X_0 = self.pooling(inp_0)
X_1 = self.pooling(inp_1)
seq_score = self.mlpblock(torch.cat([X_0, X_1, X_0 * X_1, X_0 - X_1], dim=-1))
return seq_score
class ModelTrunk(pl.LightningModule):
def __init__(self, config, model_name):
super().__init__()
config_model = config["model"]
self.config_training = config["training"]
self.enable_amp = config["training"]["mixed_precision"]
self.pooling_mode = Pooling(config_model["pooling_mode"])
self.vocab_size = config_model["common"]["vocab_size"]
# Rebuild a specific config out of generic + extra params
self.config_model = patch_model_config(config_model, model_name)
self.model = xFormer.from_config(xFormerConfig(config_model["xformer"]))
self.norm = nn.LayerNorm(self.config_model["common"]["dim_model"])
ff_config = self.config_model["xformer"][0]["feedforward_config"]
self.dim_mlp = (
self.config_model["common"]["dim_model"]
* ff_config["hidden_layer_multiplier"]
)
def training_step( # type: ignore
self, batch: Dict[str, torch.Tensor], batch_idx: int
) -> PLOutput:
outputs = self(**batch)
self.logger.log_metrics({f"train_{k}": v for k, v in outputs.items()}) # type: ignore
self.log("train_accu", outputs["accu"], sync_dist=True)
return outputs
def training_epoch_end(self, outputs):
logs = self.eval_epoch_end(outputs)
self.log("train_accu_mean", logs["accu"], sync_dist=True)
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config_training["learning_rate"],
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=self.config_training["weight_decay"],
)
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer=optimizer,
max_lr=self.config_training["learning_rate"],
pct_start=self.config_training["warmup"]
/ self.config_training["num_train_steps"],
anneal_strategy=self.config_training["lr_decay"],
total_steps=self.config_training["num_train_steps"],
)
return [optimizer], [lr_scheduler]
def eval_step(self, batch: Dict[str, torch.Tensor], batch_idx: int) -> PLOutput:
outputs = self(**batch)
return outputs
def eval_epoch_end(self, outputs, prefix: str = "train"):
logs = {}
counts = torch.tensor([x["count"] for x in outputs]).float()
logs["count"] = counts.sum()
for k in ("accu", "loss"):
logs[k] = (torch.tensor([x[k] for x in outputs]) * counts).sum() / logs[
"count"
]
self.log(f"{prefix}_{k}_mean", logs[k], sync_dist=True)
return logs
def validation_step( # type: ignore
self, batch: Dict[str, torch.Tensor], batch_idx: int
) -> PLOutput:
outputs = self.eval_step(batch, batch_idx)
self.logger.log_metrics({f"val_{k}": v for k, v in outputs.items()}) # type: ignore
self.log("val_accu", outputs["accu"], sync_dist=True, prog_bar=True)
return outputs
def validation_epoch_end(self, outputs):
self.eval_epoch_end(outputs, prefix="val")
def test_step( # type: ignore
self, batch: Dict[str, torch.Tensor], batch_idx: int
) -> PLOutput:
return self.eval_step(batch, batch_idx)
def test_epoch_end(self, outputs):
self.eval_epoch_end(outputs, prefix="test")
class ModelForSC(ModelTrunk):
def __init__(self, config, model_name):
# Setup trunk
super().__init__(config, model_name)
self.seq_classifer = SCHead(
self.config_model,
dim_embedding=self.config_model["common"]["dim_model"],
dim_mlp=self.dim_mlp,
)
def forward( # type: ignore
self, input_ids_0: torch.Tensor, mask_0: torch.Tensor, label: torch.Tensor
):
if self.pooling_mode == Pooling.CLS:
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
token_out = self.norm(
self.model(input_ids_0, encoder_input_mask=mask_0)
) * mask_0.unsqueeze(-1)
seq_scores = self.seq_classifer(token_out)
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {
"loss": seq_loss.mean(),
"accu": seq_accu.mean(),
"count": label.size(0),
}
return outputs
class ModelForSCDual(ModelTrunk):
def __init__(self, config, model_name):
# Setup trunk
super().__init__(config, model_name)
self.seq_classifer = SCHeadDual(
self.config_model,
dim_embedding=self.config_model["common"]["dim_model"],
dim_mlp=self.dim_mlp,
)
def forward( # type: ignore
self,
input_ids_0: torch.Tensor,
input_ids_1: torch.Tensor,
mask_0: torch.Tensor,
mask_1: torch.Tensor,
label: torch.Tensor,
):
mask_0, mask_1 = mask_0.long(), mask_1.long()
if self.pooling_mode == Pooling.CLS:
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
input_ids_1, mask_1 = append_cls(input_ids_1, mask_1, self.vocab_size)
# Concatenate the two inputs into one batch
input_ids = torch.cat([input_ids_0, input_ids_1], dim=0)
masks = torch.cat([mask_0, mask_1], dim=0)
tokens_out = self.norm(
self.model(input_ids, encoder_input_mask=masks)
) * masks.unsqueeze(-1)
seq_scores = self.seq_classifer(*torch.chunk(tokens_out, 2, dim=0))
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {
"loss": seq_loss.mean(),
"accu": seq_accu.mean(),
"count": label.size(0),
}
return outputs
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/code/model_wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Almost as-is from the Nystromformer repo
# https://github.com/mlpen/Nystromformer
import logging
import pickle
import torch
from torch.utils.data.dataset import Dataset
logging.getLogger().setLevel(logging.INFO)
class LRADataset(Dataset):
def __init__(self, file_path, seq_len):
with open(file_path, "rb") as f:
self.examples = pickle.load(f)
self.seq_len = seq_len
logging.info(f"Loaded {file_path}... size={len(self.examples)}")
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.create_inst(self.examples[i], self.seq_len)
@staticmethod
def create_inst(inst, seq_len):
output = {
"input_ids_0": torch.tensor(inst["input_ids_0"], dtype=torch.long)[:seq_len]
}
output["mask_0"] = (output["input_ids_0"] != 0).float()
if "input_ids_1" in inst:
output["input_ids_1"] = torch.tensor(inst["input_ids_1"], dtype=torch.long)[
:seq_len
]
output["mask_1"] = (output["input_ids_1"] != 0).float()
output["label"] = torch.tensor(inst["label"], dtype=torch.long)
return output
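# Example of a produced instance (a sketch; values are hypothetical, seq_len=4):
#   {"input_ids_0": tensor([12,  7,  0,  0]),
#    "mask_0":      tensor([1., 1., 0., 0.]),
#    "label":       tensor(1)}
# i.e. the mask simply flags non-padding (non-zero) token ids, and "input_ids_1" /
# "mask_1" are built the same way for the dual-input retrieval task.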
| EXA-1-master | exa/libraries/xformers/xformers/benchmarks/LRA/code/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Dict, Optional, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from .common import BaseOperator, get_xformers_operator, register_operator
from .unbind import stack_or_none, unbind
@register_operator
class DualGemmSiluOp(BaseOperator):
OPERATOR = get_xformers_operator("dual_gemm_silu_identity_mul")
OPERATOR_CATEGORY = "swiglu"
NAME = "dual_gemm_silu"
@classmethod
# type: ignore
def operator_flop(
cls, x: torch.Tensor, w1: torch.Tensor, b1, w2: torch.Tensor, b2
) -> int:
"""NOTE: we neglect the impact of biases / pointwises"""
M, N, K = x.shape[0], w1.shape[0], w1.shape[1]
return M * N * K * 2 * 2
@register_operator
class GemmFusedSumOp(BaseOperator):
OPERATOR = get_xformers_operator("gemm_fused_operand_sum")
OPERATOR_CATEGORY = "swiglu"
NAME = "gemm_fused_operand_sum"
@classmethod
# type: ignore
def operator_flop(cls, a: torch.Tensor, b: torch.Tensor, out1, out2) -> int:
M, N, K = a.shape[0], b.shape[1], a.shape[1]
return M * N * K * 2
class _SwiGLUDecomposedFunc(torch.autograd.Function):
"""
This is just an example implementation with all
operations made explicit. It is slower
than the PyTorch baseline, because PyTorch is able to fuse some operations
(e.g. the linear forward ...) that are decomposed here.
The time measurements were made on the ViT-Giant setting:
- A100/f16
- input: [4440, 1536]
- hidden: [4440, 4096]
"""
NAME = "decomposed"
FORCE_BW_F32 = False
def _silu_backward(dy, x):
# https://github.com/pytorch/pytorch/blob/563b065f5a4b4055fa6b025c2514b566d5fd9439/aten/src/ATen/native/Activation.cpp#L483
sigm = 1 / (1 + torch.exp(-x.float()))
return (dy.float() * sigm * (1 + x.float() * (1 - sigm))).to(x.dtype)
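# Derivation sketch for the expression above: with sigm = sigmoid(x),
#   silu(x) = x * sigm
#   d/dx silu(x) = sigm + x * sigm * (1 - sigm) = sigm * (1 + x * (1 - sigm))
# which is exactly the factor applied to dy.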
# 952us
@classmethod
def forward(cls, ctx, x, w1, b1, w2, b2, w3, b3):
x1 = x @ w1.transpose(-2, -1) + b1 # 275us
x2 = x @ w2.transpose(-2, -1) + b2 # 275us
x3 = F.silu(x1) # 62us
x4 = x3 * x2 # 90us
x5 = x4 @ w3.transpose(-2, -1) + b3 # 250us
ctx.save_for_backward(x, w1, b1, w2, b2, w3, b3, x1, x2, x3, x4, x5)
return x5
# 1900us
@classmethod
def backward(cls, ctx, dx5):
saved_tensors = ctx.saved_tensors
if cls.FORCE_BW_F32:
dx5 = dx5.float()
saved_tensors = [t.float() for t in ctx.saved_tensors]
x, w1, b1, w2, b2, w3, b3, x1, x2, x3, x4, x5 = saved_tensors
dx4 = dx5 @ w3 # 255us (nn)
dw3 = dx5.transpose(-2, -1) @ x4 # 247us (nt)
db3 = dx5.sum(0) # 25us
dx3 = dx4 * x2 # 88us
dx2 = dx4 * x3 # 88us
dx1 = cls._silu_backward(dx3, x1) # 90us
dx = dx2 @ w2 # 260us (nn)
dw2 = dx2.transpose(-2, -1) @ x # 245us (nt)
db2 = dx2.sum(0) # 50us
dx += dx1 @ w1 # 260us (nn)
dw1 = dx1.transpose(-2, -1) @ x # 245us (nt)
db1 = dx1.sum(0) # 50us
return (dx, dw1, db1, dw2, db2, dw3, db3)
class _SwiGLUFusedFunc(torch.autograd.Function):
NAME = "fused.py"
@classmethod
@torch.cuda.amp.custom_fwd
def forward(cls, ctx, x, w1, b1, w2, b2, w3, b3):
x1, x2, x4 = DualGemmSiluOp.OPERATOR(x, w1, b1, w2, b2)
x5 = F.linear(x4, w3, b3)
ctx.save_for_backward(x, w1, w2, w3, x1, x2)
ctx.bias = [b1 is not None, b2 is not None, b3 is not None]
return x5
@staticmethod
def _linear_bw(
dy: torch.Tensor, x: torch.Tensor, bias: bool
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
if not bias:
return (dy.transpose(-2, -1) @ x), None
db = torch.empty([dy.shape[1]], dtype=dy.dtype, device=dy.device)
dw = torch.empty([dy.shape[1], x.shape[1]], dtype=dy.dtype, device=dy.device)
GemmFusedSumOp.OPERATOR(dy.transpose(-2, -1), x, dw, db)
return dw, db
@classmethod
@torch.cuda.amp.custom_bwd
def backward(cls, ctx, dx5):
x, w1, w2, w3, x1, x2 = ctx.saved_tensors
w1w2 = stack_or_none([w1, w2], dim=0)
dx4 = dx5 @ w3 # 255us (nn)
dx1dx2, x4 = torch.ops.xformers.silu_bw_fused(x1, x2, dx4)
dx1, dx2 = dx1dx2.unbind(1)
del x1, x2, dx4
dw3, db3 = cls._linear_bw(dx5, x4, bias=ctx.bias[2])
del x4, dx5
if w1w2 is not None:
assert dx1dx2.is_contiguous()
assert w1w2.is_contiguous()
w1w2 = w1w2.view([w1.shape[0] * 2, w1.shape[1]])
dx = dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]) @ w1w2
# backward of linear1 + linear2 - packed
dw1dw2 = dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]).transpose(-2, -1) @ x
dw1dw2, db1db2 = cls._linear_bw(
dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]), x, bias=ctx.bias[0]
)
dw1, dw2 = dw1dw2.view([2, *w1.shape]).unbind(0)
if ctx.bias[0]:
db1db2 = db1db2.view([2, dx1.shape[1]])
db1, db2 = torch.unbind(db1db2, dim=0)
else:
db1 = db2 = None
else:
dx = dx2 @ w2 # 260us (nn)
torch.addmm(
dx, dx1, w1.to(dx1.dtype), beta=1, alpha=1, out=dx
) # dx += dx1 @ w1
dw2, db2 = cls._linear_bw(dx2, x, bias=ctx.bias[1])
dw1, db1 = cls._linear_bw(dx1, x, bias=ctx.bias[0])
return (dx, dw1, db1, dw2, db2, dw3, db3)
class SwiGLUOp:
"""Base class for any swiglu operator in :attr:`xformers.ops.swiglu`"""
def __init__(self, op, packed_weights: bool, name: str, constraints):
self.NAME = name
self.PACKED_WEIGHTS = packed_weights
self.op = op
self.constraints = constraints
def supports(self, op: "SwiGLUOpDispatch") -> bool:
if self.PACKED_WEIGHTS and not op.packed_weights:
return False
return all(c(op) for c in self.constraints)
def __call__(self, *args: Optional[torch.Tensor]) -> torch.Tensor:
pass
def __str__(self) -> str:
return f"SwiGLUOp:{self.NAME}"
class _ForwardToPythonAutogradFunc(SwiGLUOp):
def supports(self, op: "SwiGLUOpDispatch") -> bool:
# Let's disable autocast in bf16 until this issue is fixed
# https://github.com/pytorch/pytorch/issues/87979
if op.dtype_autocast_gpu == torch.bfloat16:
return False
return super().supports(op)
def __call__(self, *args, **kwargs):
return self.op.apply(*args, **kwargs)
class _ForwardToFunc(SwiGLUOp):
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
def info(self):
if self.op.__name__ == "no_such_operator":
return "not built"
return "available"
def _eager_functional_swiglu(
x: torch.Tensor,
w1: torch.Tensor,
b1: torch.Tensor,
w2: torch.Tensor,
b2: torch.Tensor,
w3: torch.Tensor,
b3: torch.Tensor,
) -> torch.Tensor:
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
@dataclass
class SwiGLUOpDispatch:
"""Dispatcher to automatically select
the best operator in :attr:`xformers.ops.swiglu`
"""
device: Union[torch.device, str]
dtype: torch.dtype
dtype_autocast_gpu: Optional[torch.dtype]
packed_weights: bool
bias_enabled: bool
@property
def op(self) -> SwiGLUOp:
"""Computes the best operator
Returns:
SwiGLUOp: The best operator for the configuration
"""
priorities: Sequence[SwiGLUOp] = [
SwiGLUPackedFusedOp,
SwiGLUFusedOp,
]
for op in priorities:
if op.supports(self):
return op
return SwiGLUEagerOp
@staticmethod
def from_arguments(
x: torch.Tensor,
w1: torch.Tensor,
b1: Optional[torch.Tensor],
w2: torch.Tensor,
b2: Optional[torch.Tensor],
w3: torch.Tensor,
b3: Optional[torch.Tensor],
) -> "SwiGLUOpDispatch":
return SwiGLUOpDispatch(
device=x.device,
dtype=x.dtype,
packed_weights=stack_or_none((w1, w2), dim=0) is not None,
dtype_autocast_gpu=torch.get_autocast_gpu_dtype()
if torch.is_autocast_enabled()
else w1.dtype,
bias_enabled=b1 is not None and b2 is not None and b3 is not None,
)
def _only_sm80(op: SwiGLUOpDispatch) -> bool:
device_type = op.device if isinstance(op.device, str) else op.device.type
return device_type == "cuda" and torch.cuda.get_device_capability(op.device)[0] >= 8
def _only_half_or_autocast(op: SwiGLUOpDispatch) -> bool:
HALF_DTYPES = [torch.half, torch.bfloat16]
return op.dtype in HALF_DTYPES or (
op.dtype_autocast_gpu is not None and op.dtype_autocast_gpu in HALF_DTYPES
)
def _bias_enabled(op: SwiGLUOpDispatch) -> bool:
return op.bias_enabled
_SwiGLUDecomposedOp = _ForwardToPythonAutogradFunc(
_SwiGLUDecomposedFunc, False, "decomposed", constraints=[_bias_enabled]
)
SwiGLUFusedOp = _ForwardToPythonAutogradFunc(
_SwiGLUFusedFunc, False, "fused", constraints=[_only_sm80, _only_half_or_autocast]
)
SwiGLUPackedFusedOp = _ForwardToFunc(
get_xformers_operator("swiglu_packedw"),
True,
"fused.p.cpp",
constraints=[_only_sm80, _only_half_or_autocast],
)
SwiGLUEagerOp = _ForwardToFunc(
_eager_functional_swiglu,
False,
"eager",
constraints=[],
)
def _info() -> Dict[str, str]:
return {op.NAME: op.info() for op in [SwiGLUPackedFusedOp]}
def swiglu(
x: torch.Tensor,
w1: torch.Tensor,
b1: Optional[torch.Tensor],
w2: torch.Tensor,
b2: Optional[torch.Tensor],
w3: torch.Tensor,
b3: Optional[torch.Tensor],
*,
op: SwiGLUOp = None,
) -> torch.Tensor:
"""
Computes a SwiGLU block given the weights/bias of the 3
linear layers.
- It is recommended to keep ``op=None`` so the best implementation \
available for the inputs will be used.
:Equivalent pytorch code:
.. code-block:: python
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
:Packing weights:
To allow faster implementations, it's recommended to have w1/w2 come from the same storage, as in:
.. code-block:: python
w1, w2 = xformers.ops.unbind(w12, 0)
:Supported hardware:
This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` \
(autocast is supported), and will fallback to a functional pytorch \
implementation otherwise.
"""
batch_shape = x.shape[:-1]
x = x.reshape([-1, x.shape[-1]])
if w1.ndim != 2 or w1.shape != w2.shape:
raise ValueError(f"Invalid shapes for w1: {w1.shape} / w2: {w2.shape}")
if b1 is not None:
if b1.ndim != 1 or b1.shape[0] != w1.shape[0]:
raise ValueError(f"Invalid shapes for b1: {b1.shape}")
if b2 is not None:
if b2.ndim != 1 or b2.shape[0] != w2.shape[0]:
raise ValueError(f"Invalid shapes for b2: {b2.shape}")
if w3.ndim != 2 or w3.shape[1] != w2.shape[0]:
raise ValueError(f"Invalid shape for w3: {w3.shape}")
if b3 is not None:
if b3.ndim != 1 or b3.shape[0] != w3.shape[0]:
raise ValueError(f"Invalid shapes for w3: {w3.shape} / b3: {b3.shape}")
if op is None:
op = SwiGLUOpDispatch.from_arguments(x, w1, b1, w2, b2, w3, b3).op
if not op.PACKED_WEIGHTS:
return op(x, w1, b1, w2, b2, w3, b3).reshape([*batch_shape, -1])
w1w2 = stack_or_none((w1, w2), dim=0)
if b1 is not None and b2 is not None:
b1b2: Optional[torch.Tensor] = stack_or_none((b1, b2), dim=0)
if b1b2 is None:
raise NotImplementedError("b1/b2 needs to be properly packed")
else:
b1b2 = None
assert b1 is None and b2 is None
if w1w2 is None:
raise NotImplementedError("w1/w2 needs to be properly packed")
return op(x, w1w2, b1b2, w3, b3).reshape([*batch_shape, -1])
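# Usage sketch (not part of the original file): calling the functional form with
# unpacked weights on CPU/fp32, which, per the dispatch logic above, falls back to
# the eager implementation. Shapes are arbitrary examples.
def _swiglu_usage_example():
    x = torch.randn(4, 16)
    w1, b1 = torch.randn(32, 16), torch.randn(32)
    w2, b2 = torch.randn(32, 16), torch.randn(32)
    w3, b3 = torch.randn(16, 32), torch.randn(16)
    y = swiglu(x, w1, b1, w2, b2, w3, b3)
    assert y.shape == (4, 16)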
class SwiGLU(nn.Module):
"""
A Module that encapsulates the call to :attr:`xformers.ops.swiglu`,
and holds the weights for the 3 linear layers
"""
def __init__(
self,
in_features: int,
hidden_features: int,
out_features: Optional[int] = None,
bias: bool = True,
*,
_pack_weights: bool = True,
) -> None:
"""Create a SwiGLU module
Args:
in_features (int): Number of features of the input
hidden_features (int): Number of hidden features
out_features (Optional[int], optional): Number of features of the output. Defaults to ``in_features`` when None.
bias (bool, optional): Whether linear layers also include a bias. Defaults to True.
"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.w12: Optional[nn.Linear]
if _pack_weights:
self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
else:
self.w12 = None
self.w1 = nn.Linear(in_features, hidden_features, bias=bias)
self.w2 = nn.Linear(in_features, hidden_features, bias=bias)
self.w3 = nn.Linear(hidden_features, out_features, bias=bias)
self.hidden_features = hidden_features
self.out_features = out_features
self.in_features = in_features
self.op = None
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Computes :attr:`swiglu` with the module's weights
Args:
x (torch.Tensor): A Tensor of shape ``[..., in_features]``
Returns:
torch.Tensor: A Tensor of shape ``[..., out_features]``
"""
return swiglu(x, *self._ordered_params(), op=self.op)
def _ordered_params(self):
"""Used for testing - returns ordered arguments for operators"""
b1: Optional[torch.Tensor]
b2: Optional[torch.Tensor]
if self.w12 is not None:
w1w2 = self.w12.weight
b1b2 = self.w12.bias
w1, w2 = unbind(
w1w2.view([2, w1w2.shape[0] // 2, w1w2.shape[1]]),
dim=0,
)
if b1b2 is not None:
b1, b2 = unbind(b1b2.view([2, b1b2.shape[0] // 2]), dim=0)
else:
b1, b2 = None, None
else:
w1, w2 = self.w1.weight, self.w2.weight
b1, b2 = self.w1.bias, self.w2.bias
return [
w1,
b1,
w2,
b2,
self.w3.weight,
self.w3.bias,
]
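# Usage sketch (not part of the original file): the module packs w1/w2 into a single
# nn.Linear by default so faster fused kernels can be used when available; on CPU it
# still falls back to the eager path. Sizes are arbitrary examples.
def _swiglu_module_example():
    mlp = SwiGLU(in_features=64, hidden_features=128)
    y = mlp(torch.randn(2, 8, 64))
    assert y.shape == (2, 8, 64)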
| EXA-1-master | exa/libraries/xformers/xformers/ops/swiglu_op.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Sequence, Tuple, Union
import torch
def get_stack_strides(
tensors: Sequence[torch.Tensor], dim: int
) -> Optional[Tuple[int, ...]]:
"""
If the tensors are already stacked on dimension :code:`dim`, \
returns the strides of the stacked tensors. \
Otherwise returns :code:`None`.
"""
if len(tensors) <= 1 or dim > tensors[0].ndim:
return None
final_stride = []
for i in range(tensors[0].ndim + 1):
if i == dim:
final_stride.append(
tensors[1].storage_offset() - tensors[0].storage_offset()
)
continue
if i > dim:
i -= 1
final_stride.append(tensors[0].stride(i))
storage_data_ptr: Optional[int] = None
for i, x in enumerate(tensors[1:]):
# Sanity checks
if x.shape != tensors[0].shape:
return None
if x.stride() != tensors[0].stride():
return None
if (
x.storage_offset()
!= tensors[0].storage_offset() + (i + 1) * final_stride[dim]
):
return None
if storage_data_ptr is None:
storage_data_ptr = tensors[0].storage().data_ptr()
# Actual storage check
if x.storage().data_ptr() != storage_data_ptr:
return None
return tuple(final_stride)
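# Illustrative sketch (not in the original file): slices of one buffer can be
# re-stacked for free, whereas independent allocations cannot.
def _get_stack_strides_example():
    base = torch.randn(2, 3, 4)
    a, b = base[0], base[1]  # two views of the same storage
    assert get_stack_strides([a, b], dim=0) is not None
    assert get_stack_strides([a.clone(), b.clone()], dim=0) is None  # separate buffers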
def _stack_or_none_fw(
tensors: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]],
dim: int,
) -> Optional[torch.Tensor]:
strides = get_stack_strides(tensors, dim)
if strides is not None:
input_shape = list(tensors[0].shape)
input_shape.insert(dim, len(tensors))
return tensors[0].as_strided(input_shape, strides)
return None
def _stack_fw(
tensors: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]],
dim: int,
) -> torch.Tensor:
out = _stack_or_none_fw(tensors, dim)
if out is None:
out = torch.stack(tensors, dim=dim)
return out
class _Unbind(torch.autograd.Function):
"""
See function `unbind`
"""
@staticmethod
# type: ignore
def forward(ctx, x: torch.Tensor, dim: int):
ctx.dim = dim
return x.unbind(dim)
@classmethod
# type: ignore
def backward(cls, ctx, *tensors: torch.Tensor):
return _stack_fw(tensors, ctx.dim), None
class _StackOrNone(torch.autograd.Function):
"""
See function `stack_or_none`
"""
@staticmethod
# type: ignore
def forward(ctx, dim: int, *tensors: torch.Tensor):
ctx.dim = dim
return _stack_or_none_fw(tensors, dim=dim)
@classmethod
# type: ignore
def backward(cls, ctx, grad: torch.Tensor):
return (None, *grad.unbind(dim=ctx.dim))
def unbind(x: torch.Tensor, dim: int) -> Tuple[torch.Tensor, ...]:
"""
Does exactly the same as :attr:`torch.unbind` for the forward.
In backward, avoids a :attr:`torch.cat` if the gradients
are already multiple views of the same storage
"""
return _Unbind.apply(x, dim)
def stack_or_none(tensors: Sequence[torch.Tensor], dim: int) -> torch.Tensor:
"""
Does exactly the same as :attr:`torch.stack` if the tensors can be concatenated
without any memory operation. Otherwise returns None.
"""
return _StackOrNone.apply(dim, *tensors)
| EXA-1-master | exa/libraries/xformers/xformers/ops/unbind.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .fmha import (
AttentionBias,
AttentionOp,
AttentionOpBase,
AttentionOpDispatch,
LowerTriangularMask,
MemoryEfficientAttentionCutlassFwdFlashBwOp,
MemoryEfficientAttentionCutlassOp,
MemoryEfficientAttentionFlashAttentionOp,
MemoryEfficientAttentionOp,
MemoryEfficientAttentionTritonFwdFlashBwOp,
TritonFlashAttentionOp,
memory_efficient_attention,
memory_efficient_attention_backward,
memory_efficient_attention_forward,
memory_efficient_attention_forward_requires_grad,
)
from .indexing import index_select_cat, scaled_index_add
from .swiglu_op import (
SwiGLU,
SwiGLUEagerOp,
SwiGLUFusedOp,
SwiGLUOp,
SwiGLUOpDispatch,
SwiGLUPackedFusedOp,
swiglu,
)
from .unbind import get_stack_strides, stack_or_none, unbind
# BW compatibility
AttentionMask = AttentionBias
def masked_matmul(a, b, mask=None):
if torch.overrides.has_torch_function((a, b, mask)):
return torch.overrides.handle_torch_function(
masked_matmul, (a, b, mask), a, b, mask
)
att = a @ b
if mask is None:
return att
if mask.dtype == torch.bool:
if mask.ndim == 2:
mask = mask.unsqueeze(0).expand(att.shape[0], -1, -1)
# mask is presumed false == ignore
att[~mask] = float("-inf")
else:
# mask is presumed additive
att += mask
return att
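# Illustrative sketch (not part of the original file): the two mask conventions
# accepted above (boolean "keep" mask vs additive bias) give the same result.
def _masked_matmul_example():
    a, b = torch.randn(1, 2, 3), torch.randn(1, 3, 2)
    keep = torch.tensor([[True, False], [False, True]])
    additive = torch.zeros(2, 2).masked_fill(~keep, float("-inf"))
    assert torch.equal(masked_matmul(a, b, keep), masked_matmul(a, b, additive))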
__all__ = [
"memory_efficient_attention",
"AttentionBias",
"AttentionMask",
"AttentionOp",
"AttentionOpBase",
"AttentionOpDispatch",
"LowerTriangularMask",
"MemoryEfficientAttentionCutlassFwdFlashBwOp",
"MemoryEfficientAttentionCutlassOp",
"MemoryEfficientAttentionFlashAttentionOp",
"MemoryEfficientAttentionOp",
"MemoryEfficientAttentionTritonFwdFlashBwOp",
"memory_efficient_attention_backward",
"memory_efficient_attention_forward",
"memory_efficient_attention_forward_requires_grad",
"SwiGLU",
"SwiGLUEagerOp",
"SwiGLUFusedOp",
"SwiGLUOp",
"SwiGLUOpDispatch",
"SwiGLUPackedFusedOp",
"swiglu",
"TritonFlashAttentionOp",
"unbind",
"stack_or_none",
"get_stack_strides",
"masked_matmul",
"scaled_index_add",
"index_select_cat",
]
| EXA-1-master | exa/libraries/xformers/xformers/ops/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Type, TypeVar
import torch
def get_operator(library: str, name: str):
def no_such_operator(*args, **kwargs):
raise RuntimeError(
f"No such operator {library}::{name} - did you forget to build xformers with `python setup.py develop`?"
)
try:
return getattr(getattr(torch.ops, library), name)
except (RuntimeError, AttributeError):
return no_such_operator
def get_xformers_operator(name: str):
return get_operator("xformers", name)
class BaseOperator:
OPERATOR: Any
NAME: str
OPERATOR_CATEGORY: str
@classmethod
def is_available(cls) -> bool:
if cls.OPERATOR is None or cls.OPERATOR.__name__ == "no_such_operator":
return False
return True
@classmethod
def operator_flop(cls, *inputs) -> int:
"""Calculate number of FLOP given inputs to `OPERATOR`"""
return -1
OPERATORS_REGISTRY: List[Type[BaseOperator]] = []
FUNC_TO_XFORMERS_OPERATOR: Dict[Any, Type[BaseOperator]] = {}
ClsT = TypeVar("ClsT")
def register_operator(cls: ClsT) -> ClsT:
global OPERATORS_REGISTRY, FUNC_TO_XFORMERS_OPERATOR
OPERATORS_REGISTRY.append(cls) # type: ignore
FUNC_TO_XFORMERS_OPERATOR[cls.OPERATOR] = cls # type: ignore
return cls
| EXA-1-master | exa/libraries/xformers/xformers/ops/common.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Sequence
import torch
from .common import BaseOperator, get_xformers_operator, register_operator
@register_operator
class ScaledIndexAddFw(BaseOperator):
OPERATOR = get_xformers_operator("scaled_index_addF")
OPERATOR_CATEGORY = "indexing"
NAME = "scaled_index_addF"
@register_operator
class ScaledIndexAddBw(BaseOperator):
OPERATOR = get_xformers_operator("scaled_index_addB")
OPERATOR_CATEGORY = "indexing"
NAME = "scaled_index_addB"
@register_operator
class IndexSelect(BaseOperator):
OPERATOR = get_xformers_operator("index_select")
OPERATOR_CATEGORY = "indexing"
NAME = "index_select"
class _ScaledIndexAdd(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx,
input: torch.Tensor,
index: torch.Tensor,
source: torch.Tensor,
scaling: Optional[torch.Tensor],
alpha: float,
) -> torch.Tensor:
ScaledIndexAddFw.OPERATOR(
output=input, # in-place
input=input,
source=source,
index=index,
source_scaling=scaling,
alpha=alpha,
)
ctx.mark_dirty(input)
ctx.save_for_backward(index, scaling, source)
ctx.source_shape = source.shape
ctx.alpha = alpha
return input
@staticmethod
@torch.autograd.function.once_differentiable
def backward(ctx, grad_output):
index, scaling, source = ctx.saved_tensors
grad_source = torch.empty_like(grad_output[: index.shape[0]])
grad_source_scaling = (
torch.empty(
ctx.source_shape,
dtype=scaling.dtype,
device=scaling.device,
)
if scaling is not None
else None
)
ScaledIndexAddBw.OPERATOR(
grad_source=grad_source,
grad_source_scaling=grad_source_scaling,
grad_output=grad_output,
source=source,
index=index,
source_scaling=scaling,
alpha=ctx.alpha,
)
if grad_source_scaling is not None:
grad_source_scaling = grad_source_scaling.sum((0, 1))
return (
grad_output, # input
None, # index
grad_source, # source
grad_source_scaling, # scaling
None, # alpha
)
def scaled_index_add(
input: torch.Tensor, # [B, M, D]
index: torch.Tensor, # [Bi] - int64
source: torch.Tensor, # [Bi, M, D]
scaling: Optional[torch.Tensor] = None, # [D]
alpha: float = 1.0,
) -> torch.Tensor:
"""
In-place scaling+index_add
Indices in ``index`` are assumed to be unique
:Note:
The FW pass is done in-place (``input`` is modified)
:Note:
This is experimental and has only been optimized for a few shapes
:Equivalent pytorch code:
.. code-block:: python
        return torch.index_add(input, dim=0, source=scaling * source, index=index, alpha=alpha)
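
    :Example:

    A minimal usage sketch - shapes, dtype and device below are arbitrary
    assumptions (the op is experimental and only optimized for a few shapes):

    .. code-block:: python

        import torch
        import xformers.ops as xops

        inp = torch.randn([8, 16, 32], device="cuda", dtype=torch.float16)
        src = torch.randn([4, 16, 32], device="cuda", dtype=torch.float16)
        idx = torch.tensor([0, 2, 4, 6], device="cuda", dtype=torch.int64)
        scaling = torch.randn([32], device="cuda", dtype=torch.float16)
        # `inp` is updated in-place and also returned
        out = xops.scaled_index_add(inp, idx, src, scaling, alpha=0.5)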
"""
return _ScaledIndexAdd.apply(
input,
index,
source,
scaling,
alpha,
)
class _IndexSelectCat(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx,
*args: torch.Tensor,
) -> torch.Tensor:
assert len(args) % 2 == 0
sources = args[: len(args) // 2]
indices = args[len(args) // 2 :]
output_shape = 0
total_source_elements = 0
for source, index in zip(sources, indices):
output_shape += index.shape[0] * source.shape[1]
total_source_elements += source.shape[0] * source.shape[1]
output = torch.empty(
[output_shape], dtype=sources[0].dtype, device=sources[0].device
)
output_i = 0
for source, index in zip(sources, indices):
elements_here = index.shape[0] * source.shape[1]
IndexSelect.OPERATOR(
output=output[output_i : output_i + elements_here].view(
[index.shape[0], source.shape[1]]
),
source=source,
index=index,
)
output_i += elements_here
ctx.save_for_backward(*indices)
ctx.total_source_elements = total_source_elements
ctx.source_shapes = [s.shape for s in sources]
return output
@staticmethod
@torch.autograd.function.once_differentiable
def backward(ctx, grad_output):
indices = ctx.saved_tensors
grad_sources = torch.zeros(
[ctx.total_source_elements],
dtype=grad_output.dtype,
device=grad_output.device,
)
grad_sources_i = 0
grad_output_i = 0
gradients = []
for source_shape, index in zip(ctx.source_shapes, indices):
grad_output_slice = grad_output[
grad_output_i : grad_output_i + index.shape[0] * source_shape[1]
].reshape([index.shape[0], source_shape[1]])
grad_output_i += index.shape[0] * source_shape[1]
gradient_source = grad_sources[
grad_sources_i : grad_sources_i + source_shape[0] * source_shape[1]
].reshape(source_shape)
grad_sources_i += source_shape[0] * source_shape[1]
ScaledIndexAddFw.OPERATOR(
output=gradient_source.unsqueeze(1),
input=None,
source=grad_output_slice.unsqueeze(1),
index=index,
source_scaling=None,
alpha=1.0,
)
gradients.append(gradient_source)
return (*gradients, *([None] * len(gradients)))
def index_select_cat(
sources: Sequence[torch.Tensor], indices: Sequence[torch.Tensor]
) -> torch.Tensor:
"""
    Indices in ``indices`` are assumed to be unique
:Note:
This is experimental and has only been optimized for a few shapes
:Example:
Given:
- ``sources[0]`` of shape ``[S0, D0]``
- ``indices[0]`` of shape ``[I0]``
- ``sources[1]`` of shape ``[S1, D1]``
- ``indices[1]`` of shape ``[I1]``
returns a ``torch.Tensor`` of shape ``[I0 * D0 + I1 * D1]``
:Equivalent pytorch code:
.. code-block:: python
return torch.cat([s[i.long()].flatten() for s, i in zip(sources, indices)], dim=0)
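
    :Example:

    A minimal usage sketch - shapes, dtype and device below are arbitrary
    assumptions:

    .. code-block:: python

        import torch
        import xformers.ops as xops

        s0 = torch.randn([10, 64], device="cuda", dtype=torch.float16)
        s1 = torch.randn([7, 32], device="cuda", dtype=torch.float16)
        i0 = torch.tensor([0, 3, 5], device="cuda", dtype=torch.int64)
        i1 = torch.tensor([1, 2], device="cuda", dtype=torch.int64)
        out = xops.index_select_cat([s0, s1], [i0, i1])
        assert out.shape == (3 * 64 + 2 * 32,)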
"""
return _IndexSelectCat.apply(*sources, *indices)
| EXA-1-master | exa/libraries/xformers/xformers/ops/indexing.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import replace
from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple
import torch
from ... import _is_triton_available
from ..common import register_operator
if TYPE_CHECKING or _is_triton_available():
from ..._flash_attn.flash_attn_triton import (
_flash_attn_backward,
_flash_attn_forward,
)
triton_flash_backward = _flash_attn_backward
triton_flash_forward = _flash_attn_forward
else:
triton_flash_backward = None
triton_flash_forward = None
from .attn_bias import LowerTriangularMask
from .common import (
AttentionBwOpBase,
AttentionFwOpBase,
Context,
Gradients,
Inputs,
check_lastdim_alignment_stride1,
)
def _prepare_inputs(inp: Inputs) -> Inputs:
attn_bias = inp.attn_bias
if isinstance(attn_bias, torch.Tensor) and attn_bias.ndim == 3:
B = inp.query.shape[0]
h = attn_bias.shape[0] // B
attn_bias = attn_bias.reshape(B, h, attn_bias.shape[1], attn_bias.shape[2])
# Make sure that the last dimension is contiguous
query, key, value = [
x if x.stride(-1) == 1 else x.contiguous()
for x in [inp.query, inp.key, inp.value]
]
return replace(inp, attn_bias=attn_bias, query=query, key=key, value=value)
@register_operator
class FwOp(AttentionFwOpBase):
"""Operator that computes memory-efficient attention using \
`Tri Dao's <https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attn_triton.py>`_ \
implementation, based on
`Phil Tillet's code <https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py>`_
"""
OPERATOR = triton_flash_forward
SUPPORTED_DEVICES = {"cuda"}
CUDA_MINIMUM_COMPUTE_CAPABILITY = (8, 0)
SUPPORTED_DTYPES = {torch.half, torch.bfloat16}
SUPPORTED_MAX_K = 128
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {
type(None),
LowerTriangularMask,
# TODO: backwards accuracy is failing for a few cases, perhaps we want to disable this for now.
# torch.Tensor,
}
SUPPORTS_DROPOUT = False
SUPPORTS_CUSTOM_SCALE = True
NAME = "tritonflashattF"
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(FwOp, cls).not_supported_reasons(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, 8)
check_lastdim_alignment_stride1(reasons, "key", d.key, 8)
check_lastdim_alignment_stride1(reasons, "value", d.value, 8)
if cls.OPERATOR is None:
reasons.append("triton is not available")
if d.device.type == "cuda":
# Has only been tested on 8.0.
# Fails on 7.5 with illegal memory access
if torch.cuda.get_device_capability(d.device) != (8, 0):
reasons.append("requires A100 GPU")
if _is_triton_available():
import triton
if triton.__version__ > "2.0.0":
reasons.append("Only work on pre-MLIR triton for now")
return reasons
@classmethod
def apply(
cls, inp: Inputs, needs_gradient: bool
) -> Tuple[torch.Tensor, Optional[Context]]:
inp = _prepare_inputs(inp)
out, lse, softmax_scale = triton_flash_forward(
q=inp.query,
k=inp.key,
v=inp.value,
bias=inp.attn_bias if isinstance(inp.attn_bias, torch.Tensor) else None,
softmax_scale=inp.scale_float,
causal=isinstance(inp.attn_bias, LowerTriangularMask),
)
return out, Context(lse=lse, out=out)
@register_operator
class BwOp(AttentionBwOpBase):
__doc__ = FwOp.__doc__
OPERATOR = triton_flash_backward
SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES
CUDA_MINIMUM_COMPUTE_CAPABILITY = FwOp.CUDA_MINIMUM_COMPUTE_CAPABILITY
SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES
SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K
SUPPORTED_ATTN_BIAS_TYPES = FwOp.SUPPORTED_ATTN_BIAS_TYPES
SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT
SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE
SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED
NAME = "tritonflashattB"
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(BwOp, cls).not_supported_reasons(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, 8)
check_lastdim_alignment_stride1(reasons, "key", d.key, 8)
check_lastdim_alignment_stride1(reasons, "value", d.value, 8)
if cls.OPERATOR is None:
reasons.append("triton is not available")
if d.device.type == "cuda":
if torch.cuda.get_device_capability(d.device) != (8, 0):
reasons.append("requires A100 GPU")
if _is_triton_available():
import triton
if triton.__version__ > "2.0.0":
reasons.append("Only work on pre-MLIR triton for now")
return reasons
@classmethod
def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
inp = _prepare_inputs(inp)
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
grads = Gradients(
dq=torch.empty_like(inp.query),
dk=torch.empty_like(inp.key),
dv=torch.empty_like(inp.value),
)
cls.OPERATOR(
grad,
inp.query,
inp.key,
inp.value,
ctx.out,
ctx.get_padded_lse(128),
grads.dq,
grads.dk,
grads.dv,
bias=inp.attn_bias if isinstance(inp.attn_bias, torch.Tensor) else None,
softmax_scale=inp.scale_float,
causal=isinstance(inp.attn_bias, LowerTriangularMask),
)
return grads
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/triton.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import textwrap
from typing import List, Type, TypeVar
from . import cutlass, flash, small_k, triton
from .attn_bias import BlockDiagonalMask
from .common import AttentionBwOpBase, AttentionFwOpBase, Inputs
def _is_cutlass_fwd_faster_than_flash(inp: Inputs) -> bool:
# For dropout, we can't mix & match kernels
# Unfortunately, the dropout implementation in CUTLASS
# backward is pretty slow for the BW, so disable it here
if inp.p > 0.0:
return False
# Very small batch sizes - if batch size specified
batch_size, q_len, num_heads, k = inp.query.shape
if isinstance(inp.attn_bias, BlockDiagonalMask):
batch_size *= len(inp.attn_bias.k_seqinfo.seqstart_py) - 1
if batch_size > 0:
threads_flash = batch_size * num_heads
threads_cutlass = threads_flash * (q_len // 64)
if threads_flash < 60 and (threads_cutlass // 2) >= threads_flash:
return True
# Large values of K
return max(k, inp.key.shape[-1]) > 64
def _is_triton_fwd_fastest(inp: Inputs) -> bool:
# TODO: fill out
return False
T = TypeVar("T", Type[AttentionFwOpBase], Type[AttentionBwOpBase])
def _format_inputs_description(inp: Inputs) -> str:
return f"""query : shape={tuple(inp.query.shape)} ({inp.query.dtype})
key : shape={tuple(inp.key.shape)} ({inp.key.dtype})
value : shape={tuple(inp.value.shape)} ({inp.value.dtype})
attn_bias : {type(inp.attn_bias)}
p : {inp.p}"""
def _ensure_op_supports_or_raise(exc_type, name: str, op, inp: Inputs) -> None:
reasons = op.not_supported_reasons(inp)
if not reasons:
return
raise exc_type(
f"""Operator `{name}` does not support inputs:
{textwrap.indent(_format_inputs_description(inp), ' ')}
{_format_not_supported_reasons(op, reasons)}"""
)
def _format_not_supported_reasons(op, reasons: List[str]) -> str:
return f"`{op.NAME}` is not supported because:\n " + "\n ".join(reasons)
def _run_priority_list(name: str, priority_list: List[T], inp: Inputs) -> T:
not_supported_reasons: List[List[str]] = []
for op in priority_list:
not_supported = op.not_supported_reasons(inp)
if not not_supported:
return op
not_supported_reasons.append(not_supported)
# Let's write a nice message explaining what we tried and why it's not supported
msg = f"""No operator found for `{name}` with inputs:
{textwrap.indent(_format_inputs_description(inp), ' ')}"""
for op, not_supported in zip(priority_list, not_supported_reasons):
msg += "\n" + _format_not_supported_reasons(op, not_supported)
raise NotImplementedError(msg)
def _dispatch_fw(inp: Inputs) -> Type[AttentionFwOpBase]:
"""Computes the best operator for forward
Raises:
        NotImplementedError: if no operator was found
Returns:
AttentionOp: The best operator for the configuration
"""
priority_list_ops: List[Type[AttentionFwOpBase]] = [
flash.FwOp,
triton.FwOp,
cutlass.FwOp,
small_k.FwOp,
]
if _is_cutlass_fwd_faster_than_flash(inp):
priority_list_ops.remove(cutlass.FwOp)
priority_list_ops.insert(0, cutlass.FwOp)
if _is_triton_fwd_fastest(inp):
priority_list_ops.remove(triton.FwOp)
priority_list_ops.insert(0, triton.FwOp)
return _run_priority_list(
"memory_efficient_attention_forward", priority_list_ops, inp
)
def _dispatch_bw(inp: Inputs) -> Type[AttentionBwOpBase]:
priority_list_ops: List[Type[AttentionBwOpBase]] = [
flash.BwOp,
cutlass.BwOp,
# CUDA illegal memory issues, race conditions etc..
# triton.BwOp,
# Deprecated
small_k.BwOp,
]
return _run_priority_list(
"memory_efficient_attention_backward", priority_list_ops, inp
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/dispatch.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple, Type, Union
import torch
from . import cutlass, flash, small_k, triton
from .attn_bias import AttentionBias, BlockDiagonalMask, LowerTriangularMask
from .common import (
AttentionBwOpBase,
AttentionFwOpBase,
AttentionOp,
AttentionOpBase,
AttentionOpDispatch,
Context,
Gradients,
Inputs,
bmk2bmhk,
)
from .dispatch import _dispatch_bw, _dispatch_fw, _ensure_op_supports_or_raise
MemoryEfficientAttentionCutlassOp = (cutlass.FwOp, cutlass.BwOp)
MemoryEfficientAttentionCutlassFwdFlashBwOp = (cutlass.FwOp, flash.BwOp)
MemoryEfficientAttentionTritonFwdFlashBwOp = (triton.FwOp, flash.BwOp)
MemoryEfficientAttentionFlashAttentionOp = (flash.FwOp, flash.BwOp)
MemoryEfficientAttentionOp = (small_k.FwOp, small_k.BwOp)
TritonFlashAttentionOp = (triton.FwOp, triton.BwOp)
class _fMHA(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(ctx, op: AttentionOp, *args: Any) -> Any:
inp = Inputs(*args)
op_fw = op[0] if op is not None else None
op_bw = op[1] if op is not None else None
out, op_ctx = _memory_efficient_attention_forward_requires_grad(
inp=inp, op=op_fw
)
# Saving attn_bias is a bit complicated, as the
# torch part should go in `save_for_backward`
if isinstance(inp.attn_bias, torch.Tensor):
attn_bias_tensor = inp.attn_bias
attn_bias_ctx = None
else:
attn_bias_tensor = None
attn_bias_ctx = inp.attn_bias
ctx.save_for_backward(
inp.query,
inp.key,
inp.value,
op_ctx.out,
op_ctx.lse,
)
ctx.rng_state = op_ctx.rng_state
ctx.attn_bias_tensor = attn_bias_tensor
if op_ctx.op_bw is not None:
if op_bw is not None and op_bw is not op_ctx.op_bw:
raise ValueError(
f"Specified op_bw={op_bw.NAME}, but forward op "
f"can only run with op_bw={op_ctx.op_bw.NAME}. Please set op_bw=None."
)
op_bw = op_ctx.op_bw
ctx.op_fw = op_fw
ctx.op_bw = op_bw
ctx.p = inp.p
ctx.scale = inp.scale
ctx.attn_bias_ctx = attn_bias_ctx
ctx.n_args = len(args)
return out
@staticmethod
def deserialize_bias(
attn_bias_ctx, attn_bias_tensor: Optional[torch.Tensor]
) -> Any:
if attn_bias_tensor is None:
return attn_bias_ctx
return attn_bias_tensor
@classmethod
@torch.autograd.function.once_differentiable
def backward(cls, ctx, grad):
# Re-create context
query, key, value, out, lse = ctx.saved_tensors
attn_bias_tensor = ctx.attn_bias_tensor
rng_state = ctx.rng_state
inp = Inputs(
query=query,
key=key,
value=value,
attn_bias=cls.deserialize_bias(ctx.attn_bias_ctx, attn_bias_tensor),
p=ctx.p,
scale=ctx.scale,
)
op_ctx = Context(
lse=lse,
out=out,
rng_state=rng_state,
)
grads = _memory_efficient_attention_backward(
ctx=op_ctx, inp=inp, grad=grad, op=ctx.op_bw
)
return (None, grads.dq, grads.dk, grads.dv, grads.db) + (None,) * (
ctx.n_args - 2
)
def memory_efficient_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[AttentionOp] = None,
) -> torch.Tensor:
"""Implements the memory-efficient attention mechanism following
`"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_.
:Inputs shape:
- Input tensors must be in format ``[B, M, H, K]``, where B is the batch size, M \
        the sequence length, H the number of heads, and K the embedding size per head
- If inputs have dimension 3, it is assumed that the dimensions are ``[B, M, K]`` and ``H=1``
- Inputs can be non-contiguous - we only require the last dimension's stride to be 1
:Equivalent pytorch code:
.. code-block:: python
scale = 1 / query.shape[-1] ** 0.5
query = query * scale
attn = query @ key.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(-1)
attn = F.dropout(attn, p)
return attn @ value
:Examples:
.. code-block:: python
import xformers.ops as xops
# Compute regular attention
y = xops.memory_efficient_attention(q, k, v)
# With a dropout of 0.2
y = xops.memory_efficient_attention(q, k, v, p=0.2)
# Causal attention
y = xops.memory_efficient_attention(
q, k, v,
attn_bias=xops.LowerTriangularMask()
)
:Supported hardware:
        NVIDIA GPUs with compute capability 6.0 or higher (P100+), datatype ``f16``, ``bf16`` and ``f32``.
Raises:
NotImplementedError: if there is no operator available to compute the MHA
ValueError: if inputs are invalid
:parameter query: Tensor of shape ``[B, Mq, H, K]``
:parameter key: Tensor of shape ``[B, Mkv, H, K]``
:parameter value: Tensor of shape ``[B, Mkv, H, Kv]``
:parameter attn_bias: Bias to apply to the attention matrix - defaults to no masking. \
For common biases implemented efficiently in xFormers, see :attr:`xformers.ops.fmha.attn_bias.AttentionBias`. \
This can also be a :attr:`torch.Tensor` for an arbitrary mask (slower).
:parameter p: Dropout probability. Disabled if set to ``0.0``
:parameter scale: Scaling factor for ``Q @ K.transpose()``. If set to ``None``, the default \
scale (q.shape[-1]**-0.5) will be used.
:parameter op: The operators to use - see :attr:`xformers.ops.AttentionOpBase`. \
If set to ``None`` (recommended), xFormers \
will dispatch to the best available operator, depending on the inputs \
and options.
:return: multi-head attention Tensor with shape ``[B, Mq, H, Kv]``
"""
return _memory_efficient_attention(
Inputs(
query=query, key=key, value=value, p=p, attn_bias=attn_bias, scale=scale
),
op=op,
)
def memory_efficient_attention_forward(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionFwOpBase]] = None,
) -> torch.Tensor:
"""
Calculates the forward pass of :attr:`xformers.ops.memory_efficient_attention`.
"""
return _memory_efficient_attention_forward(
Inputs(
query=query, key=key, value=value, p=p, attn_bias=attn_bias, scale=scale
),
op=op,
)
def memory_efficient_attention_forward_requires_grad(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionFwOpBase]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns a tuple (output, lse), where `lse` can be used to compute the backward pass later.
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments
See :attr:`xformers.ops.memory_efficient_attention_backward` for running the backward pass
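
    :Example:

    A minimal sketch of the forward/backward pair - shapes, dtype and device
    below are arbitrary assumptions:

    .. code-block:: python

        import torch
        import xformers.ops as xops

        q = torch.randn([2, 64, 4, 32], device="cuda", dtype=torch.float16)
        k = torch.randn([2, 64, 4, 32], device="cuda", dtype=torch.float16)
        v = torch.randn([2, 64, 4, 32], device="cuda", dtype=torch.float16)
        out, lse = xops.memory_efficient_attention_forward_requires_grad(q, k, v)
        grad_out = torch.ones_like(out)
        dq, dk, dv = xops.memory_efficient_attention_backward(
            grad_out, out, lse, q, k, v
        )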
"""
if p != 0.0:
raise NotImplementedError(
"dropout is not supported on the non-autograd API."
" If you want to use dropout, please call `memory_efficient_attention` directly"
)
out, ctx = _memory_efficient_attention_forward_requires_grad(
Inputs(
query=query, key=key, value=value, p=p, attn_bias=attn_bias, scale=scale
),
op=op,
)
return out, ctx.lse
def memory_efficient_attention_backward(
grad: torch.Tensor,
output: torch.Tensor,
lse: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionBwOpBase]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Computes the gradient of the attention.
Returns a tuple (dq, dk, dv)
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments.
`lse` is the tensor returned by :attr:`xformers.ops.memory_efficient_attention_forward_requires_grad`
"""
if p != 0.0:
raise NotImplementedError(
"dropout is not supported on the non-autograd API."
" If you want to use dropout, please call `memory_efficient_attention` directly"
)
gradients = _memory_efficient_attention_backward(
Context(out=output, lse=lse),
Inputs(
query=query, key=key, value=value, p=p, attn_bias=attn_bias, scale=scale
),
grad,
op=op,
)
return (gradients.dq, gradients.dk, gradients.dv)
def _memory_efficient_attention(
inp: Inputs, op: Optional[AttentionOp] = None
) -> torch.Tensor:
# fast-path that doesn't require computing the logsumexp for backward computation
if all(x.requires_grad is False for x in [inp.query, inp.key, inp.value]):
return _memory_efficient_attention_forward(
inp, op=op[0] if op is not None else None
)
output_shape = inp.normalize_bmhk()
return _fMHA.apply(
op, inp.query, inp.key, inp.value, inp.attn_bias, inp.p, inp.scale
).reshape(output_shape)
def _memory_efficient_attention_forward(
inp: Inputs, op: Optional[Type[AttentionFwOpBase]]
) -> torch.Tensor:
inp.validate_inputs()
output_shape = inp.normalize_bmhk()
if op is None:
op = _dispatch_fw(inp)
else:
_ensure_op_supports_or_raise(ValueError, "memory_efficient_attention", op, inp)
out, *_ = op.apply(inp, needs_gradient=False)
return out.reshape(output_shape)
def _memory_efficient_attention_forward_requires_grad(
inp: Inputs, op: Optional[Type[AttentionFwOpBase]]
) -> Tuple[torch.Tensor, Context]:
inp.validate_inputs()
output_shape = inp.normalize_bmhk()
if op is None:
op = _dispatch_fw(inp)
else:
_ensure_op_supports_or_raise(ValueError, "memory_efficient_attention", op, inp)
out = op.apply(inp, needs_gradient=True)
assert out[1] is not None
return (out[0].reshape(output_shape), out[1])
def _memory_efficient_attention_backward(
ctx: Context, inp: Inputs, grad: torch.Tensor, op: Optional[Type[AttentionBwOpBase]]
) -> Gradients:
"""Warning: grad/ctx.out is potentially in BMK format"""
inp.validate_inputs()
if grad.ndim != inp.query.ndim or grad.ndim != ctx.out.ndim:
raise ValueError(
"All tensors should be either in BMK (ndim=3) or BMHK (ndim=4) format. \n"
f"grad.shape : {grad.shape} \n"
f"out.shape : {ctx.out.shape} \n"
f"query.shape: {inp.query.shape}"
)
shape_dq, shape_dk, shape_dv = tuple(
x.shape for x in (inp.query, inp.key, inp.value)
)
inp.normalize_bmhk()
# LSE has shape [B, H, M] while query has shape [B, M, H, K]
if (
ctx.lse.ndim != 3
# Dim 0
or (
not isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[0] != inp.query.shape[0]
)
or (
isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[0] != inp.attn_bias.q_seqinfo.seqstart.shape[0] - 1
)
# Dim 1
or ctx.lse.shape[1] != inp.query.shape[2]
# Dim 2
or (
not isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[2] < inp.query.shape[1]
)
):
raise ValueError(
"Input tensors have incompatible shapes."
f"lse.shape : {ctx.lse.shape} \n"
f"query.shape : {inp.query.shape}"
)
grad = bmk2bmhk(grad, 1)
ctx.out = bmk2bmhk(ctx.out, 1)
if op is None:
op = _dispatch_bw(inp)
else:
_ensure_op_supports_or_raise(
ValueError, "memory_efficient_attention_backward", op, inp
)
grads = op.apply(ctx, inp, grad)
grads.dq = grads.dq.reshape(shape_dq)
grads.dk = grads.dk.reshape(shape_dk)
grads.dv = grads.dv.reshape(shape_dv)
return grads
__all__ = [
"AttentionBias",
"AttentionOp",
"AttentionOpBase",
"AttentionOpDispatch",
"LowerTriangularMask",
"MemoryEfficientAttentionCutlassFwdFlashBwOp",
"MemoryEfficientAttentionTritonFwdFlashBwOp",
"MemoryEfficientAttentionCutlassOp",
"MemoryEfficientAttentionFlashAttentionOp",
"MemoryEfficientAttentionOp",
"TritonFlashAttentionOp",
"memory_efficient_attention",
]
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from typing import Iterable, List, Optional, Sequence, Tuple, Union
import torch
class AttentionBias:
"""Base class for a custom bias that can be applied \
as the attn_bias argument in
:attr:`xformers.ops.memory_efficient_attention`.
That function has the ability to add a tensor, the
attention bias, to the QK^T matrix before it is used
in the softmax part of the attention calculation.
The attention bias tensor with shape
(B or 1, n_queries, number of keys)
can be given as the attn_bias input.
    The most common use case for an attention bias is
    to contain only zeros and negative infinities, which together
    form a mask so that some queries only attend to some keys.
    For some common cases, children of this class provide
    alternative objects that can be used as the attn_bias input
    to express such a mask.
When using an :attr:`xformers.ops.AttentionBias`
instead of a :attr:`torch.Tensor`, the mask matrix does
not need to be materialized, and can be
hardcoded into some kernels for better performance.
See:
- :attr:`xformers.ops.fmha.attn_bias.LowerTriangularMask`
- :attr:`xformers.ops.fmha.attn_bias.LowerTriangularMaskWithTensorBias`
- :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask`
- :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask`
"""
def materialize(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
"""
Materializes the bias as a `torch.Tensor`. This is very slow
and we don't attempt to make it fast. Only use for debugging/testing.
Shape should be like `[*, q_seqlen, k_seqlen]`
"""
raise NotImplementedError()
class LowerTriangularMask(AttentionBias):
"""
A lower-triangular (aka causal) mask
A query Q cannot attend to a key which is farther from the
initial key than Q is from the initial query.
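
    :Example:

    A small sketch materializing the mask for inspection (only meant for
    debugging - kernels consume this bias without materializing it):

    .. code-block:: python

        from xformers.ops import LowerTriangularMask

        bias = LowerTriangularMask().materialize((3, 4))
        # tensor([[0., -inf, -inf, -inf],
        #         [0.,   0., -inf, -inf],
        #         [0.,   0.,   0., -inf]])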
"""
def materialize(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
create_as = dtype if dtype is not torch.bfloat16 else torch.float32
tensor = torch.full( # type: ignore
shape,
dtype=create_as,
fill_value=float("-inf"),
device=device,
)
return torch.triu(tensor, diagonal=1).to(dtype) # type: ignore
def add_bias(self, bias: torch.Tensor) -> "LowerTriangularMaskWithTensorBias":
return LowerTriangularMaskWithTensorBias(bias)
class LowerTriangularMaskWithTensorBias(LowerTriangularMask):
"""A lower-triangular (aka causal) mask with an additive bias"""
def __init__(self, bias: torch.Tensor) -> None:
self._bias = bias
def materialize(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
return super().materialize(shape, dtype=dtype, device=device) + self._bias
@dataclass
class _SeqLenInfo:
"""
(Internal) Represents the division of a dimension into blocks.
    For example, to represent a dimension of length 7 divided into
    three blocks of lengths 2, 3 and 2, use `from_seqlens([2, 3, 2])`.
The members will be:
max_seqlen: 3
seqstart_py: [0, 2, 5, 7]
seqstart: torch.IntTensor([0, 2, 5, 7])
"""
seqstart: torch.Tensor
max_seqlen: int
seqstart_py: List[int]
def to(self, device: torch.device) -> None:
self.seqstart = self.seqstart.to(device, non_blocking=True)
def intervals(self) -> Iterable[Tuple[int, int]]:
yield from zip(self.seqstart_py, self.seqstart_py[1:])
@classmethod
def from_seqlens(cls, seqlens: Iterable[int]) -> "_SeqLenInfo":
"""
Input tensors are assumed to be in shape [B, M, *]
"""
assert not isinstance(seqlens, torch.Tensor)
seqstart_py = [0]
max_seqlen = -1
for seqlen in seqlens:
max_seqlen = max(max_seqlen, seqlen)
seqstart_py.append(seqstart_py[len(seqstart_py) - 1] + seqlen)
seqstart = torch.tensor(seqstart_py, dtype=torch.int32)
return cls(max_seqlen=max_seqlen, seqstart=seqstart, seqstart_py=seqstart_py)
def split(
self, x: torch.Tensor, batch_sizes: Optional[Sequence[int]] = None
) -> List[torch.Tensor]:
if self.seqstart_py[-1] != x.shape[1] or x.shape[0] != 1:
raise ValueError(
f"Invalid `torch.Tensor` of shape {x.shape}, expected format "
f"(B, M, *) with B=1 and M={self.seqstart_py[-1]}\n"
f" seqstart: {self.seqstart_py}"
)
if batch_sizes is None:
batch_sizes = [1] * (len(self.seqstart_py) - 1)
split_chunks = []
it = 0
for batch_size in batch_sizes:
split_chunks.append(
self.seqstart_py[it + batch_size] - self.seqstart_py[it]
)
it += batch_size
return [
tensor.reshape([bs, -1, *tensor.shape[2:]])
for bs, tensor in zip(batch_sizes, x.split(split_chunks, dim=1))
]
@dataclass
class _PaddedSeqLenInfo(_SeqLenInfo):
"""
(Internal) Represents the division of a dimension into blocks which are
padded out to the same total length.
For example, to represent a dimension of length 12 with space for
three blocks of length 4, but where the occupied lengths are
2, 3 and 2, use `from_seqlens_padded([2, 3, 2], 4)`.
The layout along the dimension is
0 ─► block 0
block 0
<space>
<space>
4 ─► block 1
block 1
block 1
<space>
8 ─► block 2
block 2
<space>
<space>
12 ─►
The members will be:
max_seqlen: 3
seqstart_py: [0, 4, 8, 12]
seqstart: torch.IntTensor([0, 4, 8, 12])
seqlen_py: [2, 3, 2]
seqlen: torch.IntTensor([2, 3, 2])
"""
seqlen: torch.Tensor
seqlen_py: Sequence[int]
# From parent: seqstart[i] contains the start position
# of the i-th sequence
# seqstart: torch.Tensor
def __post_init__(self) -> None:
assert len(self.seqstart_py) == len(self.seqlen_py) + 1
def to(self, device: torch.device) -> None:
self.seqlen = self.seqlen.to(device, non_blocking=True)
super().to(device)
def intervals(self) -> Iterable[Tuple[int, int]]:
for (start, _), length in zip(super().intervals(), self.seqlen_py):
yield start, start + length
@classmethod
def from_seqlens(cls, seqlens: Iterable[int]) -> "_SeqLenInfo":
raise RuntimeError(
"Use either `_SeqLenInfo.from_seqlens` or `_PaddedSeqLenInfo.from_seqlens_padded`"
)
@classmethod
def from_seqlens_padded(
cls, seqlens: Sequence[int], padding: int
) -> "_PaddedSeqLenInfo":
"""
Input tensors are assumed to be in shape [B, M, *]
seqstart = padding * torch.arange(batch_size)
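
        For example (values matching the class docstring above):

        .. code-block:: python

            info = _PaddedSeqLenInfo.from_seqlens_padded([2, 3, 2], padding=4)
            assert info.seqstart_py == [0, 4, 8, 12]
            assert info.seqlen_py == [2, 3, 2]
            assert info.max_seqlen == 3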
"""
assert not isinstance(seqlens, torch.Tensor)
assert all(seqlen <= padding for seqlen in seqlens)
seqstart_py = list(range(0, len(seqlens) * padding + 1, padding))
return cls(
seqlen=torch.tensor(seqlens, dtype=torch.int32),
seqlen_py=seqlens,
max_seqlen=max(seqlens),
seqstart=torch.tensor(seqstart_py, dtype=torch.int32),
seqstart_py=seqstart_py,
)
def split(
self, x: torch.Tensor, batch_sizes: Optional[Sequence[int]] = None
) -> List[torch.Tensor]:
raise NotImplementedError("_PaddedSeqLenInfo.split")
@dataclass
class BlockDiagonalMask(AttentionBias):
"""
A block-diagonal mask that can be passed as ``attn_bias``
argument to :attr:`xformers.ops.memory_efficient_attention`.
Queries and Keys are each divided into the same number of blocks.
Queries in block i only attend to keys in block i.
.. figure:: /_static/block_diag_bias.png
This bias can be used to handle a batch of sequences of
different lengths, via :attr:`BlockDiagonalMask.from_tensor_list`
:Example:
.. code-block:: python
import torch
from xformers.ops import fmha
K = 16
dtype = torch.float16
device = "cuda"
list_x = [
torch.randn([1, 3, 1, K], dtype=dtype, device=device),
torch.randn([1, 6, 1, K], dtype=dtype, device=device),
torch.randn([1, 2, 1, K], dtype=dtype, device=device),
]
attn_bias, x = fmha.BlockDiagonalMask.from_tensor_list(list_x)
linear = torch.nn.Linear(K, K * 3).to(device=device, dtype=dtype)
q, k, v = linear(x).reshape([1, -1, 1, 3, K]).unbind(-2)
out = fmha.memory_efficient_attention(q, k, v, attn_bias=attn_bias)
list_out = attn_bias.split(out)
print(list_out[0].shape) # [1, 3, 1, K]
assert tuple(list_out[0].shape) == (1, 3, 1, K)
"""
q_seqinfo: _SeqLenInfo
k_seqinfo: _SeqLenInfo
_batch_sizes: Optional[Sequence[int]] = None
def _create_block_mask(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
return torch.zeros(
shape,
dtype=dtype,
device=device,
)
def materialize(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
"""Materialize the attention bias - for debugging & testing"""
assert shape[-1] == self.k_seqinfo.seqstart_py[-1], (
shape[-1],
self.k_seqinfo.seqstart_py[-1],
)
assert shape[-2] == self.q_seqinfo.seqstart_py[-1], (
shape[-2],
self.q_seqinfo.seqstart_py[-1],
)
mask = torch.empty(shape[-2:], dtype=dtype, device=device)
mask.fill_(-math.inf)
for i, ((q_start, q_end), (k_start, k_end)) in enumerate(
zip(
self.q_seqinfo.intervals(),
self.k_seqinfo.intervals(),
)
):
mask[q_start:q_end, k_start:k_end] = self._create_block_mask(
(q_end - q_start, k_end - k_start),
dtype=dtype,
device=device,
)
for _ in range(len(shape) - 2):
mask = mask.unsqueeze(0)
return mask.expand(shape)
@classmethod
def from_seqlens(
cls,
q_seqlen: Sequence[int],
kv_seqlen: Optional[Sequence[int]] = None,
) -> "BlockDiagonalMask":
"""Creates a :attr:`BlockDiagonalMask` from a list of tensors lengths for query and key/value.
Args:
q_seqlen (Union[Sequence[int], torch.Tensor]): List or tensor of sequence lengths for query tensors
kv_seqlen (Union[Sequence[int], torch.Tensor], optional): List or tensor of sequence lengths for key/value.
(Defaults to ``q_seqlen``.)
Returns:
BlockDiagonalMask
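
        :Example:

        A small sketch - the sequence lengths below are arbitrary:

        .. code-block:: python

            from xformers.ops import fmha

            # Three sequences of lengths 3, 6 and 2
            attn_bias = fmha.BlockDiagonalMask.from_seqlens([3, 6, 2])
            # Materialize for inspection - the full mask spans 3 + 6 + 2 = 11
            # queries and keys
            mask = attn_bias.materialize((11, 11))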
"""
assert kv_seqlen is None or len(q_seqlen) == len(kv_seqlen)
q_seqinfo = _SeqLenInfo.from_seqlens(q_seqlen)
if kv_seqlen is None or q_seqlen == kv_seqlen:
k_seqinfo = q_seqinfo
else:
k_seqinfo = _SeqLenInfo.from_seqlens(kv_seqlen)
return cls(q_seqinfo=q_seqinfo, k_seqinfo=k_seqinfo)
@classmethod
def from_tensor_list(
cls,
tensors: Sequence[torch.Tensor],
) -> Tuple["BlockDiagonalMask", torch.Tensor]:
"""Creates a :attr:`BlockDiagonalMask` from a list of tensors, and returns the tensors
concatenated on the sequence length dimension
.. figure:: /_static/block_diag_cat_split.png
See also :attr:`BlockDiagonalMask.split` to split the returned
:attr:`torch.Tensor` back to a list of tensors of varying sequence length
Args:
tensors (Sequence[torch.Tensor]): A list of tensors of shape ``[B, M_i, *]``.
All tensors should have the same dimension and the same batch size ``B``, but
they can have different sequence length ``M``.
Returns:
Tuple[BlockDiagonalMask, torch.Tensor]: The corresponding bias for the attention
along with `tensors` concatenated on the sequence length dimension, with shape ``[1, sum_i{M_i}, *]``
"""
batch_sizes = [tensor.shape[0] for tensor in tensors]
seqlens = []
for x in tensors:
for _ in range(x.shape[0]):
seqlens.append(x.shape[1])
block_diag = cls.from_seqlens(seqlens)
block_diag._batch_sizes = batch_sizes
tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in tensors)
concat_tensors = torch.cat(tensors_bs1, dim=1)
return block_diag, concat_tensors
@classmethod
def from_tensor_lists_qkv(
cls,
tensors_q: Sequence[torch.Tensor],
tensors_k: Sequence[torch.Tensor],
tensors_v: Optional[Sequence[torch.Tensor]] = None,
) -> Tuple["BlockDiagonalMask", torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
assert len(tensors_q) == len(tensors_k)
assert tensors_v is None or len(tensors_v) == len(tensors_q)
batch_sizes = [tensor.shape[0] for tensor in tensors_q]
q_seqlens, kv_seqlens = [], []
for i, (q, k) in enumerate(zip(tensors_q, tensors_k)):
assert q.shape[0] == k.shape[0]
q_seqlens += [q.shape[1]] * q.shape[0]
kv_seqlens += [k.shape[1]] * k.shape[0]
assert tensors_v is None or tensors_v[i].shape[:2] == k.shape[:2]
block_diag = cls.from_seqlens(q_seqlens, kv_seqlens)
block_diag._batch_sizes = batch_sizes
return (
block_diag,
torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_q], dim=1),
torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_k], dim=1),
torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_v], dim=1)
if tensors_v is not None
else None,
)
def split_queries(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]:
return self.q_seqinfo.split(tensor, self._batch_sizes)
def split_kv(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]:
return self.k_seqinfo.split(tensor, self._batch_sizes)
def split(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]:
"""The inverse operation of :attr:`BlockDiagonalCausalMask.from_tensor_list`
Args:
tensor (torch.Tensor): Tensor of tokens of shape ``[1, sum_i{M_i}, *]``
Returns:
Sequence[torch.Tensor]: A list of tokens with possibly different sequence lengths
"""
assert self.q_seqinfo is self.k_seqinfo
return self.q_seqinfo.split(tensor, self._batch_sizes)
def make_causal(self) -> "BlockDiagonalCausalMask":
"""Makes each block causal"""
return BlockDiagonalCausalMask(
q_seqinfo=self.q_seqinfo,
k_seqinfo=self.k_seqinfo,
_batch_sizes=self._batch_sizes,
)
def make_causal_from_bottomright(self) -> "BlockDiagonalCausalFromBottomRightMask":
"""Makes each block causal with a possible non-causal prefix"""
return BlockDiagonalCausalFromBottomRightMask(
q_seqinfo=self.q_seqinfo,
k_seqinfo=self.k_seqinfo,
_batch_sizes=self._batch_sizes,
)
@dataclass
class BlockDiagonalCausalMask(BlockDiagonalMask):
"""
Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask`, except that each block is causal.
Queries and Keys are each divided into the same number of blocks.
A query Q in block i cannot attend to a key which is not in block i,
nor one which is farther from the initial key in block i than Q
is from the initial query in block i.
"""
def _create_block_mask(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
return LowerTriangularMask().materialize(
shape,
dtype=dtype,
device=device,
)
@dataclass
class BlockDiagonalCausalFromBottomRightMask(BlockDiagonalMask):
"""
Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask`, except that each block is causal.
This mask allows for a non-causal prefix
NOTE: Each block should have `num_keys >= num_queries` otherwise the forward pass is not
defined (softmax of vector of `-inf` in the attention)
Queries and keys are each divided into the same number of blocks.
A query Q in block i cannot attend to a key which is not in block i,
    nor one which is nearer the final key in block i than Q is to the
final query in block i.
"""
def __post_init__(self) -> None:
for i, ((q_start, q_end), (k_start, k_end)) in enumerate(
zip(
self.q_seqinfo.intervals(),
self.k_seqinfo.intervals(),
)
):
num_queries = q_end - q_start
num_keys = k_end - k_start
if num_keys < num_queries:
raise ValueError(
f"Block #{i} has num_keys={num_keys} and num_queries={num_queries}."
" Expected `num_keys >= num_queries`"
)
def _create_block_mask(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
create_as = dtype if dtype is not torch.bfloat16 else torch.float32
tensor = torch.full( # type: ignore
shape,
dtype=create_as,
fill_value=float("-inf"),
device=device,
)
num_queries, num_keys = shape[-2:]
return torch.triu(tensor, diagonal=num_keys - num_queries + 1).to(dtype) # type: ignore
@dataclass
class BlockDiagonalCausalWithOffsetPaddedKeysMask(AttentionBias):
"""
Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask`,
except an offset on causality is allowed for each block and we support padding for k/v
The keys and values are divided into blocks which are padded out to
the same total length.
For example, if there is space for 12 keys, for three blocks of
max length 4, but we only want to use the first 2, 3 and 2
of each block, use `kv_padding=4` and `kv_seqlens=[2, 3, 2]`.
The queries are divided into blocks, without padding, of lengths given by
q_seqlen.
A query Q in block i cannot attend to a key which is not in block i,
nor one which is not in use (i.e. in the padded area),
nor one whose distance from the initial key in block i
exceeds the distance of Q from the initial query in block i by
more than causal_diagonal[i] (which defaults to 0).
"""
q_seqinfo: _SeqLenInfo
k_seqinfo: _PaddedSeqLenInfo
causal_diagonal: Optional[torch.Tensor] = None
def _create_block_mask(
self,
shape: Tuple[int, ...],
offset: int = 0,
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
create_as = dtype if dtype is not torch.bfloat16 else torch.float32
tensor = torch.full( # type: ignore
shape,
dtype=create_as,
fill_value=float("-inf"),
device=device,
)
return torch.triu(tensor, diagonal=1 + offset).to(dtype) # type: ignore
def materialize(
self,
shape: Tuple[int, ...],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
) -> torch.Tensor:
"""Materialize the attention bias - for debugging & testing"""
assert shape[-1] == self.k_seqinfo.seqstart_py[-1]
assert shape[-2] == self.q_seqinfo.seqstart_py[-1]
mask = torch.empty(shape[-2:], dtype=dtype, device=device)
mask.fill_(-math.inf)
for i, ((q_start, q_end), (k_start, k_end)) in enumerate(
zip(
self.q_seqinfo.intervals(),
self.k_seqinfo.intervals(),
)
):
mask[q_start:q_end, k_start:k_end] = self._create_block_mask(
(q_end - q_start, k_end - k_start),
offset=0
if self.causal_diagonal is None
else int(self.causal_diagonal[i].item()),
dtype=dtype,
device=device,
)
for _ in range(len(shape) - 2):
mask = mask.unsqueeze(0)
return mask.expand(shape)
@classmethod
def from_seqlens(
cls,
q_seqlen: Sequence[int],
kv_padding: int,
kv_seqlen: Sequence[int],
causal_diagonal: Optional[torch.Tensor] = None,
) -> "BlockDiagonalCausalWithOffsetPaddedKeysMask":
"""Creates a :attr:`BlockDiagonalCausalWithOffsetPaddedKeysMask` from a list of tensors lengths for query and key/value.
Args:
q_seqlen (Sequence[int]): List or tensor of sequence lengths for query tensors
            kv_padding (int): Padding for k/v - also an upper bound on each individual key length
kv_seqlen (Sequence[int]): List or tensor of sequence lengths for key/value.
causal_diagonal (torch.Tensor, optional): tensor of sequence positions for causal masking
Returns:
BlockDiagonalCausalWithOffsetPaddedKeysMask
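
        :Example:

        A small sketch - the lengths and padding below are arbitrary:

        .. code-block:: python

            from xformers.ops.fmha.attn_bias import (
                BlockDiagonalCausalWithOffsetPaddedKeysMask,
            )

            # Two queries per sequence, keys padded out to blocks of length 4
            attn_bias = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
                q_seqlen=[2, 2, 2],
                kv_padding=4,
                kv_seqlen=[2, 3, 2],
            )
            # Materialized shape is [sum(q_seqlen), len(kv_seqlen) * kv_padding]
            mask = attn_bias.materialize((6, 12))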
"""
assert kv_seqlen is None or len(q_seqlen) == len(kv_seqlen), (
q_seqlen,
kv_seqlen,
)
q_seqinfo = _SeqLenInfo.from_seqlens(q_seqlen)
k_seqinfo = _PaddedSeqLenInfo.from_seqlens_padded(kv_seqlen, kv_padding)
return cls(
q_seqinfo=q_seqinfo, k_seqinfo=k_seqinfo, causal_diagonal=causal_diagonal
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/attn_bias.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from typing import Any, List, Mapping, Optional, Set, Tuple, Type, Union
import torch
from ..._cpp_lib import _built_with_cuda
from ..common import BaseOperator
from .attn_bias import AttentionBias, BlockDiagonalMask, LowerTriangularMask
def _is_bias_type_supported_in_BMK(attn_bias_type: Any) -> bool:
# NoneType
if isinstance(None, attn_bias_type):
return True
if attn_bias_type in [LowerTriangularMask, torch.Tensor]:
return True
return False
@dataclass
class Inputs:
"""
Stores inputs to the `memory_efficient_attention` operators
"""
query: torch.Tensor
key: torch.Tensor
value: torch.Tensor
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None
p: float = 0.0
scale: Optional[float] = None
@property
def device(self) -> torch.device:
return self.query.device
@property
def scale_float(self) -> float:
return self.query.shape[-1] ** (-0.5) if self.scale is None else self.scale
def normalize_bmhk(self) -> Tuple[int, ...]:
if self.query.ndim not in [3, 4]:
raise ValueError(
f"Invalid shape for query: {self.query.shape}. "
"Expected shape [batch, seqlen, num_heads, K], or [batch, seqlen, K]."
)
output_shape = (self.query.shape[:-1]) + (self.value.shape[-1],)
# Convert from legacy format
if self.query.ndim == 3:
self.query = self.query.unsqueeze(2)
self.key = self.key.unsqueeze(2)
self.value = self.value.unsqueeze(2)
return output_shape
def validate_inputs(self) -> None:
qkv = (self.query, self.key, self.value)
if self.query.ndim not in (3, 4) or any(x.ndim != self.query.ndim for x in qkv):
raise ValueError(
f"Query/Key/Value should all have BMHK or BMK shape.\n"
f" query.shape: {self.query.shape}\n"
f" key.shape : {self.key.shape}\n"
f" value.shape: {self.value.shape}"
)
if any(x.device != self.query.device for x in qkv):
raise ValueError("Query/Key/Value should all be on the same device")
if any(x.dtype != self.query.dtype for x in qkv):
raise ValueError(
"Query/Key/Value should all have the same dtype\n"
f" query.dtype: {self.query.dtype}\n"
f" key.dtype : {self.key.dtype}\n"
f" value.dtype: {self.value.dtype}"
)
# Biases with tensors attached are meant to be in BMHK format
# This would require to permute biases/gradients which can be expensive,
# so let's just forbid it - BMK is a legacy format anyway
if self.query.ndim == 3 and not _is_bias_type_supported_in_BMK(
type(self.attn_bias)
):
raise ValueError(
f"Please provide inputs in BMHK format rather "
f"than BMK when using bias type `{type(self.attn_bias).__name__}`"
)
if isinstance(self.attn_bias, BlockDiagonalMask):
if any(x.shape[0] != 1 for x in qkv):
raise ValueError(
f"Expected batch_size=1 when using block-diagonal bias\n"
f" query.shape: {self.query.shape}\n"
f" key.shape : {self.key.shape}\n"
f" value.shape: {self.value.shape}"
)
if self.p < 0.0 or self.p > 1.0:
raise ValueError(f"Invalid dropout probability: p={self.p}")
@dataclass
class Context:
lse: torch.Tensor
out: torch.Tensor
op_bw: Optional[Type["AttentionBwOpBase"]] = None
rng_state: Optional[torch.Tensor] = None
def get_padded_lse(self, pad_to: int, force_pad_inf: bool = False) -> torch.Tensor:
pad_amount = (pad_to - (self.lse.shape[2] % pad_to)) % pad_to
lse = self.lse
if pad_amount > 0:
if force_pad_inf:
lse = lse[:, :, : self.out.shape[1]]
pad_amount = (pad_to - (lse.shape[2] % pad_to)) % pad_to
lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf)
elif force_pad_inf and self.out.shape[1] != lse.shape[2]:
lse[:, :, self.out.shape[1] :].fill_(math.inf)
return lse
@dataclass
class Gradients:
dq: torch.Tensor
dk: torch.Tensor
dv: torch.Tensor
# bias gradient. None if there is no tensor bias or if it doesn't require grad
db: Optional[torch.Tensor] = None
class AttentionOpBase(BaseOperator):
"""Base class for any attention operator in xFormers
See:
- :attr:`xformers.ops.fmha.cutlass.FwOp`
- :attr:`xformers.ops.fmha.cutlass.BwOp`
- :attr:`xformers.ops.fmha.flash.FwOp`
- :attr:`xformers.ops.fmha.flash.BwOp`
- :attr:`xformers.ops.fmha.triton.FwOp`
- :attr:`xformers.ops.fmha.triton.BwOp`
- :attr:`xformers.ops.fmha.small_k.FwOp`
- :attr:`xformers.ops.fmha.small_k.BwOp`
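
    :Example:

    A sketch of forcing a specific (forward, backward) operator pair, assuming
    the inputs ``q``, ``k``, ``v`` are supported by those operators:

    .. code-block:: python

        from xformers.ops import fmha

        out = fmha.memory_efficient_attention(
            q, k, v, op=(fmha.cutlass.FwOp, fmha.cutlass.BwOp)
        )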
"""
OPERATOR: Any
SUPPORTED_DEVICES: Set[str]
CUDA_MINIMUM_COMPUTE_CAPABILITY: Tuple[int, int] = (5, 0)
SUPPORTED_DTYPES: Set[torch.dtype]
SUPPORTED_MAX_K: float
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {type(None)}
SUPPORTS_DROPOUT: bool
SUPPORTS_CUSTOM_SCALE: bool = False
SUPPORTS_DIFFERENT_VALUE_EMBED: bool = False
NAME: str
OPERATOR_CATEGORY = "memory_efficient_attention"
_TEST_BATCH_SIZES: List[int] = [1, 300]
_TEST_K: List[int] = [32, 128]
@classmethod
def supports(cls, d: Inputs) -> bool:
return not cls.not_supported_reasons(d)
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
"""
Returns a list of reasons why this is not supported.
The kernel can run these inputs only if the returned list is empty
"""
reasons = []
device_type = d.query.device.type
dtype = d.query.dtype
if device_type not in cls.SUPPORTED_DEVICES:
reasons.append(f"device={device_type} (supported: {cls.SUPPORTED_DEVICES})")
if device_type == "cuda" and not _built_with_cuda:
reasons.append("xFormers wasn't build with CUDA support")
if dtype not in cls.SUPPORTED_DTYPES:
reasons.append(f"dtype={dtype} (supported: {cls.SUPPORTED_DTYPES})")
if (
not cls.SUPPORTS_DIFFERENT_VALUE_EMBED
and d.query.shape[-1] != d.value.shape[-1]
):
reasons.append("query.shape[-1] != value.shape[-1]")
if max(d.query.shape[-1], d.value.shape[-1]) > cls.SUPPORTED_MAX_K:
reasons.append(
f"max(query.shape[-1] != value.shape[-1]) > {cls.SUPPORTED_MAX_K}"
)
if type(d.attn_bias) not in cls.SUPPORTED_ATTN_BIAS_TYPES:
reasons.append(f"attn_bias type is {type(d.attn_bias)}")
if (d.p != 0.0) and not cls.SUPPORTS_DROPOUT:
reasons.append("dropout > 0.0")
if d.scale is not None and not cls.SUPPORTS_CUSTOM_SCALE:
reasons.append("has custom scale")
# bfloat16 is only supported on A100+
# ... although the kernels can still run and give the
# correct result
if dtype is torch.bfloat16 and (
not device_type.startswith("cuda")
or torch.cuda.get_device_capability(d.query.device)[0] < 8
):
reasons.append("bf16 is only supported on A100+ GPUs")
if not cls.is_available():
reasons.append(
"Operator wasn't built - see `python -m xformers.info` for more info"
)
return reasons
class AttentionFwOpBase(AttentionOpBase):
ERROR_ATOL: Mapping[torch.dtype, float] = {
torch.float: 3e-4,
torch.half: 4e-3,
torch.bfloat16: 2e-2,
}
ERROR_RTOL: Mapping[torch.dtype, float] = {
torch.float: 2e-5,
torch.half: 4e-4,
torch.bfloat16: 5e-3,
}
@classmethod
def apply(
cls, inp: Inputs, needs_gradient: bool
) -> Tuple[torch.Tensor, Optional[Context]]:
raise NotImplementedError()
@classmethod
def attn_operator_flop(
cls,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
causal: bool = False,
seqstart_k: Optional[torch.Tensor] = None,
seqstart_q: Optional[torch.Tensor] = None,
) -> int:
"""
Computes total flops for the attention
Assumes inputs in format BMHK
"""
assert query.ndim == 4
if seqstart_q is not None:
seqstart_q_py = seqstart_q.tolist()
else:
seqstart_q_py = [0, query.shape[1]]
if seqstart_k is not None:
seqstart_k_py = seqstart_k.tolist()
else:
seqstart_k_py = [0, key.shape[1]]
total_flop = 0
for q_start, q_end, k_start, k_end in zip(
seqstart_q_py, seqstart_q_py[1:], seqstart_k_py, seqstart_k_py[1:]
):
num_q = q_end - q_start
num_kv = k_end - k_start
# (M,K) @ (K,N) GEMM needs M*N*K*2 flop
# Q @ K.transpose
total_flop += num_q * num_kv * query.shape[-1] * 2
# (ignore softmax)
# attn @ V
            total_flop += num_q * value.shape[-1] * num_kv * 2
# Multiply by num_heads and batches
total_flop = total_flop * value.shape[2] * value.shape[0]
if causal:
total_flop //= 2
return total_flop
class AttentionBwOpBase(AttentionOpBase):
ERROR_ATOL: Mapping[torch.dtype, float] = {
torch.float: 5e-4,
torch.half: 9e-2,
torch.bfloat16: 0.7,
}
ERROR_RTOL: Mapping[torch.dtype, float] = {
torch.float: 1e-4,
torch.half: 2e-2,
torch.bfloat16: 0.1,
}
SUPPORTS_ATTN_BIAS_GRAD = False
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(AttentionBwOpBase, cls).not_supported_reasons(d)
if (
isinstance(d.attn_bias, torch.Tensor)
and d.attn_bias.requires_grad
and not cls.SUPPORTS_ATTN_BIAS_GRAD
):
reasons.append(
"Computing the bias gradient is not supported (attn_bias.requires_grad = True)"
)
return reasons
@classmethod
def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
raise NotImplementedError()
@classmethod
def attn_operator_flop(
cls,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
causal: bool = False,
seqstart_k: Optional[torch.Tensor] = None,
seqstart_q: Optional[torch.Tensor] = None,
) -> int:
"""
Computes total flops for the attention
Assumes inputs in format BMHK
"""
assert query.ndim == 4
if seqstart_q is not None:
seqstart_q_py = seqstart_q.tolist()
else:
seqstart_q_py = [0, query.shape[1]]
if seqstart_k is not None:
seqstart_k_py = seqstart_k.tolist()
else:
seqstart_k_py = [0, key.shape[1]]
total_flop = 0
for q_start, q_end, k_start, k_end in zip(
seqstart_q_py, seqstart_q_py[1:], seqstart_k_py, seqstart_k_py[1:]
):
num_q = q_end - q_start
num_kv = k_end - k_start
Kqk = query.shape[-1]
Kv = value.shape[-1]
# (M,K) @ (K,N) GEMM needs M*N*K*2 flop
# att = Q @ K.transpose
total_flop += num_q * num_kv * Kqk * 2
# att @ dO
total_flop += num_kv * num_q * Kv * 2
# dov = dO @ V
total_flop += num_q * Kv * num_kv * 2
# dov @ K
total_flop += num_q * Kqk * num_kv * 2
# dov @ Q
total_flop += num_q * Kqk * num_kv * 2
# Multiply by num_heads and batches
total_flop = total_flop * value.shape[2] * value.shape[0]
if causal:
total_flop //= 2
return total_flop
AttentionOp = Tuple[
Optional[Type[AttentionFwOpBase]], Optional[Type[AttentionBwOpBase]]
]
@dataclass
class AttentionOpDispatch:
"""Dispatcher to automatically select
the best operator to run memory-efficient attention.
:Deprecated:
This class is deprecated and will be removed in a later version
"""
op: AttentionOp
@classmethod
def from_arguments(
cls,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
) -> "AttentionOpDispatch":
"""Here for backward compatibility"""
from .dispatch import _dispatch_bw, _dispatch_fw
inp = Inputs(
query=query,
key=key,
value=value,
attn_bias=attn_bias,
p=p,
scale=scale,
)
return AttentionOpDispatch(op=(_dispatch_fw(inp), _dispatch_bw(inp)))
def bmk2bmhk(tensor, num_heads: int) -> torch.Tensor:
if tensor.ndim == 4:
return tensor
return tensor.reshape([-1, num_heads, tensor.shape[1], tensor.shape[2]]).permute(
(0, 2, 1, 3)
)
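# Illustrative sketch of `bmk2bmhk` (shapes chosen arbitrarily):
#   t = torch.randn([6, 16, 32])      # BMK with B=2, num_heads=3
#   bmk2bmhk(t, num_heads=3).shape    # torch.Size([2, 16, 3, 32])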
def check_lastdim_alignment_stride1(
reasons: List[str], name: str, x: torch.Tensor, alignment: int
) -> None:
if x.shape[-1] % alignment != 0:
reasons.append(f"{name}.shape[-1] % {alignment} != 0")
elif x.stride(-2) % alignment != 0:
reasons.append(
f"{name}.stride(-2) % {alignment} != 0 ({name}.stride() = {x.stride()})"
)
# We can have stride=0 sometimes if dimension=1
if x.stride(-1) > 1:
reasons.append(
f"{name}.stride(-1) > 1 ({name}.stride() = {x.stride()}) - you should call `.contiguous()` on the input"
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/common.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import replace
from typing import Any, List, Optional, Set, Tuple
import torch
from ..common import get_operator, register_operator
from .attn_bias import BlockDiagonalCausalMask, BlockDiagonalMask, LowerTriangularMask
from .common import (
AttentionBwOpBase,
AttentionFwOpBase,
Context,
Gradients,
Inputs,
check_lastdim_alignment_stride1,
)
try:
from ... import _C_flashattention # type: ignore[attr-defined]
# create library so that flash-attn goes through the PyTorch Dispatcher
_flash_lib = torch.library.Library("xformers_flash", "DEF")
_flash_lib.define(
"flash_fwd(Tensor query, Tensor key, Tensor value, "
"Tensor cu_seqlens_q, Tensor cu_seqlens_k, "
"int max_seqlen_q, int max_seqlen_k, "
"float p, float softmax_scale, "
"bool is_causal, bool return_softmax) -> (Tensor, Tensor)"
)
_flash_lib.define(
"flash_bwd(Tensor dout, Tensor query, Tensor key, Tensor value, "
"Tensor out, Tensor softmax_lse_, Tensor dq, Tensor dk, Tensor dv, "
"Tensor cu_seqlens_q, Tensor cu_seqlens_k, "
"int max_seqlen_q, int max_seqlen_k, "
"float p, float softmax_scale, bool is_causal) -> Tensor"
)
def _flash_fwd(
query,
key,
value,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
causal,
return_softmax,
):
out = query.new_empty(query.shape[0], query.shape[1], value.shape[2])
lse = _C_flashattention.fwd(
query,
key,
value,
out,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
False,
causal,
return_softmax,
0,
None,
)[0]
return out, lse
def _flash_bwd(
grad,
query,
key,
value,
out,
lse,
dq,
dk,
dv,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
causal,
):
_C_flashattention.bwd(
grad,
query,
key,
value,
out,
lse,
dq,
dk,
dv,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
False,
causal,
0,
None,
)
return dq
_flash_lib.impl("flash_fwd", _flash_fwd, "CUDA")
_flash_lib.impl("flash_bwd", _flash_bwd, "CUDA")
except ImportError:
pass
def _convert_input_format(
inp: Inputs,
) -> Tuple[Inputs, float, torch.Tensor, int, torch.Tensor, int]:
query, key, value = inp.query, inp.key, inp.value
batch = query.shape[0]
seqlen_q = query.shape[1]
seqlen_kv = key.shape[1]
num_heads = query.shape[2]
head_dim_q = query.shape[3]
head_dim_v = value.shape[3]
attn_bias = inp.attn_bias
if isinstance(attn_bias, BlockDiagonalMask):
attn_bias.k_seqinfo.seqstart = attn_bias.k_seqinfo.seqstart.to(
inp.query.device, non_blocking=True
)
attn_bias.q_seqinfo.seqstart = attn_bias.q_seqinfo.seqstart.to(
inp.query.device, non_blocking=True
)
cu_seqlen_k = attn_bias.k_seqinfo.seqstart
cu_seqlen_q = attn_bias.q_seqinfo.seqstart
max_seqlen_q = attn_bias.q_seqinfo.max_seqlen
max_seqlen_k = attn_bias.k_seqinfo.max_seqlen
else:
cu_seqlen_k = torch.arange(
0,
(batch + 1) * seqlen_kv,
step=seqlen_kv,
dtype=torch.int32,
device=query.device,
)
if seqlen_q == seqlen_kv:
cu_seqlen_q = cu_seqlen_k
else:
cu_seqlen_q = torch.arange(
0,
(batch + 1) * seqlen_q,
step=seqlen_q,
dtype=torch.int32,
device=query.device,
)
max_seqlen_q = seqlen_q
max_seqlen_k = seqlen_kv
    # Initially we have `query.shape = [batch, seqlen, num_heads, head_dim_q]`
# We want format `[batch * seqlen, num_heads, head_dim_q]`
new_inp = replace(
inp,
query=query.reshape([batch * seqlen_q, num_heads, head_dim_q]),
key=key.reshape([batch * seqlen_kv, num_heads, head_dim_q]),
value=value.reshape([batch * seqlen_kv, num_heads, head_dim_v]),
)
softmax_scale = inp.query.shape[-1] ** (-0.5) if inp.scale is None else inp.scale
return new_inp, softmax_scale, cu_seqlen_q, max_seqlen_q, cu_seqlen_k, max_seqlen_k
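# Illustration (assuming a BMHK input and no block-diagonal bias): with
# query.shape == [2, 7, 3, 64] and key/value length 5, the function above returns
#   query -> [2 * 7, 3, 64] == [14, 3, 64], key/value -> [2 * 5, 3, 64] == [10, 3, 64]
# together with cu_seqlen_q == [0, 7, 14], cu_seqlen_k == [0, 5, 10],
# max_seqlen_q == 7 and max_seqlen_k == 5, i.e. the packed varlen layout
# consumed by the flash-attention kernels registered above.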
@register_operator
class FwOp(AttentionFwOpBase):
"""Operator that computes memory-efficient attention using \
`Flash-Attention <https://github.com/HazyResearch/flash-attention>`_ \
implementation.
"""
OPERATOR = get_operator("xformers_flash", "flash_fwd")
SUPPORTED_DEVICES: Set[str] = {"cuda"}
CUDA_MINIMUM_COMPUTE_CAPABILITY = (7, 5)
SUPPORTED_DTYPES: Set[torch.dtype] = {torch.half, torch.bfloat16}
SUPPORTED_MAX_K = 128
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {
type(None),
LowerTriangularMask,
BlockDiagonalMask,
BlockDiagonalCausalMask,
}
SUPPORTS_DROPOUT = True
SUPPORTS_CUSTOM_SCALE = True
SUPPORTS_DIFFERENT_VALUE_EMBED = False
NAME = "flshattF"
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(FwOp, cls).not_supported_reasons(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, 8)
if d.device.type == "cuda":
device_capability = torch.cuda.get_device_capability(d.device)
if device_capability < (7, 5):
reasons.append("requires a GPU with compute capability > 7.5")
return reasons
@classmethod
def apply(
cls, inp: Inputs, needs_gradient: bool
) -> Tuple[torch.Tensor, Optional[Context]]:
return_softmax = False
out_shape = [
inp.query.shape[0],
inp.query.shape[1],
inp.query.shape[2],
inp.value.shape[3],
]
(
inp,
softmax_scale,
cu_seqlens_q,
max_seqlen_q,
cu_seqlens_k,
max_seqlen_k,
) = _convert_input_format(inp)
rng_state = torch.cuda.get_rng_state() if inp.p != 0.0 else None
out, softmax_lse = cls.OPERATOR(
inp.query,
inp.key,
inp.value,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
inp.p,
softmax_scale,
isinstance(inp.attn_bias, (LowerTriangularMask, BlockDiagonalCausalMask)),
return_softmax,
)
out = out.reshape(out_shape)
ctx = Context(out=out, lse=softmax_lse)
if inp.p != 0.0:
ctx.op_bw = BwOp
ctx.rng_state = rng_state
return (out, ctx)
@classmethod
# type: ignore
def operator_flop(
cls,
query,
key,
value,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
causal,
return_softmax,
) -> int:
return cls.attn_operator_flop(
query.unsqueeze(0),
key.unsqueeze(0),
value.unsqueeze(0),
causal=causal,
seqstart_k=cu_seq_lens_k,
seqstart_q=cu_seq_lens_q,
)
@register_operator
class BwOp(AttentionBwOpBase):
__doc__ = FwOp.__doc__
OPERATOR = get_operator("xformers_flash", "flash_bwd")
SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES
CUDA_MINIMUM_COMPUTE_CAPABILITY = FwOp.CUDA_MINIMUM_COMPUTE_CAPABILITY
SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES
SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K
SUPPORTED_ATTN_BIAS_TYPES = FwOp.SUPPORTED_ATTN_BIAS_TYPES
SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT
SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE
SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED
NAME = "flshattB"
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(BwOp, cls).not_supported_reasons(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, 8)
if d.device.type == "cuda":
# We know `d.device` is cuda now
# d=128 is only supported on A100 for bw
# d > 64 is only supported on A100 for bw
device_capability = torch.cuda.get_device_capability(d.device)
if device_capability < (7, 5):
reasons.append("requires a GPU with compute capability > 7.5")
is_sm80 = device_capability[0] == 8 and device_capability[1] == 0
if max(d.key.shape[-1], d.query.shape[-1]) > 64 and not is_sm80:
reasons.append(
"requires a GPU with compute capability == 8.0 for 'query.shape[-1] > 64'"
)
return reasons
@classmethod
def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
dq_shape, dk_shape, dv_shape = inp.query.shape, inp.key.shape, inp.value.shape
(
inp,
softmax_scale,
cu_seqlens_q,
max_seqlen_q,
cu_seqlens_k,
max_seqlen_k,
) = _convert_input_format(inp)
kernel_out_shape = [
inp.query.shape[0],
inp.query.shape[1],
inp.value.shape[2],
]
# Create dq,dk,dv
# If Q/K/V come from a single QKV tensor, let's put the gradient in the
# right strides, so we can avoid a `cat`
if (
inp.query.shape[0] == inp.key.shape[0]
and inp.query.shape[2] == inp.value.shape[2]
and inp.query.storage().data_ptr() == inp.key.storage().data_ptr()
and inp.query.storage().data_ptr() == inp.value.storage().data_ptr()
):
# Create one big contiguous chunk
# This is because q, k and v usually come from a single
# output of a linear layer that is chunked.
# Creating the gradients with the right layout saves us
# a `torch.cat` call in the backward pass
chunk = torch.empty(
(inp.query.shape[0], 3, inp.query.shape[1], inp.query.shape[2]),
dtype=inp.query.dtype,
device=inp.device,
)
grads = Gradients(
dq=chunk.select(1, 0),
dk=chunk.select(1, 1),
dv=chunk.select(1, 2),
)
else:
grads = Gradients(
dq=torch.empty_like(inp.query),
dk=torch.empty_like(inp.key),
dv=torch.empty_like(inp.value),
)
assert grad.dtype in cls.SUPPORTED_DTYPES
cur_rng_state = None
if inp.p != 0.0:
assert ctx.rng_state is not None
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(ctx.rng_state)
cls.OPERATOR(
grad.reshape(kernel_out_shape).contiguous(),
inp.query,
inp.key,
inp.value,
ctx.out.reshape(kernel_out_shape),
ctx.lse,
grads.dq,
grads.dk,
grads.dv,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
inp.p,
softmax_scale,
isinstance(inp.attn_bias, (LowerTriangularMask, BlockDiagonalCausalMask)),
)
if cur_rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
grads.dq = grads.dq.reshape(dq_shape)
grads.dk = grads.dk.reshape(dk_shape)
grads.dv = grads.dv.reshape(dv_shape)
return grads
@classmethod
# type: ignore
def operator_flop(
cls,
grad,
query,
key,
value,
out,
lse,
dq,
dk,
dv,
cu_seq_lens_q,
cu_seq_lens_k,
max_seq_len_q,
max_seq_len_k,
p,
softmax_scale,
causal,
) -> int:
return cls.attn_operator_flop(
query.unsqueeze(0),
key.unsqueeze(0),
value.unsqueeze(0),
causal=causal,
seqstart_k=cu_seq_lens_k,
seqstart_q=cu_seq_lens_q,
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/flash.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Mapping, Optional, Set, Tuple, Union
import torch
from ..common import get_xformers_operator, register_operator
from .attn_bias import AttentionBias
from .common import (
AttentionBwOpBase,
AttentionFwOpBase,
Context,
Gradients,
Inputs,
bmk2bmhk,
)
def _bmhk2bmk_contiguous(tensor) -> torch.Tensor:
return (
tensor.permute((0, 2, 1, 3))
.contiguous()
.view([tensor.shape[0] * tensor.shape[2], tensor.shape[1], tensor.shape[3]])
.contiguous()
)
def _get_tensor_bias_bmk(
attn_bias: Optional[Union[torch.Tensor, AttentionBias]]
) -> Optional[torch.Tensor]:
if not isinstance(attn_bias, torch.Tensor):
assert attn_bias is None
return None
    # collapse a 4d (BMHK-style) bias [B, H, Mq, Mk] into BMK format [B * H, Mq, Mk]
if attn_bias.ndim == 4:
attn_bias = attn_bias.reshape([-1, *attn_bias.shape[2:]])
return attn_bias
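# Layout note (illustration): this deprecated kernel works on BMK tensors, so a
# BMHK input such as [B, M, H, K] == [2, 5, 3, 8] is flattened by
# `_bmhk2bmk_contiguous` into [B * H, M, K] == [6, 5, 8], and a 4d bias
# [B, H, Mq, Mk] == [2, 3, 5, 5] is flattened above into [B * H, Mq, Mk] == [6, 5, 5].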
@register_operator
class FwOp(AttentionFwOpBase):
"""An operator optimized for very small values of K (``K <= 32``) \
and f32 pre-Ampere as it does not use TensorCores.
Only supports contiguous inputs in BMK format, so an extra reshape \
or contiguous call might be done.
:Deprecated:
This operator is deprecated and should not be used in new code
"""
OPERATOR = get_xformers_operator("efficient_attention_forward_small_k")
SUPPORTED_DEVICES = {"cuda", "cpu"}
SUPPORTED_DTYPES = {torch.float}
SUPPORTED_MAX_K: float = 32
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {type(None), torch.Tensor}
SUPPORTS_DROPOUT = True
SUPPORTS_CUSTOM_SCALE = False
NAME = "smallkF"
BACKWARD_ERROR_ATOL: Mapping[torch.dtype, float] = {
torch.float: 4e-3,
}
# as this kernel is a bit slow, this should make tests run faster
_TEST_BATCH_SIZES = [1, 3]
_TEST_K = [2, 3, 8, 16, 32]
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(FwOp, cls).not_supported_reasons(d)
if isinstance(d.attn_bias, torch.Tensor) and d.attn_bias.stride(1) != 0:
reasons.append("bias with non-zero stride not supported")
buffer_size = 8
k = d.query.shape[-1]
for pack in [1, 2, 4]:
if (k % pack) == 0 and (k // pack) <= buffer_size:
return reasons
reasons.append(f"unsupported embed per head: {k}")
return reasons
@classmethod
def apply(
cls, inp: Inputs, needs_gradient: bool
) -> Tuple[torch.Tensor, Optional[Context]]:
if inp.scale is not None:
raise NotImplementedError("Unsupport custom scale")
num_heads = inp.query.shape[2]
query = _bmhk2bmk_contiguous(inp.query)
key = _bmhk2bmk_contiguous(inp.key)
value = _bmhk2bmk_contiguous(inp.value)
out, lse, rng_seed, rng_offset = cls.OPERATOR(
query=query,
key=key,
value=value,
compute_logsumexp=needs_gradient,
attn_bias=_get_tensor_bias_bmk(inp.attn_bias),
p=inp.p,
)
out = bmk2bmhk(out, num_heads)
lse = lse.reshape([lse.shape[0] // num_heads, num_heads, lse.shape[1]])
if not needs_gradient:
return out, None
ctx = Context(out=out, lse=lse)
if inp.p != 0.0:
ctx.op_bw = BwOp
ctx.rng_state = torch.tensor(
[rng_seed, rng_offset], dtype=torch.int64, device="cpu"
)
return out, ctx
@register_operator
class BwOp(AttentionBwOpBase):
__doc__ = FwOp.__doc__
OPERATOR = get_xformers_operator("efficient_attention_backward_small_k")
SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES
SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES
SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K
SUPPORTED_ATTN_BIAS_TYPES = FwOp.SUPPORTED_ATTN_BIAS_TYPES
SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT
SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE
SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED
# there is some extra precision loss in the CPU implementation due to an
# extra accumulation step in grad_q, which is not present in the CUDA
# implementation
ERROR_ATOL: Mapping[torch.dtype, float] = {
torch.float: 4e-3,
}
NAME = "smallkB"
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(BwOp, cls).not_supported_reasons(d)
if isinstance(d.attn_bias, torch.Tensor) and d.attn_bias.stride(1) != 0:
reasons.append("bias with non-zero stride not supported")
buffer_size = 8
k = d.query.shape[-1]
for pack in [1, 2, 4]:
if (k % pack) == 0 and (k // pack) <= buffer_size:
return reasons
reasons.append(f"unsupported embed per head: {k}")
return reasons
@classmethod
def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
num_heads = grad.shape[2]
grad = _bmhk2bmk_contiguous(grad)
query = _bmhk2bmk_contiguous(inp.query)
key = _bmhk2bmk_contiguous(inp.key)
value = _bmhk2bmk_contiguous(inp.value)
out = _bmhk2bmk_contiguous(ctx.out)
rng_seed = rng_offset = 0
if inp.p != 0.0:
if (
ctx.rng_state is None
or ctx.rng_state.dtype != torch.int64
or ctx.rng_state.device.type != "cpu"
or ctx.rng_state.shape != (2,)
):
raise NotImplementedError(f"Invalid rng_state: {ctx.rng_state}")
rng_seed, rng_offset = ctx.rng_state.tolist()
grad_q, grad_k, grad_v = cls.OPERATOR(
grad,
query,
key,
value,
# LSE: BHM -> (BH)M
ctx.lse.reshape([-1, ctx.lse.shape[-1]]),
out,
_get_tensor_bias_bmk(inp.attn_bias),
inp.p,
rng_seed,
rng_offset,
)
return Gradients(
dq=bmk2bmhk(grad_q, num_heads),
dk=bmk2bmhk(grad_k, num_heads),
dv=bmk2bmhk(grad_v, num_heads),
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/small_k.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Mapping, Optional, Set, Tuple, Union
import torch
from ..common import get_xformers_operator, register_operator
from . import attn_bias
from .attn_bias import (
AttentionBias,
BlockDiagonalCausalMask,
BlockDiagonalCausalWithOffsetPaddedKeysMask,
BlockDiagonalMask,
LowerTriangularMask,
LowerTriangularMaskWithTensorBias,
)
from .common import (
AttentionBwOpBase,
AttentionFwOpBase,
Context,
Gradients,
Inputs,
check_lastdim_alignment_stride1,
)
def _uses_tensorcores(sm: int, is_half: bool) -> bool:
if sm >= 80:
return True
if sm >= 70:
return is_half
return False
def _minimum_gemm_alignment(inp: Inputs) -> int:
if inp.device.type != "cuda":
return 1
cap = torch.cuda.get_device_capability(inp.device)
sm = cap[0] * 10 + cap[1]
bits_per_scalar = {torch.float: 32, torch.half: 16, torch.bfloat16: 16}[
inp.query.dtype
]
uses_tensorcores = _uses_tensorcores(sm, bits_per_scalar == 16)
matmul_alignment_mn = 1
if sm >= 80:
matmul_alignment_mn = 4
if uses_tensorcores:
matmul_alignment_mn = max(matmul_alignment_mn, 128 // bits_per_scalar)
return matmul_alignment_mn
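# Worked example: on an A100 (sm80) with f16 inputs the kernel uses TensorCores,
# so the required last-dim alignment is max(4, 128 // 16) == 8 scalars, while
# f32 on an sm70 GPU runs without TensorCores and only needs an alignment of 1.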
def _get_seqlen_info(
inp: Inputs,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], int, int]:
attn_bias = inp.attn_bias
if isinstance(
attn_bias, (BlockDiagonalMask, BlockDiagonalCausalWithOffsetPaddedKeysMask)
):
if (
isinstance(attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask)
and attn_bias.causal_diagonal is not None
):
attn_bias.causal_diagonal = attn_bias.causal_diagonal.to(inp.query.device)
attn_bias.k_seqinfo.to(inp.query.device)
attn_bias.q_seqinfo.to(inp.query.device)
seqstart_k = attn_bias.k_seqinfo.seqstart
seqstart_q = attn_bias.q_seqinfo.seqstart
max_seqlen_q = attn_bias.q_seqinfo.max_seqlen
max_seqlen_k = attn_bias.k_seqinfo.max_seqlen
else:
seqstart_k = None
seqstart_q = None
max_seqlen_q = -1
max_seqlen_k = -1
return seqstart_k, seqstart_q, max_seqlen_q, max_seqlen_k
def _get_tensor_bias(
attn_bias: Optional[Union[torch.Tensor, AttentionBias]]
) -> Optional[torch.Tensor]:
if isinstance(attn_bias, torch.Tensor):
return attn_bias
elif isinstance(attn_bias, LowerTriangularMaskWithTensorBias):
return attn_bias._bias
return None
def _check_bias_alignment(
reasons: List[str], attn_bias: Optional[Union[torch.Tensor, AttentionBias]]
) -> None:
attn_bias_tensor = _get_tensor_bias(attn_bias)
if attn_bias_tensor is not None:
alignment = 128 // torch.finfo(attn_bias_tensor.dtype).bits
show_padding_hint = False
for d in range(attn_bias_tensor.ndim - 1):
if attn_bias_tensor.stride(d) % alignment != 0:
                reasons.append(
                    f"attn_bias.stride({d}) % {alignment} != 0 (attn_bias.stride() = {attn_bias_tensor.stride()})"
                )
show_padding_hint = True
if show_padding_hint:
reasons.append(
"""\
HINT: To use an `attn_bias` with a sequence length that is not a multiple of 8, \
you need to ensure memory is aligned by slicing a bigger tensor. \
Example: use `attn_bias = torch.zeros([1, 1, 5, 8])[:,:,:,:5]` instead of `torch.zeros([1, 1, 5, 5])`"""
)
# We can have stride=0 sometimes if dimension=1
if attn_bias_tensor.stride(-1) > 1:
reasons.append(
f"attn_bias.stride(-1) > 1 (attn_bias.stride() = {attn_bias_tensor.stride()}) - "
"you should call `.contiguous()` on the bias"
)
def _custom_mask_type(bias: Optional[Union[torch.Tensor, AttentionBias]]) -> int:
if isinstance(
bias,
(
LowerTriangularMask,
BlockDiagonalCausalMask,
BlockDiagonalCausalWithOffsetPaddedKeysMask,
),
):
return 1
if isinstance(bias, attn_bias.BlockDiagonalCausalFromBottomRightMask):
return 2
return 0
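# The returned value mirrors the custom mask types understood by the CUTLASS
# kernels: 0 means no causal masking, 1 a causal mask anchored at the top-left
# corner (e.g. LowerTriangularMask), and 2 a causal mask anchored at the
# bottom-right corner (BlockDiagonalCausalFromBottomRightMask).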
@register_operator
class FwOp(AttentionFwOpBase):
"""xFormers' MHA kernel based on CUTLASS.
Supports a large number of settings (including without TensorCores, f32 ...)
and GPUs as old as P100 (Sm60)
"""
OPERATOR = get_xformers_operator("efficient_attention_forward_cutlass")
SUPPORTED_DEVICES: Set[str] = {"cuda"}
SUPPORTED_DTYPES: Set[torch.dtype] = {torch.float, torch.half, torch.bfloat16}
SUPPORTED_MAX_K = 65536
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {
type(None),
torch.Tensor,
LowerTriangularMask,
LowerTriangularMaskWithTensorBias,
BlockDiagonalMask,
BlockDiagonalCausalMask,
BlockDiagonalCausalWithOffsetPaddedKeysMask,
attn_bias.BlockDiagonalCausalFromBottomRightMask,
}
SUPPORTS_DROPOUT = True
SUPPORTS_CUSTOM_SCALE = True
SUPPORTS_DIFFERENT_VALUE_EMBED = True
NAME = "cutlassF"
_TEST_K: List[int] = [
32, # 64x64 kernel
128, # 64x128 kernel
256, # 64x128 with accumulation in gmem
]
@classmethod
def apply(
cls, inp: Inputs, needs_gradient: bool
) -> Tuple[torch.Tensor, Optional[Context]]:
if type(inp.attn_bias) not in FwOp.SUPPORTED_ATTN_BIAS_TYPES:
raise NotImplementedError("Unsupported attn_bias type")
seqstart_k, seqstart_q, max_seqlen_q, _ = _get_seqlen_info(inp)
out, lse, rng_seed, rng_offset = cls.OPERATOR(
query=inp.query,
key=inp.key,
value=inp.value,
attn_bias=_get_tensor_bias(inp.attn_bias),
seqstart_q=seqstart_q,
seqstart_k=seqstart_k,
max_seqlen_q=max_seqlen_q,
dropout_p=inp.p,
compute_logsumexp=needs_gradient,
custom_mask_type=_custom_mask_type(inp.attn_bias),
scale=inp.scale,
causal_diagonal=inp.attn_bias.causal_diagonal
if isinstance(inp.attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask)
else None,
seqlen_k=inp.attn_bias.k_seqinfo.seqlen
if isinstance(inp.attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask)
else None,
)
ctx: Optional[Context] = None
if needs_gradient:
ctx = Context(
out=out,
lse=lse,
# cutlass forward is only compatible with cutlass backward if
# dropout is used (because of the way RNG states are passed and the
# way random numbers are generated during backward)
op_bw=BwOp if inp.p != 0 else None,
)
if inp.p != 0:
ctx.rng_state = torch.tensor(
[rng_seed, rng_offset], dtype=torch.int64, device="cpu"
)
return out, ctx
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(FwOp, cls).not_supported_reasons(d)
matmul_alignment_mn = _minimum_gemm_alignment(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, matmul_alignment_mn)
check_lastdim_alignment_stride1(reasons, "value", d.value, matmul_alignment_mn)
_check_bias_alignment(reasons, d.attn_bias)
return reasons
@classmethod
# type: ignore
def operator_flop(
cls,
q,
k,
v,
b,
seqstart_q,
seqstart_k,
max_seqlen_q_,
compute_lse,
custom_mask_type,
*a,
) -> int:
return cls.attn_operator_flop(
q,
k,
v,
causal=custom_mask_type > 0,
seqstart_k=seqstart_k,
seqstart_q=seqstart_q,
)
@register_operator
class BwOp(AttentionBwOpBase):
__doc__ = FwOp.__doc__
OPERATOR = get_xformers_operator("efficient_attention_backward_cutlass")
SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES
SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES
SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K
SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {
type(None),
torch.Tensor,
LowerTriangularMask,
# TODO: Fix handling of gradient through the fMHA autograd function
# LowerTriangularMaskWithTensorBias,
BlockDiagonalMask,
BlockDiagonalCausalMask,
attn_bias.BlockDiagonalCausalFromBottomRightMask,
}
SUPPORTS_ATTN_BIAS_GRAD = True
SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT
SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE
SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED
NAME = "cutlassB"
ERROR_ATOL: Mapping[torch.dtype, float] = {
torch.float: 5e-4,
# increased from 9e-2, more opportunities for numerical errors when bias is
# used, noticed in gK on SM80
torch.half: 1e-1,
torch.bfloat16: 7e-1,
}
_TEST_K: List[int] = [
32, # 64x64 kernel
128, # 64x128/128x128 kernel
256, # 64x128 with accumulation in gmem
]
@classmethod
def not_supported_reasons(cls, d: Inputs) -> List[str]:
reasons = super(BwOp, cls).not_supported_reasons(d)
matmul_alignment_mn = _minimum_gemm_alignment(d)
check_lastdim_alignment_stride1(reasons, "query", d.query, matmul_alignment_mn)
check_lastdim_alignment_stride1(reasons, "key", d.key, matmul_alignment_mn)
check_lastdim_alignment_stride1(reasons, "value", d.value, matmul_alignment_mn)
_check_bias_alignment(reasons, d.attn_bias)
attn_bias_tensor = _get_tensor_bias(d.attn_bias)
# Backprop of gradient through broadcasted bias is not supported
if attn_bias_tensor is not None and attn_bias_tensor.requires_grad:
# Don't forget that inputs are either in BMK or BMHK!
if d.query.ndim == 3 and attn_bias_tensor.ndim == 3:
expected_bias_shape = (*d.query.shape[:2], d.key.shape[1])
else:
# bias is B H Mq Mk
expected_bias_shape = (
d.query.shape[0],
d.query.shape[2] if d.query.ndim == 4 else 1,
d.query.shape[1],
d.key.shape[1],
)
if tuple(attn_bias_tensor.shape) != expected_bias_shape:
reasons.append(
"Broadcasting the `attn_bias` tensor is not supported "
f"(shape: {tuple(attn_bias_tensor.shape)}"
f"/ expected: {expected_bias_shape})"
)
return reasons
@classmethod
def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
if type(inp.attn_bias) not in BwOp.SUPPORTED_ATTN_BIAS_TYPES:
raise NotImplementedError("Unsupported attn_bias type")
seqstart_k, seqstart_q, max_seqlen_q, max_seqlen_k = _get_seqlen_info(inp)
dtype = inp.query.dtype
rng_seed = rng_offset = 0
if inp.p != 0.0:
if (
ctx.rng_state is None
or ctx.rng_state.dtype != torch.int64
or ctx.rng_state.device.type != "cpu"
or ctx.rng_state.shape != (2,)
):
raise NotImplementedError(f"Invalid rng_state: {ctx.rng_state}")
rng_seed, rng_offset = ctx.rng_state.tolist()
force_pad_inf = torch.cuda.get_device_capability(inp.query.device) == (7, 5)
(grad_q, grad_k, grad_v, grad_bias) = cls.OPERATOR(
grad.to(dtype),
inp.query,
inp.key,
inp.value,
_get_tensor_bias(inp.attn_bias),
cu_seqlens_q=seqstart_q,
cu_seqlens_k=seqstart_k,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_k,
logsumexp=ctx.get_padded_lse(32, force_pad_inf=force_pad_inf),
output=ctx.out.to(dtype),
dropout_p=inp.p,
# if not using dropout, seed and offset are irrelevant but still expected
# in function signature so just pass 0
# seed and offset could be None if a different FW op other than cutlass
# was used.
rng_seed=rng_seed,
rng_offset=rng_offset,
custom_mask_type=_custom_mask_type(inp.attn_bias),
scale=inp.scale,
)
# c++/CUDA implementation returns an uninitialized tensor if bias doesn't
# require grad
if not (
isinstance(inp.attn_bias, torch.Tensor) and inp.attn_bias.requires_grad
):
grad_bias = None
return Gradients(dq=grad_q, dk=grad_k, dv=grad_v, db=grad_bias)
@classmethod
# type: ignore
def operator_flop(
cls,
dO,
q,
k,
v,
b,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
logsumexp,
output,
dropout_p,
rng_seed,
rng_offset,
custom_mask_type,
scale,
) -> int:
return cls.attn_operator_flop(
q,
k,
v,
seqstart_q=cu_seqlens_q,
seqstart_k=cu_seqlens_k,
causal=custom_mask_type > 0,
)
| EXA-1-master | exa/libraries/xformers/xformers/ops/fmha/cutlass.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import Mapping, Tuple
import torch
@dataclass
class DeviceLimit:
name: str = "default" # pattern to match from `torch.cuda.get_device_name()`
source: str = ""
sm: Tuple[int, int] = (0, 0)
# bytes/s
gmem_bandwidth: float = math.inf
# dtype -> TFlop/s
gemm_tflops: Mapping[torch.dtype, float] = field(default_factory=dict)
# For f32, we assume we can use tf32
DEVICE_LIMITS: Tuple[DeviceLimit, ...] = (
DeviceLimit(
"A100",
"https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf", # noqa: E501
sm=(8, 0),
gmem_bandwidth=1.5 * (1024**4),
gemm_tflops={
torch.float64: 19.5,
torch.float32: 156,
torch.float16: 312,
torch.bfloat16: 312,
torch.int8: 624,
},
),
DeviceLimit(
"A30",
"https://www.nvidia.com/content/dam/en-zz/Solutions/data-center/products/a30-gpu/pdf/a30-datasheet.pdf",
sm=(8, 0),
gmem_bandwidth=933 * (1024**3),
gemm_tflops={
torch.float64: 10.3,
torch.float32: 82,
torch.float16: 165,
torch.bfloat16: 165,
torch.int8: 330,
},
),
DeviceLimit(
"T4",
"https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/tesla-t4/t4-tensor-core-datasheet-951643.pdf",
sm=(7, 5),
gmem_bandwidth=300 * (1024**3),
gemm_tflops={
torch.float32: 8.1,
torch.float16: 65,
torch.int8: 130,
},
),
# Assuming SXM2
DeviceLimit(
"V100",
"https://images.nvidia.com/content/technologies/volta/pdf/tesla-volta-v100-datasheet-letter-fnl-web.pdf",
sm=(7, 0),
gmem_bandwidth=900 * (1024**3),
gemm_tflops={
torch.float64: 7.8,
torch.float32: 15.7,
torch.float16: 125,
},
),
DeviceLimit(
"P100",
"https://images.nvidia.com/content/tesla/pdf/nvidia-tesla-p100-datasheet.pdf",
sm=(6, 0),
gmem_bandwidth=732 * (1024**3),
gemm_tflops={
torch.float64: 5.3,
torch.float32: 10.6,
torch.float16: 21.2,
},
),
)
def get_device_limits(device) -> DeviceLimit:
"""Currently only implemented for GPUs"""
if device is not None and device.type == "cuda":
device_sm = torch.cuda.get_device_capability(device)
device_name = torch.cuda.get_device_name(device)
for lim in DEVICE_LIMITS:
if lim.sm == device_sm:
if lim.name in device_name:
return lim
return DeviceLimit()
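# Minimal usage sketch: the limits are typically used to derive per-op roofline bounds.
#   limits = get_device_limits(torch.device("cuda", 0))
#   peak_tflops = limits.gemm_tflops.get(torch.float16, math.inf)  # TFlop/s
#   peak_membw = limits.gmem_bandwidth  # bytes/s
# A device matches when its compute capability equals `sm` and `name` is a
# substring of `torch.cuda.get_device_name()`; otherwise the default (infinite)
# limits are returned.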
| EXA-1-master | exa/libraries/xformers/xformers/profiler/device_limits.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .api import profile, step
from .profiler import MemSnapshotsProfiler, NsightProfiler, PyTorchProfiler
from .slow_ops_profiler import DetectSlowOpsProfiler
__all__ = [
"profile",
"step",
"MemSnapshotsProfiler",
"PyTorchProfiler",
"NsightProfiler",
"DetectSlowOpsProfiler",
]
| EXA-1-master | exa/libraries/xformers/xformers/profiler/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Sequence, Tuple
import torch.nn as nn
from .profiler import MemSnapshotsProfiler, NsightProfiler, PyTorchProfiler, _Profiler
from .slow_ops_profiler import DetectSlowOpsProfiler
DEFAULT_SCHEDULE = (
(MemSnapshotsProfiler, 0, 2),
(DetectSlowOpsProfiler, 2, 4),
(NsightProfiler, 4, 6),
(PyTorchProfiler, 6, 20),
)
def profile(
output_dir: str,
module: Optional[nn.Module] = None,
schedule: Sequence[Tuple[Any, int, int]] = DEFAULT_SCHEDULE,
):
"""
    A pre-configured profiler that will run on the first ~20 steps of the training.
    It will provide multiple traces that can be analyzed later.
Use it in a context manager around your training loop, and call `xformers.profiler.step`
before starting the next iteration.
:Examples:
.. code-block:: python
import torch
import timm.models
import xformers.profiler
dtype = torch.bfloat16
device = "cuda"
model = timm.models.vit_large_patch16_224().to(device).to(dtype)
inp = torch.zeros([64, 3, 224, 224], device=device, dtype=dtype)
optim = torch.optim.Adam(model.parameters())
with xformers.profiler.profile(
output_dir="profile_data",
module=model,
schedule=[
(MemSnapshotsProfiler, 0, 2),
(DetectSlowOpsProfiler, 2, 4),
(NsightProfiler, 4, 6),
(PyTorchProfiler, 6, 20),
]
):
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xformers.profiler.step()
# alternatively, use the profiler without context and with ``.start()`` / `.stop()`
# calls.
xprofiler = xformers.profiler.profile(...)
xprofiler.start()
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xprofiler.step()
xprofiler.stop()
"""
return _Profiler(output_dir=output_dir, schedule=schedule, module=module)
def step() -> None:
"""See `xformers.profiler.profile`"""
# Silently return if no profiler is enabled
if _Profiler._CURRENT_PROFILER is None:
return
_Profiler._CURRENT_PROFILER.step()
| EXA-1-master | exa/libraries/xformers/xformers/profiler/api.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import math
import os
from collections import defaultdict
from dataclasses import dataclass, field
from functools import partial
from typing import Any, Dict, List, Set, Tuple
import torch.cuda.memory
import torch.cuda.nvtx
import torch.profiler
import torch.utils.hooks
from torch.utils._python_dispatch import TorchDispatchMode, _pop_mode_temporarily
from torch.utils._pytree import tree_map
from ..ops.common import FUNC_TO_XFORMERS_OPERATOR
from .device_limits import get_device_limits
from .profiler import _Profiler
class TorchFuncMockNoDispatch:
"""
Wraps a method to call it without the custom
pytorch dispatcher
"""
def __init__(self, pt_impl):
self.pt_impl = pt_impl
def __get__(self, obj, c):
return partial(self, obj)
def __call__(self, obj, *args, **kwargs):
with _pop_mode_temporarily():
return self.pt_impl(obj, *args, **kwargs)
class DispatcherWithoutBrokenFuncs(TorchDispatchMode):
TENSOR_FUNCS_NO_DISPATCH = [
# Can't convert Stream argument to Python object
# https://github.com/pytorch/pytorch/issues/94403
"record_stream"
]
def __enter__(self) -> None:
self._pt_impls = {}
for k in self.TENSOR_FUNCS_NO_DISPATCH:
impl = getattr(torch.Tensor, k)
self._pt_impls[k] = impl
setattr(torch.Tensor, k, TorchFuncMockNoDispatch(impl))
return super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
for k in self.TENSOR_FUNCS_NO_DISPATCH:
setattr(torch.Tensor, k, self._pt_impls[k])
return super().__exit__(exc_type, exc_val, exc_tb)
def get_shape(i):
return i.shape
def prod(x):
res = 1
for i in x:
res *= i
return res
class GemmOpComputeFlops:
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
return (prod(inputs[0].shape[:-1]), inputs[1].shape[1], inputs[0].shape[-1])
def __call__(self, inputs: List[Any], outputs: List[Any]) -> float:
return 2 * prod(self._get_mnk(inputs))
def op_suffix(self, inputs: List[Any]) -> str:
m, n, k = self._get_mnk(inputs)
return f"_{m}x{n}x{k}"
class GemmOpComputeFlopsLinear(GemmOpComputeFlops):
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
return (prod(inputs[0].shape[:-1]), inputs[1].shape[0], inputs[0].shape[-1])
class GemmOpComputeFlopsMv(GemmOpComputeFlops):
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
return (prod(inputs[0].shape[:-1]), 1, inputs[0].shape[-1])
class GemmOpComputeFlopsBmm(GemmOpComputeFlops):
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
a, b = inputs[0], inputs[1]
assert a.ndim == 3
assert b.ndim == 3
bs = max(inputs[0].shape[0], inputs[1].shape[0])
return (bs * a.shape[1], b.shape[-1], b.shape[-2])
class GemmOpComputeFlopsAddmm(GemmOpComputeFlops):
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
return super()._get_mnk(inputs[1:])
class GemmOpComputeFlopsAddbmm(GemmOpComputeFlopsBmm):
def _get_mnk(self, inputs: List[Any]) -> Tuple[int, int, int]:
return super()._get_mnk(inputs[1:])
def conv_flop_count(
x_shape: List[int],
w_shape: List[int],
out_shape: List[int],
transposed: bool = False,
) -> float:
"""
    Count flops for convolution. Note that only multiplications are
    counted; additions and the bias term are ignored.
    Flops for a transposed convolution are calculated as
    flops = batch_size * prod(w_shape) * prod(x_shape[2:]).
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
transposed (bool): is the convolution transposed
Returns:
int: the number of flops
"""
batch_size = x_shape[0]
conv_shape = (x_shape if transposed else out_shape)[2:]
flop = batch_size * prod(w_shape) * prod(conv_shape)
return flop
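# Worked example: a regular (non-transposed) convolution with
# x_shape == [2, 3, 32, 32], w_shape == [8, 3, 3, 3] and out_shape == [2, 8, 30, 30]
# is counted as batch_size * prod(w_shape) * prod(out_shape[2:])
# == 2 * 216 * 900 == 388_800 flops (multiplications only).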
def conv_flop(inputs: List[Any], outputs: List[Any]):
"""
Count flops for convolution.
"""
x, w = inputs[:2]
x_shape, w_shape, out_shape = (get_shape(x), get_shape(w), get_shape(outputs[0]))
transposed = inputs[6]
return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)
def transpose_shape(shape):
return [shape[1], shape[0]] + list(shape[2:])
def conv_backward_flop(inputs: List[Any], outputs: List[Any]):
grad_out_shape, x_shape, w_shape = [get_shape(i) for i in inputs[:3]]
output_mask = inputs[-1]
fwd_transposed = inputs[7]
flop_count = 0.0
if output_mask[0]:
grad_input_shape = get_shape(outputs[0])
flop_count += conv_flop_count(
grad_out_shape, w_shape, grad_input_shape, not fwd_transposed
)
if output_mask[1]:
grad_weight_shape = get_shape(outputs[1])
flop_count += conv_flop_count(
transpose_shape(x_shape), grad_out_shape, grad_weight_shape, fwd_transposed
)
return flop_count
def tensor_storage_size_in_mem(x: torch.Tensor):
total = 1
for dim_sz, stride in zip(x.shape, x.stride()):
if stride >= 1:
total *= dim_sz
return total
def get_size(inputs: List[Any]):
total_bytes = 0
def process(x) -> None:
nonlocal total_bytes
if isinstance(x, torch.Tensor):
total_bytes += tensor_storage_size_in_mem(x) * x.element_size()
tree_map(process, inputs)
return total_bytes
def operation_memory_rw_bytes(inputs: List[Any], outputs: List[Any]):
size_input, size_output = get_size(inputs), get_size(outputs)
return size_input + size_output
def output_read_from_input(inputs: List[Any], outputs: List[Any]):
size_input, size_output = get_size(inputs), get_size(outputs)
return size_output + min(size_input, size_output)
def output_total_size(inputs: List[Any], outputs: List[Any]):
return get_size(outputs)
def input_total_size(inputs: List[Any], outputs: List[Any]):
return get_size(inputs)
def guess_flops_unknown_op(inputs: List[Any], outputs: List[Any]):
# Approximation that isn't too bad
total_elements = 0
def process(x) -> None:
nonlocal total_elements
if isinstance(x, torch.Tensor):
total_elements += x.numel()
tree_map(process, inputs)
tree_map(process, outputs)
return total_elements / 2
def no_flop(inputs: List[Any], outputs: List[Any]):
return 0
def no_io(inputs: List[Any], outputs: List[Any]):
return 0
aten = torch.ops.aten
NO_FLOPS_NO_IO_OPS = [
aten.permute,
aten.view,
aten.view_as,
aten.detach,
aten.t,
aten.transpose,
aten.expand,
aten._unsafe_view,
aten.select,
aten.split,
aten.split_with_sizes,
aten.empty,
aten.empty_strided,
aten.empty_like,
aten.is_same_size,
]
NO_FLOPS_OPS = [
aten._reshape_alias,
aten.reshape,
aten.clone,
aten.cat,
aten.select_backward,
aten.slice,
aten.slice_backward,
aten.ones,
aten.ones_like,
aten.zeros_like,
aten.zero_,
aten.zeros,
aten.masked_fill,
aten.masked_fill_,
]
flop_mapping = {
aten.mv: GemmOpComputeFlopsMv(), # mat-vec
aten.mm: GemmOpComputeFlops(),
aten.matmul: GemmOpComputeFlops(),
aten.addmm: GemmOpComputeFlopsAddmm(),
aten.bmm: GemmOpComputeFlopsBmm(),
aten.addbmm: GemmOpComputeFlopsAddbmm(),
aten.linear: GemmOpComputeFlopsLinear(),
aten.convolution: conv_flop,
aten._convolution: conv_flop,
aten.convolution_backward: conv_backward_flop,
# Operations with 0 flop
**{op: no_flop for op in NO_FLOPS_OPS},
**{op: no_flop for op in NO_FLOPS_NO_IO_OPS},
}
io_mapping = {
aten.clone: output_read_from_input,
aten.cat: output_read_from_input,
aten.slice: output_read_from_input,
aten.ones_like: output_total_size,
aten.zeros_like: output_total_size,
aten.zero_: input_total_size,
**{op: no_io for op in NO_FLOPS_NO_IO_OPS}
# TODO: Check how this is implemented in PT
# aten.slice_backward: no_flop,
# aten.select_backward: no_flop,
}
@dataclass
class _OpInfo:
flop_count: float = 0.0
time_ms: float = 0.0
io_bytes: int = 0
is_exact_flop: bool = True
op_name: str = ""
op_suffix: str = ""
stacktrace: Tuple[str, ...] = field(default_factory=tuple)
ev_start: torch.cuda.Event = field(
default_factory=lambda: torch.cuda.Event(enable_timing=True)
)
ev_end: torch.cuda.Event = field(
default_factory=lambda: torch.cuda.Event(enable_timing=True)
)
# Hardware limits for this operation (inf if unknown)
hardware_tflops_limit: float = math.inf
hardware_membw_limit: float = math.inf
@property
def time_membound_ms(self) -> float:
assert self.time_ms > 0.0
if self.io_bytes == 0:
return 0.0
return min(self.time_ms, 1000 * self.io_bytes / self.hardware_membw_limit)
@property
def time_computebound_ms(self) -> float:
assert self.time_ms > 0.0
tflop = self.flop_count / (1000**4)
if tflop == 0.0:
return 0.0
return min(self.time_ms, 1000 * tflop / self.hardware_tflops_limit)
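    # Roofline-style bounds: the memory-bound time is io_bytes / bandwidth and
    # the compute-bound time is flops / peak_tflops, each clipped to the
    # measured time so the aggregated "mem_bound" / "compute_bound" ratios
    # stay within [0, 1].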
def finalize(self) -> None:
self.time_ms = self.ev_start.elapsed_time(self.ev_end)
@dataclass
class _OpInfoAggregated:
is_exact_flop: bool = True
total_flop_count: float = 0.0
total_io_bytes: int = 0
total_time_ms: float = 0.0
total_time_membound_ms: float = 0.0
total_time_computebound_ms: float = 0.0
num: int = 0
stacktraces: List[Tuple[str, ...]] = field(default_factory=list)
def add(self, op: _OpInfo) -> None:
self.total_flop_count += op.flop_count
self.total_time_ms += op.time_ms
self.total_io_bytes += op.io_bytes
self.total_time_membound_ms += op.time_membound_ms
self.total_time_computebound_ms += op.time_computebound_ms
self.num += 1
self.is_exact_flop = op.is_exact_flop
self.stacktraces.append(op.stacktrace)
def as_dict(self, **kwargs) -> Dict[str, Any]:
mem_bound = min(1, self.total_time_membound_ms / self.total_time_ms)
tflops = self.total_flop_count / (self.total_time_ms / 1000) / (1000**4)
compute_bound = min(1, self.total_time_computebound_ms / self.total_time_ms)
return {
"is_exact_flop": self.is_exact_flop,
"total_flop_count": self.total_flop_count,
"total_time_ms": self.total_time_ms,
"total_io_bytes": self.total_io_bytes,
"num": self.num,
"Tflops": tflops,
"mem_bound": mem_bound,
"compute_bound": compute_bound,
**kwargs,
}
class DetectSlowOpsProfiler(DispatcherWithoutBrokenFuncs):
"""
    Inspired by https://fb.workplace.com/groups/pytorch.dev/permalink/1054537595124720/
"""
def __init__(self, main_profiler: _Profiler) -> None:
self.main_profiler = main_profiler
self.trace: List[_OpInfo] = []
self.temp_disabled = False
def _hardware_tflops_membw_limit(
self, args: Tuple[Any, ...], outputs: Tuple[Any, ...]
) -> Tuple[float, float]:
device = None
dtypes: List[torch.dtype] = []
for a in itertools.chain(outputs, args):
if isinstance(a, torch.Tensor):
if device is None:
device = a.device
dtypes.append(a.dtype)
limits = get_device_limits(device)
dtypes = [dt for dt in dtypes if dt in limits.gemm_tflops]
if not dtypes or device is None:
return (math.inf, math.inf)
dtype = dtypes[0]
if torch.is_autocast_enabled() and dtype is torch.float32:
dtype = torch.get_autocast_gpu_dtype()
return limits.gemm_tflops[dtype], limits.gmem_bandwidth
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
func_packet = func._overloadpacket
if self.temp_disabled or func_packet.__name__ in [
"_record_function_exit",
"_record_function_enter_new",
]:
return func(*args, **kwargs)
op = _OpInfo()
op.ev_start.record()
out = func(*args, **kwargs)
op.ev_end.record()
(
op.hardware_tflops_limit,
op.hardware_membw_limit,
) = self._hardware_tflops_membw_limit(
args, out if isinstance(out, tuple) else (out,)
)
op.op_name = func_packet.__name__
# Prevent functions called by flop counting ops to be recorded
self.temp_disabled = True
flop_count = -1
compute_flops = None
if func_packet in FUNC_TO_XFORMERS_OPERATOR:
flop_count = FUNC_TO_XFORMERS_OPERATOR[func_packet].operator_flop(
*args, **kwargs
)
if flop_count == -1:
compute_flops = flop_mapping.get(func_packet, guess_flops_unknown_op)
flop_count = compute_flops(args, out if isinstance(out, tuple) else (out,))
if isinstance(compute_flops, GemmOpComputeFlops):
op.op_name += compute_flops.op_suffix(args)
compute_io = io_mapping.get(func_packet, operation_memory_rw_bytes)
op.io_bytes = compute_io(args, out if isinstance(out, tuple) else (out,))
self.temp_disabled = False
op.stacktrace = tuple(self.main_profiler.parents)
op.flop_count = flop_count
op.is_exact_flop = compute_flops is not guess_flops_unknown_op
self.trace.append(op)
return out
def __enter__(self):
self.main_profiler._install_hooks()
super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
self.main_profiler._remove_hooks()
torch.cuda.synchronize() # Wait for the events to be recorded
for op in self.trace:
op.finalize()
self.save_json()
def step(self) -> None:
pass
def save_json(self) -> None:
# Aggregate data at the module + op level
all_paths: Set[Tuple[str, ...]] = set()
per_module_data: Dict[Tuple[str, ...], _OpInfoAggregated] = defaultdict(
_OpInfoAggregated
)
per_op_data: Dict[str, _OpInfoAggregated] = defaultdict(_OpInfoAggregated)
for op in self.trace:
all_paths.add(op.stacktrace)
for op in self.trace:
for i in range(len(op.stacktrace)):
if op.stacktrace[: i + 1] in all_paths:
per_module_data[op.stacktrace[: i + 1]].add(op)
per_op_data[op.op_name].add(op)
# Generate JSON
all_data = []
for stacktrace, agg_info in per_module_data.items():
all_data.append(
agg_info.as_dict(
agg="module", path=stacktrace, name=stacktrace[-1], op=""
)
)
for op_name, agg_info in per_op_data.items():
# Find the most common path
paths_count: Dict[Tuple[str, ...], int] = defaultdict(int)
agg_info.stacktraces.sort() # In case of a draw, let's always return the same
for p in agg_info.stacktraces:
paths_count[p] += 1
maxp = agg_info.stacktraces[0]
for p, count in paths_count.items():
if count > paths_count[maxp]:
maxp = p
all_data.append(
agg_info.as_dict(
agg="opname",
path=f"{'.'.join(maxp)} (x{paths_count[maxp]})",
name="",
op=op_name,
)
)
filename = os.path.abspath(
os.path.join(
self.main_profiler.output_dir,
f"{self.main_profiler.worker_name}_ops.json",
)
)
self.main_profiler.summary.append(("OpsSummary", filename))
with open(filename, "w+") as f:
json.dump(all_data, f)
| EXA-1-master | exa/libraries/xformers/xformers/profiler/slow_ops_profiler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import queue
import socket
import weakref
from dataclasses import dataclass
from typing import Any, List, Optional, Sequence, Tuple
import torch.cuda.memory
import torch.cuda.nvtx
import torch.nn as nn
import torch.profiler
import torch.utils.hooks
logger = logging.getLogger(__name__)
def _normalize_tuple(x):
if not isinstance(x, tuple):
return (x,)
return x
class NsightProfiler:
"""Profiler that triggers start of NSight profiler.
NOTE: you need to ensure that the script running this code actually is running with
``nsys profile`` and also has a flag ``--capture-range=cudaProfilerApi`` so the
capturing is performed by this profiler during certain steps.
"""
def __init__(self, main_profiler: "_Profiler") -> None:
self.main_profiler = main_profiler
# TODO figure out if there is a way to know if nsys is launched at this point
def __enter__(self):
self.main_profiler._install_hooks()
torch.cuda.profiler.start()
def __exit__(self, exc_type, exc_val, exc_tb):
torch.cuda.profiler.stop()
self.main_profiler._remove_hooks()
def step(self) -> None:
pass
class PyTorchProfiler:
"""Profiler which relies on native Pytorch profiling. Current setting of the profiler
captures traces, memory footprint and other info that could be read via TensorBoard.
Currently implemented as a infinite-cycle profiler with a few warmup steps following
a few active steps.
"""
WARMUP = 5
ACTIVE_STEPS = 2
MIN_STEPS = WARMUP + 1
def __init__(self, main_profiler: "_Profiler") -> None:
self.main_profiler = main_profiler
tracing_schedule = torch.profiler.schedule(
skip_first=0,
wait=0,
warmup=self.WARMUP,
active=self.ACTIVE_STEPS,
)
trace_handler = torch.profiler.tensorboard_trace_handler(
dir_name=main_profiler.output_dir, use_gzip=True
)
self.hta = torch.profiler.profile(
schedule=tracing_schedule,
on_trace_ready=trace_handler,
profile_memory=True,
record_shapes=True,
with_stack=True,
)
self.done_steps = 0
def __enter__(self):
self.hta.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.hta.__exit__(exc_type, exc_val, exc_tb)
if self.done_steps < PyTorchProfiler.MIN_STEPS:
logger.warning(
f"You completed less steps than necessary to complete at least one"
f" active step for torch.profiler.profile to log anything. Steps"
f" completed: {self.done_steps}, minimum steps to capture at least"
f" one step: {PyTorchProfiler.MIN_STEPS}"
)
def step(self) -> None:
self.hta.step()
self.done_steps += 1
class MemSnapshotsProfiler:
"""Profiler that captures memory traces for allocation and deallocation of memory for
tensors.
"""
def __init__(self, main_profiler: "_Profiler") -> None:
self.main_profiler = main_profiler
self.enabled = False
@property
def _has_trace_plot(self) -> bool:
return hasattr(torch.cuda._memory_viz, "trace_plot")
def __enter__(self):
if not self._has_trace_plot:
return
self.enabled = True
# TODO: This does not show the previous memory allocations
# We could at least have a placeholder with how much
# memory was allocated before
torch.cuda.memory._record_memory_history(
True,
# keep 100,000 alloc/free events from before the snapshot
trace_alloc_max_entries=100000,
# record stack information for the trace events
trace_alloc_record_context=True,
)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._has_trace_plot:
self.main_profiler.summary.append(
("MemTrace", "(not available with your Pytorch version)")
)
return
assert self.enabled
snapshot = torch.cuda.memory._snapshot()
torch.cuda.memory._record_memory_history(False)
# No data was recorded - avoids a `ValueError` in `trace_plot`
if all(len(t) == 0 for t in snapshot["device_traces"]):
self.main_profiler.summary.append(("MemTrace", "(no allocation recorded)"))
return
# Dump to disk
filename = os.path.abspath(
os.path.join(
self.main_profiler.output_dir,
f"{self.main_profiler.worker_name}_memory_trace_plot.html",
)
)
self.main_profiler.summary.append(("MemTrace", filename))
with open(filename, "w+") as fd:
fd.write(
torch.cuda._memory_viz.trace_plot(
snapshot, device=None, plot_segments=False
)
)
def step(self) -> None:
pass
@dataclass
class _ProfilerState:
cls: Any
iter_begin: int
iter_end: int
object: Any = None
class _Profiler:
_CURRENT_PROFILER = None
def __init__(
self,
output_dir: str,
schedule: Sequence[Tuple[Any, int, int]],
module: Optional[nn.Module],
) -> None:
self.check_schedule(schedule)
self.done_steps = 0
self.output_dir = output_dir
self.worker_name = "{}_{}".format(socket.gethostname(), str(os.getpid()))
os.makedirs(output_dir, exist_ok=True)
self.module = weakref.ref(module if module is not None else nn.Module())
self.parents = ["Global"]
self.hooks: List[torch.utils.hooks.RemovableHandle] = []
self.hooks_refcount = 0
self.profilers: List[_ProfilerState] = sorted(
[_ProfilerState(cls, begin, end) for cls, begin, end in schedule],
key=lambda x: x.iter_begin,
)
self.last_step = self.profilers[-1].iter_end if self.profilers else 0
self.summary: List[Tuple[str, str]] = []
def check_schedule(self, schedule: Sequence[Tuple[Any, int, int]]) -> None:
if len(schedule) == 0:
logger.warning(
"You specified empty schedule for profiling. No data will be captured."
)
pq: Any = queue.PriorityQueue()
for cls, begin, end in schedule:
if issubclass(cls, PyTorchProfiler):
assert end - begin > PyTorchProfiler.MIN_STEPS, (
f"PyTorch profiler must have minimum {PyTorchProfiler.MIN_STEPS}"
+ " steps to capture at least one active step."
)
assert (
begin >= 0
), f"Begin step of profiler must be non-negative, found: {begin}"
assert end > 0, f"End step of profiler must be positive, found: {end}"
assert (
begin < end
), f"Start must be before the end, found: begin={begin} and end={end}"
pq.put((begin, end))
prev_end = -1
for begin, end in pq.queue:
assert begin >= prev_end, (
"There is some overlapping in profiler scheduling. Please do not"
+ " overlap profilers by step as they may affect each other. Schedule:"
+ f" {schedule}"
)
prev_end = end
def update_profilers_on_step(self) -> None:
for p in self.profilers:
if p.iter_begin <= self.done_steps and self.done_steps < p.iter_end:
if p.object is None:
o = p.cls(self)
logging.info(f"Starting {p.cls.__name__} profiler...")
o.__enter__()
p.object = o
else:
p.object.step()
else:
if p.object is not None:
o = p.object
p.object = None
logging.info(f"Shutting down {p.cls.__name__} profiler...")
o.__exit__(None, None, None)
def _install_hooks(self) -> None:
self.hooks_refcount += 1
# Already installed
if self.hooks:
return
module = self.module()
if module is None:
return
for name, sub_mod in module.named_modules():
if name == "":
continue
name = name.split(".")[-1]
self.hooks += [
sub_mod.register_forward_pre_hook(self._enter_module_hook(name)),
sub_mod.register_forward_hook(self._exit_module_hook(name)),
]
def _remove_hooks(self) -> None:
self.hooks_refcount -= 1
if self.hooks_refcount == 0:
for h in self.hooks:
h.remove()
def _enter_module_hook(self, name):
class PopState(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
if len(args) == 1:
return args[0]
return args
@staticmethod
def backward(ctx, *grad_outs):
self._exit_module(name)
return grad_outs
def f(module, inputs):
self._enter_module(name)
inputs = _normalize_tuple(inputs)
out = PopState.apply(*inputs)
return out
return f
def _exit_module_hook(self, name):
class PushState(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
if len(args) == 1:
return args[0]
return args
@staticmethod
def backward(ctx, *grad_outs):
self._enter_module(name)
return grad_outs
def f(module, inputs, outputs):
self._exit_module(name)
outputs = _normalize_tuple(outputs)
return PushState.apply(*outputs)
return f
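    # Note on the two hooks above: the forward pre-hook pushes the module name
    # (nvtx range + `self.parents`) and the forward hook pops it, while the
    # PopState/PushState autograd functions recreate the same bracketing in
    # reverse order during the backward pass, so backward kernels are attributed
    # to the module that produced them.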
def _enter_module(self, name) -> None:
self.parents.append(name)
torch.cuda.nvtx.range_push(name)
def _exit_module(self, name) -> None:
torch.cuda.nvtx.range_pop()
assert self.parents[-1] == name
self.parents.pop()
def start(self):
self.__enter__()
def stop(self, exc_type=None, exc_val=None, exc_tb=None):
self.__exit__(exc_type, exc_val, exc_tb)
def __enter__(self):
if _Profiler._CURRENT_PROFILER is not None:
raise ValueError("Only one xformers profiler can be active at a time")
_Profiler._CURRENT_PROFILER = self
self.update_profilers_on_step()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_Profiler._CURRENT_PROFILER = None
for p in self.profilers:
if p.object is not None:
p.object.__exit__(exc_type, exc_val, exc_tb)
def step(self) -> None:
"""Signals the profiler that the next profiling step has started."""
self.done_steps += 1
if self.done_steps <= self.last_step:
self.parents = ["Global"]
self.update_profilers_on_step()
if self.done_steps == self.last_step:
logger.info("xFormers profiler done. %s", self.format_summary())
def format_summary(self) -> str:
if len(self.summary) == 0:
return ""
pad_titles = max(len(title) for title, value in self.summary)
return "summary:\n" + "\n".join(
[f" {title.ljust(pad_titles)}: {value}" for title, value in self.summary]
)
| EXA-1-master | exa/libraries/xformers/xformers/profiler/profiler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .utils import _csr_to_coo, _transpose_with_info
def _should_use_coo(a, sparsity):
if not a.is_cuda:
return False
B, M, K = a.shape
# amortize overhead of converting from csr to coo
if B < 32 and M < 4096:
return False
if sparsity > 0.995:
return False
if sparsity < 0.9:
return False
if K > 64:
return False
# let's be overly cautious here for now
return sparsity > 0.97
def _should_use_csr_ge(a, sparsity):
if not a.is_cuda:
return False
return sparsity > 0.99
def _sddmm_func(a, b, row_indices, row_offsets, column_indices):
sparsity = 1 - column_indices.shape[0] / (a.shape[1] * b.shape[1])
if _should_use_coo(a, sparsity):
m = a.shape[-2]
n = b.shape[-2]
# converting from csr to coo has a constant overhead of ~150us
# so only dispatch to it for reasonably large problem sizes
ro, ci = _csr_to_coo(m, n, row_offsets, column_indices)
return torch.ops.xformers.coo_sddmm(a, b, row_indices, ro, ci)
elif _should_use_csr_ge(a, sparsity):
return torch.ops.xformers.csr_sddmm(
a, b, row_indices, row_offsets, column_indices
)
return torch.ops.xformers.sddmm_sputnik(
a, b, row_indices, row_offsets, column_indices
)
class _SparseSoftmax(torch.autograd.Function):
@staticmethod
def forward(ctx, m, n, row_indices, values, row_offsets, column_indices):
out = torch.ops.xformers.sparse_softmax_sputnik(
m, n, row_indices, values, row_offsets, column_indices
)
# note: save out and not values, as an optimization step
ctx.save_for_backward(row_indices, out, row_offsets, column_indices)
ctx.size = (m, n)
return out
@staticmethod
def backward(ctx, grad):
row_indices, out, row_offsets, column_indices = ctx.saved_tensors
m, n = ctx.size
# gradients w.r.t. values
grad = grad.contiguous()
ga = torch.ops.xformers.sparse_softmax_backward_sputnik(
m, n, row_indices, out, grad, row_offsets, column_indices
)
return None, None, None, ga, None, None
class _sddmm(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b, row_indices, row_offsets, column_indices, _transp_info):
out = _sddmm_func(a, b, row_indices, row_offsets, column_indices)
ctx.save_for_backward(
a, b, row_indices, row_offsets, column_indices, *_transp_info
)
return out
@staticmethod
def backward(ctx, grad):
(
a,
b,
row_indices,
row_offsets,
column_indices,
*_transp_info,
) = ctx.saved_tensors
m, n = a.shape[1], b.shape[1]
# gradients w.r.t. values
grad = grad.contiguous()
a = a.contiguous()
b = b.contiguous()
a_grad = torch.ops.xformers.spmm_sputnik(
b, row_indices, grad, row_offsets, column_indices, m
)
(
row_indices_t,
grad_t,
row_offsets_t,
column_indices_t,
) = _transpose_with_info(grad, _transp_info)
b_grad = torch.ops.xformers.spmm_sputnik(
a, row_indices_t, grad_t, row_offsets_t, column_indices_t, n
)
return a_grad, b_grad, None, None, None, None
class _spmm(torch.autograd.Function):
@staticmethod
def forward(
ctx, b, row_indices, values, row_offsets, column_indices, m, _transp_info
):
b = b.contiguous()
out = torch.ops.xformers.spmm_sputnik(
b, row_indices, values, row_offsets, column_indices, m
)
ctx.save_for_backward(
b, row_indices, values, row_offsets, column_indices, *_transp_info
)
return out
@staticmethod
def backward(ctx, grad):
(
b,
row_indices,
values,
row_offsets,
column_indices,
*_transp_info,
) = ctx.saved_tensors
k = b.shape[1]
# gradients w.r.t. values
grad = grad.contiguous()
grad_sparse = _sddmm_func(grad, b, row_indices, row_offsets, column_indices)
(
row_indices_t,
values_t,
row_offsets_t,
column_indices_t,
) = _transpose_with_info(values, _transp_info)
grad_dense = torch.ops.xformers.spmm_sputnik(
grad, row_indices_t, values_t, row_offsets_t, column_indices_t, k
)
return grad_dense, None, grad_sparse, None, None, None, None
| EXA-1-master | exa/libraries/xformers/xformers/sparse/_csr_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .blocksparse_tensor import BlockSparseTensor # noqa: F401
from .csr_tensor import SparseCSRTensor # noqa: F401
| EXA-1-master | exa/libraries/xformers/xformers/sparse/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
def _coo_to_csr(m, n, row_indices, column_indices):
# assumes coalesced coo
row_offsets = row_indices.bincount(minlength=n).cumsum(0, dtype=row_indices.dtype)
row_offsets = torch.nn.functional.pad(row_offsets, (1, 0))
return row_offsets, column_indices
def _csr_to_coo(m, n, row_offsets, column_indices):
# convert from compressed rows to uncompressed
indices = torch.arange(m, dtype=row_offsets.dtype, device=row_offsets.device)
row_sizes = torch.diff(row_offsets)
row_coo = torch.repeat_interleave(indices, row_sizes.long())
return row_coo, column_indices
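# Worked example for the two helpers above: a 3-row matrix whose rows hold
# 2, 0 and 3 non-zeros has row_offsets == [0, 2, 2, 5]; `_csr_to_coo` expands
# them to one row id per stored value, [0, 0, 2, 2, 2], while `_coo_to_csr`
# performs the inverse compression with bincount + cumsum plus a leading 0 pad.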
def _diffsort(a):
return torch.argsort(torch.diff(a), dim=0, descending=True)
def _get_transpose_info(m, n, row_indices, row_offsets, column_indices):
# strategy:
# - uncompress the rows to have data in COO format
# - get permutation for stable sort of the columns to get the rows for the transposed matrix
# - compress the new rows and return the permutation to be applied on the values
# convert from compressed rows to uncompressed
row_coo, _ = _csr_to_coo(m, n, row_offsets, column_indices)
# get the permutation for the stable sort
row_offsets_t, perm = column_indices.sort(dim=0, stable=True)
column_indices_t = row_coo[perm]
row_offsets_t, _ = _coo_to_csr(m, n, row_offsets_t, column_indices)
row_indices_t = _diffsort(row_offsets_t).int()
return row_indices_t, row_offsets_t, column_indices_t, perm
def _transpose_with_info(values, _transpose_info):
row_indices_t, row_offsets_t, column_indices_t, perm = _transpose_info
values_t = values[:, perm]
return row_indices_t, values_t, row_offsets_t, column_indices_t
def _transpose(m, n, row_indices, values, row_offsets, column_indices):
_transpose_info = _get_transpose_info(
m, n, row_indices, row_offsets, column_indices
)
return _transpose_with_info(values, _transpose_info)
def _nonzero_mask_to_sparse_csr_indices(mask, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(mask.shape) == 2
index_dtype = torch.int32
# Calculate the offset of each row.
row_offsets = mask.sum(dim=-1, dtype=index_dtype).cumsum(dim=-1, dtype=index_dtype)
row_offsets = torch.nn.functional.pad(row_offsets, (1, 0))
# Create the row indices and sort them.
row_indices = _diffsort(row_offsets).to(index_dtype)
# Extract the column indices for the nonzero values.
column_indices = torch.where(mask)[1].to(index_dtype).contiguous()
row_indices = row_indices.to(device)
row_offsets = row_offsets.to(device)
column_indices = column_indices.to(device)
return row_indices, row_offsets, column_indices
def _dense_to_sparse(matrix, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(matrix.shape) == 2
value_dtype = torch.float32
# Extract the nonzero values.
mask = matrix != 0
values = matrix[mask].to(dtype=value_dtype, device=device)
row_indices, row_offsets, column_indices = _nonzero_mask_to_sparse_csr_indices(
mask, device
)
return values, row_indices, row_offsets, column_indices
def _round_nnz(mask, divisible_by=4):
nonzero = torch.where(mask)
nnz = nonzero[0].shape[0]
nonzero = tuple(n[: (nnz - nnz % divisible_by)] for n in nonzero)
nm = torch.zeros_like(mask)
nm[nonzero] = True
return nm
def _dense3d_to_sparse(matrix, device):
assert len(matrix.shape) == 3
mask = matrix != 0
if not torch.all(mask == mask[0]):
raise ValueError("Expected the same sparsity pattern over the batch dimension")
# for now, our kernels assume that we have the number of
# nnz to be divisible by 4
mask = _round_nnz(mask[0], divisible_by=4)
mask = mask[None].expand(matrix.shape)
values = matrix[mask].reshape(matrix.shape[0], -1).to(device)
row_indices, row_offsets, column_indices = _nonzero_mask_to_sparse_csr_indices(
mask[0], device
)
return values, row_indices, row_offsets, column_indices
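# Hedged usage sketch (added for illustration, not part of the upstream file): a quick,
# CPU-only check of the CSR helpers above on a tiny dense matrix. The matrix values and
# shapes are arbitrary assumptions chosen so the nnz bookkeeping is easy to read.
if __name__ == "__main__":
    dense = torch.tensor([[1.0, 0.0, 2.0], [0.0, 0.0, 3.0]])
    values, row_indices, row_offsets, column_indices = _dense_to_sparse(
        dense, device=torch.device("cpu")
    )
    # Row 0 holds two nonzeros (columns 0 and 2), row 1 holds one (column 2),
    # so the compressed row offsets should read [0, 2, 3].
    print(values, row_offsets, column_indices)
    # Expand back to COO row indices: expected [0, 0, 1]
    rows, _ = _csr_to_coo(2, 3, row_offsets, column_indices)
    print(rows)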
| EXA-1-master | exa/libraries/xformers/xformers/sparse/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from xformers.ops import masked_matmul
logger = logging.getLogger("xformers")
try:
from triton.ops.blocksparse import matmul as blocksparse_matmul
from triton.ops.blocksparse import softmax as blocksparse_softmax
except ImportError as e:
logger.warning(
"Triton is not available, some optimizations will not be enabled.\n"
+ f"This is just a warning: {e}"
)
blocksparse_matmul = None
blocksparse_softmax = None
def _can_use_triton(a):
if a.device.type == "cpu":
return False
if blocksparse_matmul is None:
return False
return True
def _spmm(b, layout, values):
N, nnz, _, block_size = values.shape
br = b.reshape(
b.shape[0], b.shape[1], b.shape[2] // block_size, block_size, b.shape[3]
)
# perform matmul on blocks
h, r, c = layout.nonzero(as_tuple=True)
temp = values @ br[:, h, c, :]
linear_idx = h * (b.shape[2] // block_size) + r
out = torch.zeros(
N,
b.shape[1] * layout.shape[-2],
block_size,
b.shape[3],
dtype=b.dtype,
device=b.device,
)
# now aggregate the results of the different blocks
out.index_add_(1, linear_idx.to(b.device), temp)
out = out.reshape(N, b.shape[1], -1, b.shape[3])
return out
def _softmax(layout, values):
h, r, c = layout.nonzero(as_tuple=True)
norms = torch.logsumexp(values, dim=-1, keepdim=True)
linear_idx = h * layout.shape[1] + r
out_t = torch.zeros(
norms.shape[0],
layout.shape[0] * layout.shape[1],
norms.shape[2],
norms.shape[3],
dtype=norms.dtype,
device=norms.device,
)
max_val = norms.max()
out_t.index_add_(
1, linear_idx.to(values.device), (norms - max_val).exp()
).clamp_min_(1e-24).log_().add_(max_val)
out = torch.exp(values - out_t[:, linear_idx])
return out
def _sddmm(a, b, layout):
block_size = a.shape[-2] // layout.shape[-2]
a = a.reshape(
a.shape[0], a.shape[1], a.shape[2] // block_size, block_size, a.shape[3]
)
b = b.reshape(
b.shape[0], b.shape[1], b.shape[2] // block_size, block_size, b.shape[3]
)
h, r, c = layout.nonzero(as_tuple=True)
out = torch.einsum("nhik,nhjk->nhij", a[:, h, r, :, :], b[:, h, c, :, :])
return out
class BlockSparseTensor(torch.Tensor):
@staticmethod
def __new__(cls, values, layout):
kwargs = {}
kwargs["device"] = values.device
kwargs["dtype"] = values.dtype
kwargs["layout"] = values.layout
kwargs["requires_grad"] = values.requires_grad
assert values.ndim == 4
B, _, block_size, _ = values.shape
C, h, w = layout.shape
# TODO validate shape of layout vs values
shape = (B, C, block_size * h, block_size * w)
return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
def __init__(self, values, layout):
assert values.shape[-2] == values.shape[-1]
assert (
values.device == layout.device
), "Both values and layout need to reside on the same device"
block_size = values.shape[-1]
# TODO: make this check conditioned on the use of Triton
assert block_size >= 16, "Minimum block size is 16, for now at least"
# Pure blocksparse data
self.__values = values
self.__layout = layout
# blocksparse operators for triton
if blocksparse_matmul:
self._initialize_triton_ops()
else:
self.__sparse_dot_sdd = None
self.__sparse_dot_dsd = None
self.__sparse_softmax = None
def _initialize_triton_ops(self):
block_size = self.__values.shape[-1]
self.__sparse_dot_sdd = blocksparse_matmul(
self.__layout,
block_size,
"sdd",
trans_a=False,
trans_b=True,
device=self.__layout.device,
)
self.__sparse_dot_dsd = blocksparse_matmul(
self.__layout,
block_size,
"dsd",
trans_a=False,
trans_b=False,
device=self.__layout.device,
)
self.__sparse_softmax = blocksparse_softmax(
self.__layout, block_size, device=self.__layout.device
)
def __repr__(self):
return f"block_sparse_tensor(shape={self.shape}, values={self.__values})"
def values(self):
return self.__values
@classmethod
def _raw_wrap(cls, values, layout, sparse_dot_sdd, sparse_dot_dsd, sparse_softmax):
matrix = cls.__new__(cls, values, layout)
matrix.__values = values
matrix.__layout = layout
matrix.__sparse_dot_sdd = sparse_dot_sdd
matrix.__sparse_dot_dsd = sparse_dot_dsd
matrix.__sparse_softmax = sparse_softmax
return matrix
@classmethod
def _wrap(cls, values, bmat):
matrix = cls.__new__(cls, values, bmat.__layout)
matrix.__values = values
matrix.__layout = bmat.__layout
matrix.__sparse_dot_sdd = bmat.__sparse_dot_sdd
matrix.__sparse_dot_dsd = bmat.__sparse_dot_dsd
matrix.__sparse_softmax = bmat.__sparse_softmax
return matrix
@classmethod
def _bmm(cls, arg0, arg1):
if not (isinstance(arg0, cls) and type(arg1) == torch.Tensor):
return NotImplemented
if _can_use_triton(arg1):
res = arg0.__sparse_dot_dsd(arg0.__values, arg1)
else:
res = _spmm(arg1, arg0.__layout, arg0.__values)
return res
@classmethod
def _masked_matmul(cls, a, b, mask):
if not (type(a) == torch.Tensor and type(b) == torch.Tensor):
return NotImplemented
b = b.transpose(-2, -1)
assert b.is_contiguous()
if _can_use_triton(a):
res = mask.__sparse_dot_sdd(a, b)
else:
res = _sddmm(a, b, mask.__layout)
return cls._wrap(res, mask)
@classmethod
def _softmax(cls, arg0, dim):
if not (dim == -1 or dim == 2):
return NotImplemented
if _can_use_triton(arg0):
res = arg0.__sparse_softmax(arg0.__values)
else:
res = _softmax(arg0.__layout, arg0.__values)
return cls._wrap(res, arg0)
@classmethod
def _to(cls, arg0, device):
if isinstance(device, str):
device = torch.device(device)
assert isinstance(device, torch.device)
return cls(
arg0.__values.to(device=device),
arg0.__layout,
)
@classmethod
def _copy(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
assert arg0.shape == arg1.shape
av0, av1 = arg0.__values, arg1.__values
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__layout, arg1.__layout
av0.resize_as_(av1).copy_(av1)
out = cls(arg0.__values, arg0.__layout)
arg0.__sparse_dot_sdd = out.__sparse_dot_sdd
arg0.__sparse_dot_dsd = out.__sparse_dot_dsd
arg0.__sparse_softmax = out.__sparse_softmax
return arg0
@classmethod
def _equal(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
if arg0.shape != arg1.shape:
return False
if not torch.equal(arg0.__values, arg1.__values):
return False
if not torch.equal(arg0.__layout, arg1.__layout):
return False
return True
@classmethod
def _to_dense(cls, arg0):
# out = torch.zeros(arg0.shape, dtype=arg0.dtype, device=arg0.device, requires_grad=arg0.requires_grad)
out = torch.zeros(arg0.shape, dtype=arg0.dtype, device=arg0.device)
values = arg0.__values
layout = arg0.__layout
block_size = values.shape[-1]
blocks_i = layout.shape[-2]
blocks_j = layout.shape[-1]
out_r = out.reshape(
arg0.shape[0], arg0.shape[1], blocks_i, block_size, blocks_j, block_size
)
for idx, (h, i, j) in enumerate(zip(*layout.nonzero(as_tuple=True))):
out_r[:, h, i, :, j, :] = values[:, idx, :, :]
return out
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func in [
torch.Tensor.bmm,
torch.bmm,
torch.Tensor.__matmul__,
torch.matmul,
torch.Tensor.matmul,
]:
assert len(args) == 2
return cls._bmm(args[0], args[1])
if func in [torch.Tensor.softmax, torch.nn.functional.softmax, torch.softmax]:
return cls._softmax(args[0], kwargs["dim"])
if func == masked_matmul:
assert len(args) == 3
return cls._masked_matmul(args[0], args[1], args[2])
if func in [torch.nn.functional.dropout, torch.dropout, torch.dropout_]:
x = args[0]
values = x.__values.clone()
values = func(values, *args[1:], **kwargs)
return cls._wrap(values, x)
if func == torch.Tensor.to:
# print(args, kwargs)
assert len(args) >= 2
return cls._to(args[0], args[1])
# return cls._to(args[0], kwargs["device"])
if func in [torch.Tensor.copy_]:
assert len(args) == 2
return cls._copy(args[0], args[1])
if func in [torch.Tensor.equal, torch.equal]:
assert len(args) == 2
return cls._equal(args[0], args[1])
if func == torch.Tensor.to_dense:
assert len(args) == 1
return cls._to_dense(args[0])
if func == torch.Tensor.detach:
x = args[0]
values = x.__values.clone()
values = func(values, *args[1:], **kwargs)
return cls._wrap(values, x)
if func == torch.Tensor.__deepcopy__:
x = args[0]
memo = args[1]
return cls._raw_wrap(
x.__values.__deepcopy__(memo),
x.__layout.__deepcopy__(memo),
# x.__sparse_dot_sdd.__deepcopy__(memo),
# x.__sparse_dot_dsd.__deepcopy__(memo),
# x.__sparse_softmax.__deepcopy__(memo),
x.__sparse_dot_sdd,
x.__sparse_dot_dsd,
x.__sparse_softmax,
)
if func in [torch.Tensor.grad.__get__, torch.Tensor._grad.__get__]:
assert len(args) == 1
assert len(kwargs) == 0
x = args[0]
return cls._wrap(x.__values.grad, x)
if func == torch.Tensor.requires_grad_:
func(args[0].__values)
with torch._C.DisableTorchFunction():
ret = func(*args, **kwargs)
# TODO: check this
if func in torch.overrides.get_default_nowrap_functions():
return ret
return torch._tensor._convert(ret, cls)
return NotImplemented
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
return NotImplemented
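# Hedged usage sketch (added for illustration, not part of the upstream file): build a tiny
# BlockSparseTensor on CPU and exercise softmax + matmul through the dense-block fallbacks
# above (_softmax / _spmm); on CPU the Triton kernels are bypassed by _can_use_triton.
# The block size, head count and feature sizes below are arbitrary assumptions.
if __name__ == "__main__":
    block, heads, blocks = 16, 2, 4
    layout = torch.tril(torch.ones(heads, blocks, blocks, dtype=torch.long))
    values = torch.randn(1, int(layout.sum()), block, block)
    a = BlockSparseTensor(values, layout)  # logical shape (1, heads, 64, 64)
    att = a.softmax(dim=-1)  # stays block-sparse, same layout
    v = torch.randn(1, heads, blocks * block, 32)
    out = att @ v  # dispatched to _spmm on CPU, returns a dense tensor
    print(out.shape)  # torch.Size([1, 2, 64, 32])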
| EXA-1-master | exa/libraries/xformers/xformers/sparse/blocksparse_tensor.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.ops import masked_matmul
from xformers.sparse import _csr_ops
from xformers.sparse.utils import (
_csr_to_coo,
_dense3d_to_sparse,
_diffsort,
_get_transpose_info,
_transpose_with_info,
)
class SparseCSRTensor(torch.Tensor):
@staticmethod
def __new__(cls, row_offsets, column_indices, values, shape):
kwargs = {}
kwargs["device"] = values.device
kwargs["dtype"] = values.dtype
kwargs["layout"] = values.layout
kwargs["requires_grad"] = values.requires_grad
assert len(shape) == 3
assert torch.__version__ > (1, 10), "SparseCSRTensor requires PyTorch 1.11+"
return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
def __init__(self, row_offsets, column_indices, values, shape):
assert row_offsets.ndim == 1
assert column_indices.ndim == 1
assert values.ndim == 2
self.__row_offsets = row_offsets.contiguous()
self.__row_indices = _diffsort(row_offsets).to(row_offsets.dtype)
self.__column_indices = column_indices.contiguous()
self.__values = values.contiguous()
self.__transp_info = _get_transpose_info(
self.shape[1],
self.shape[2],
self.__row_indices,
self.__row_offsets,
self.__column_indices,
)
def __repr__(self):
return f"sparse_csr_tensor(shape={self.shape}, values={self.__values})"
@classmethod
def from_dense(cls, matrix):
values, row_indices, row_offsets, column_indices = _dense3d_to_sparse(
matrix, matrix.device
)
return cls(row_offsets, column_indices, values, matrix.shape)
@classmethod
def from_sparse_coo(cls, arg0):
"""
assert arg0.is_sparse
x = arg0.coalesce()
rows, cols = x.indices().unbind(0)
vals = x.values()
_coo_to_csr()
"""
pass
@classmethod
def _wrap(
cls, shape, values, row_indices, row_offsets, column_indices, _transp_info
):
matrix = cls.__new__(cls, row_offsets, column_indices, values, shape)
matrix.__values = values
matrix.__row_indices = row_indices
matrix.__row_offsets = row_offsets
matrix.__column_indices = column_indices
matrix.__transp_info = _transp_info
return matrix
def values(self):
return self.__values
@property
def _csr_row_indices(self):
return self.__row_indices
@property
def _csr_row_offsets(self):
return self.__row_offsets
@property
def _csr_column_indices(self):
return self.__column_indices
@property
def _csr_transp_info(self):
return self.__transp_info
@classmethod
def _bmm(cls, arg0, arg1):
if not (isinstance(arg0, cls) and type(arg1) == torch.Tensor):
return NotImplemented
assert arg0.ndim == 3
assert arg1.ndim == 3
self = arg0
b = arg1
_, m, n = self.shape
row_indices = self.__row_indices
values = self.__values
row_offsets = self.__row_offsets
column_indices = self.__column_indices
out = _csr_ops._spmm.apply(
b, row_indices, values, row_offsets, column_indices, m, self.__transp_info
)
return out
@classmethod
def _softmax(cls, arg0, dim):
if not (dim == -1 or dim == 2):
return NotImplemented
self = arg0
_, m, n = self.shape
row_indices = self.__row_indices
values = self.__values
row_offsets = self.__row_offsets
column_indices = self.__column_indices
out = _csr_ops._SparseSoftmax.apply(
m, n, row_indices, values, row_offsets, column_indices
)
return cls._wrap(
self.shape,
out,
row_indices,
row_offsets,
column_indices,
self.__transp_info,
)
@classmethod
def _transpose(cls, arg0, dim0, dim1):
# TODO: check if need to return this or not
if not (dim0 == 1 or dim0 == -2):
return NotImplemented
if not (dim1 == 2 or dim1 == -1):
return NotImplemented
B, m, n = arg0.shape
values = arg0.__values
(
output_row_indices,
output_values,
output_row_offsets,
output_column_indices,
) = _transpose_with_info(values, arg0.__transp_info)
new_transp_info = _get_transpose_info(
n, m, output_row_indices, output_row_offsets, output_column_indices
)
return cls._wrap(
(B, n, m),
output_values,
output_row_indices,
output_row_offsets,
output_column_indices,
new_transp_info,
)
@classmethod
def _masked_matmul(cls, a, b, mask):
if not (type(a) == torch.Tensor and type(b) == torch.Tensor):
return NotImplemented
assert mask.shape[1] == a.shape[1]
assert mask.shape[2] == b.shape[2]
row_indices = mask.__row_indices
row_offsets = mask.__row_offsets
column_indices = mask.__column_indices
a = a.contiguous()
out = _csr_ops._sddmm.apply(
a,
b.transpose(-2, -1).contiguous(),
row_indices,
row_offsets,
column_indices,
mask.__transp_info,
)
# TODO add bias here
return cls._wrap(
mask.shape,
out,
row_indices,
row_offsets,
column_indices,
mask.__transp_info,
)
@classmethod
def _to(cls, arg0, device):
if isinstance(device, str):
device = torch.device(device)
assert isinstance(device, torch.device)
return cls._wrap(
arg0.shape,
arg0.__values.to(device=device),
arg0.__row_indices.to(device=device),
arg0.__row_offsets.to(device=device),
arg0.__column_indices.to(device=device),
tuple(t.to(device=device) for t in arg0.__transp_info),
)
@classmethod
def _copy(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
assert arg0.shape == arg1.shape
av0, av1 = arg0.__values, arg1.__values
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__row_indices, arg1.__row_indices
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__row_offsets, arg1.__row_offsets
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__column_indices, arg1.__column_indices
av0.resize_as_(av1).copy_(av1)
for v0, v1 in zip(arg0.__transp_info, arg1.__transp_info):
v0.resize_as_(v1).copy_(v1)
return arg0
@classmethod
def _equal(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
if arg0.shape != arg1.shape:
return False
if not torch.equal(arg0.__values, arg1.__values):
return False
if not torch.equal(arg0.__row_offsets, arg1.__row_offsets):
return False
if not torch.equal(arg0.__column_indices, arg1.__column_indices):
return False
return True
@classmethod
def _to_dense(cls, arg0):
_, m, n = arg0.shape
shape = arg0.shape
matrix = torch.zeros(shape, dtype=arg0.dtype, device=arg0.device)
row_offsets = arg0.__row_offsets.long()
column_indices = arg0.__column_indices.long()
row_coo, _ = _csr_to_coo(m, n, row_offsets, column_indices)
b_idxs = torch.arange(len(arg0.__values), device=arg0.device)[:, None]
matrix[b_idxs, row_coo, column_indices] = arg0.__values
return matrix
@classmethod
def _binary_op(cls, func, arg0, arg1):
if not (
isinstance(arg0, (cls, int, float)) and isinstance(arg1, (cls, int, float))
):
return NotImplemented
v0, v1 = arg0, arg1
if isinstance(arg0, cls):
v0 = arg0.__values
if isinstance(arg1, cls):
v1 = arg1.__values
# assert arg0.shape == arg1.shape
if isinstance(arg0, cls) and isinstance(arg1, cls):
msg = f"arg0 and arg1 need to have the same sparsity pattern in {func} (for now)"
if not arg0.__row_offsets.shape == arg1.__row_offsets.shape:
raise NotImplementedError(msg)
if not arg0.__column_indices.shape == arg1.__column_indices.shape:
raise NotImplementedError(msg)
if not arg0.__values.shape == arg1.__values.shape:
raise NotImplementedError(msg)
# TODO this is not always true, but is a fast approximation for now
if arg0.__row_offsets is not arg1.__row_offsets:
raise NotImplementedError(msg)
if arg0.__column_indices is not arg1.__column_indices:
raise NotImplementedError(msg)
out = func(v0, v1)
return cls._wrap(
arg0.shape,
out,
arg0.__row_indices,
arg0.__row_offsets,
arg0.__column_indices,
arg0.__transp_info,
)
@classmethod
def _binary_op_slow(cls, func, arg0, arg1):
# assert arg0.shape == arg1.shape
v0, v1 = arg0, arg1
if isinstance(arg0, cls):
v0 = arg0.to_dense()
if isinstance(arg1, cls):
v1 = arg1.to_dense()
out = func(v0, v1)
return cls.from_dense(out)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func in [
torch.Tensor.bmm,
torch.bmm,
torch.Tensor.__matmul__,
torch.matmul,
torch.Tensor.matmul,
]:
assert len(args) == 2
return cls._bmm(args[0], args[1])
if func in [torch.Tensor.softmax, torch.nn.functional.softmax, torch.softmax]:
return cls._softmax(args[0], kwargs["dim"])
if func in [torch.Tensor.transpose, torch.transpose]:
assert len(kwargs) == 0
return cls._transpose(args[0], args[1], args[2])
if func == masked_matmul:
assert len(args) == 3
return cls._masked_matmul(args[0], args[1], args[2])
if func in [
torch.Tensor.add,
torch.add,
torch.Tensor.__add__,
]:
assert len(args) == 2
if not (isinstance(args[0], cls) and isinstance(args[1], cls)):
raise NotImplementedError(
f"{func} with {type(args[0])} and {type(args[1])} not implemented"
)
return cls._binary_op(func, args[0], args[1])
if func in [
torch.Tensor.mul,
torch.mul,
torch.Tensor.__mul__,
]:
assert len(args) == 2
return cls._binary_op(func, args[0], args[1])
if func in [torch.Tensor.logical_and, torch.logical_and, torch.Tensor.__and__]:
assert len(args) == 2
return cls._binary_op_slow(func, args[0], args[1])
if func in [torch.nn.functional.dropout, torch.dropout, torch.dropout_]:
x = args[0]
values = x.__values.clone()
values = func(values, *args[1:], **kwargs)
return cls._wrap(
x.shape,
values,
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.to:
# print(args, kwargs)
assert len(args) >= 2
return cls._to(args[0], args[1])
# return cls._to(args[0], kwargs["device"])
if func in [torch.Tensor.copy_]:
assert len(args) == 2
return cls._copy(args[0], args[1])
if func in [torch.Tensor.equal, torch.equal]:
assert len(args) == 2
return cls._equal(args[0], args[1])
if func == torch.Tensor.to_dense:
assert len(args) == 1
return cls._to_dense(args[0])
if func == torch.Tensor.detach:
x = args[0]
return cls._wrap(
x.shape,
x.__values.detach(),
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.__deepcopy__:
x = args[0]
memo = args[1]
return cls._wrap(
x.shape,
x.__values.__deepcopy__(memo),
x.__row_indices.__deepcopy__(memo),
x.__row_offsets.__deepcopy__(memo),
x.__column_indices.__deepcopy__(memo),
tuple(v.__deepcopy__(memo) for v in x.__transp_info),
)
if func in [torch.Tensor.grad.__get__, torch.Tensor._grad.__get__]:
assert len(args) == 1
assert len(kwargs) == 0
x = args[0]
return cls._wrap(
x.shape,
x.__values.grad,
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.requires_grad_:
func(args[0].__values)
with torch._C.DisableTorchFunction():
ret = func(*args, **kwargs)
# TODO: check this
if func in torch.overrides.get_default_nowrap_functions():
return ret
return torch._tensor._convert(ret, cls)
return NotImplemented
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
return NotImplemented
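# Hedged usage sketch (added for illustration, not part of the upstream file): round-trip a
# small dense batch through SparseCSRTensor using the pure-PyTorch paths (from_dense,
# to_dense, transpose). The example matrix is an assumption with 8 nonzeros so that the
# nnz-divisible-by-4 constraint of _dense3d_to_sparse is met; bmm/softmax/masked_matmul
# additionally need the compiled sputnik kernels (CUDA) and are not exercised here.
if __name__ == "__main__":
    dense = torch.zeros(1, 4, 4)
    dense[0, [0, 0, 1, 1, 2, 2, 3, 3], [0, 2, 1, 3, 0, 2, 1, 3]] = torch.arange(1.0, 9.0)
    sp = SparseCSRTensor.from_dense(dense)
    assert torch.equal(sp.to_dense(), dense)
    assert torch.equal(sp.transpose(1, 2).to_dense(), dense.transpose(1, 2))
    print(sp)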
| EXA-1-master | exa/libraries/xformers/xformers/sparse/csr_tensor.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import sys
import tempfile
import torch
is_windows = False
if sys.platform == "win32":  # pytorch on windows uses gloo, not nccl
is_windows = True
def init_torch_distributed_local():
if torch.distributed.is_initialized():
return
init_url = "file://" + tempfile.mkstemp()[1]
backend = (
torch.distributed.Backend.NCCL
if torch.cuda.is_available() and not is_windows
else torch.distributed.Backend.GLOO
)
torch.distributed.init_process_group(
backend=backend,
rank=0,
world_size=1,
init_method=init_url,
)
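# Hedged usage sketch (added for illustration, not part of the upstream file): initialize the
# single-process group and query it. Assumes a PyTorch build with distributed support; gloo is
# picked on CPU-only or Windows hosts, NCCL otherwise.
if __name__ == "__main__":
    init_torch_distributed_local()
    print("world size:", torch.distributed.get_world_size())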
| EXA-1-master | exa/libraries/xformers/xformers/helpers/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from xformers.components.residual import ResidualNormStyle
@dataclass
class BasicLayerConfig:
embedding: int
attention_mechanism: str
patch_size: int
stride: int
padding: int
seq_len: int
feedforward: str
normalization: str = "layernorm"
repeat_layer: int = 1
def get_hierarchical_configuration(
layer_base_configs: List[BasicLayerConfig],
residual_norm_style: ResidualNormStyle = ResidualNormStyle.Pre,
use_rotary_embeddings: bool = True,
mlp_multiplier: int = 4,
in_channels: int = 3,
dim_head: Optional[int] = None,
):
"""
A small helper to generate hierarchical xformers configurations,
which correspond for instance to poolformer or swin architectures.
Contrary to more "classical" Transformer architectures, which conserve the sequence/context
length across layers, hierarchical Transformers trade the sequence length for the embedding dimension
"""
base_config: Dict[str, Any] = {
"block_type": "encoder",
"dim_model": 0,
"use_triton": False,
"residual_norm_style": str(residual_norm_style),
"multi_head_config": {
"num_heads": 1,
"use_rotary_embeddings": use_rotary_embeddings,
"attention": {
"name": "TBD",
},
},
"feedforward_config": {
"name": "TBD",
"activation": "gelu",
"hidden_layer_multiplier": mlp_multiplier,
"dropout": 0.0,
},
"position_encoding_config": {
"name": "learnable",
"seq_len": 0,
"add_class_token": False,
},
"patch_embedding_config": {
"in_channels": in_channels,
"kernel_size": 0,
"stride": 0,
"padding": 0,
},
}
xformers_config = []
in_channels = in_channels
for layer_base_config in layer_base_configs:
lc = copy.deepcopy(base_config)
lc["normalization"] = layer_base_config.normalization
# Fill in the changing model dimensions
lc["dim_model"] = layer_base_config.embedding
# Update the patches
lc["patch_embedding_config"] = {
"in_channels": in_channels,
"kernel_size": layer_base_config.patch_size,
"stride": layer_base_config.stride,
"padding": layer_base_config.padding,
}
# Update the number of channels for the next layer
in_channels = lc["dim_model"] * 1
lc["position_encoding_config"]["seq_len"] = layer_base_config.seq_len
# Fill in the number of heads (defaults to 1)
if dim_head is not None:
lc["multi_head_config"]["num_heads"] = (
layer_base_config.embedding // dim_head
)
assert layer_base_config.embedding % dim_head == 0
# Fill in the attention mechanism
lc["multi_head_config"]["attention"][
"name"
] = layer_base_config.attention_mechanism
        # Fill in the feedforward
lc["feedforward_config"]["name"] = layer_base_config.feedforward
print(lc)
xformers_config.append(lc)
# Handle repeated layers (without the patch embeddings)
if layer_base_config.repeat_layer > 1:
lc_repeat = copy.deepcopy(lc)
lc_repeat.pop("patch_embedding_config")
xformers_config += [lc_repeat] * (layer_base_config.repeat_layer - 1)
return xformers_config
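# Hedged usage sketch (added for illustration, not part of the upstream file): two hierarchical
# stages roughly in the spirit of a tiny metaformer on 224x224 images. The embedding sizes,
# patch/stride/padding values and the "scaled_dot_product"/"MLP" component names are
# assumptions made for the example; seq_len must match the spatial resolution after each
# patching step.
if __name__ == "__main__":
    base_configs = [
        # 224 -> 56 tokens per side: (224 + 2 * 2 - 7) // 4 + 1 == 56
        BasicLayerConfig(
            embedding=64,
            attention_mechanism="scaled_dot_product",
            patch_size=7,
            stride=4,
            padding=2,
            seq_len=56 * 56,
            feedforward="MLP",
        ),
        # 56 -> 28 tokens per side: (56 + 2 * 1 - 3) // 2 + 1 == 28
        BasicLayerConfig(
            embedding=128,
            attention_mechanism="scaled_dot_product",
            patch_size=3,
            stride=2,
            padding=1,
            seq_len=28 * 28,
            feedforward="MLP",
        ),
    ]
    config = get_hierarchical_configuration(
        base_configs,
        residual_norm_style=ResidualNormStyle.Pre,
        use_rotary_embeddings=False,
        mlp_multiplier=4,
        dim_head=32,
    )
    print(f"{len(config)} layer configs generated")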
| EXA-1-master | exa/libraries/xformers/xformers/helpers/hierarchical_configs.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .timm_sparse_attention import TimmSparseAttention # noqa
| EXA-1-master | exa/libraries/xformers/xformers/helpers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention.core import scaled_dot_product_attention
class TimmSparseAttention(torch.nn.Module):
"""
Almost drop-in replacement for timm attention
but using the sparsity-aware scaled_dot_product_attention from xformers
"""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.0,
proj_drop=0.0,
attn_mask=None,
):
super().__init__()
self.num_heads = num_heads
self.qkv = torch.nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = torch.nn.Dropout(attn_drop)
self.proj = torch.nn.Linear(dim, dim)
self.proj_drop = torch.nn.Dropout(proj_drop)
self.attn_mask = attn_mask
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
qkv = qkv.flatten(1, 2)
q, k, v = qkv.unbind()
x = scaled_dot_product_attention(
q, k, v, self.attn_mask, dropout=self.attn_drop
)
x = x.reshape(B, self.num_heads, N, C // self.num_heads)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
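# Hedged usage sketch (added for illustration, not part of the upstream file): drop-in usage on
# random activations without a sparsity mask (attn_mask=None keeps the dense code path).
# The dimensions are arbitrary assumptions.
if __name__ == "__main__":
    attention = TimmSparseAttention(dim=64, num_heads=4)
    x = torch.randn(2, 16, 64)  # (batch, tokens, channels)
    print(attention(x).shape)  # torch.Size([2, 16, 64])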
| EXA-1-master | exa/libraries/xformers/xformers/helpers/timm_sparse_attention.py |
from xformers.components import MultiHeadDispatchConfig # noqa
from xformers.components.attention import AttentionConfig # noqa
from xformers.components.feedforward import FeedforwardConfig # noqa
from xformers.components.positional_embedding import PositionEmbeddingConfig # noqa
from .block_factory import xFormerDecoderBlock # noqa
from .block_factory import xFormerDecoderConfig # noqa
from .block_factory import xFormerEncoderBlock # noqa
from .block_factory import xFormerEncoderConfig # noqa
from .model_factory import xFormer, xFormerConfig # noqa
from .weight_init import xFormerWeightInit # noqa
| EXA-1-master | exa/libraries/xformers/xformers/factory/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# register components configs into Hydra ConfigStore
# component config classes could be used to validate configs
import logging
from hydra.core.config_store import ConfigStore
from omegaconf.errors import ValidationError
from xformers.components.attention import ATTENTION_REGISTRY
from xformers.components.feedforward import FEEDFORWARD_REGISTRY
from xformers.components.positional_embedding import POSITION_EMBEDDING_REGISTRY
logger = logging.getLogger("xformers")
def import_xformer_config_schema():
"""
Best effort - OmegaConf supports limited typing, so we may fail to import
    certain config classes. For example, PyTorch typing is not supported.
"""
cs = ConfigStore.instance()
for k, v in {
"ff": FEEDFORWARD_REGISTRY,
"pe": POSITION_EMBEDDING_REGISTRY,
"attention": ATTENTION_REGISTRY,
}.items():
for kk in v.keys():
try:
cs.store(name=f"{kk}_schema", node=v[kk].config, group=f"xformers/{k}")
except ValidationError as e:
logger.debug(f"Error registering {kk}_schema, error: {e}")
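# Hedged usage sketch (added for illustration, not part of the upstream file): call the
# best-effort registration once at startup; assumes hydra-core is installed. Schemas land in
# the "xformers/ff", "xformers/pe" and "xformers/attention" groups of the ConfigStore.
if __name__ == "__main__":
    import_xformer_config_schema()
    print("xformers config schemas registered with Hydra's ConfigStore")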
| EXA-1-master | exa/libraries/xformers/xformers/factory/hydra_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import asdict
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from xformers.components import (
PatchEmbeddingConfig,
PostNorm,
PreNorm,
Residual,
ResidualNormStyle,
build_multi_head_attention,
build_patch_embedding,
)
from xformers.components.attention import AttentionMask
from xformers.components.feedforward import build_feedforward
from xformers.components.positional_embedding import build_positional_embedding
from xformers.components.residual import get_deepnorm_coefficients
from xformers.components.simplicial_embedding import SimplicialEmbedding
from xformers.factory.block_configs import (
NormalizationType,
xFormerDecoderConfig,
xFormerEncoderConfig,
)
logger = logging.getLogger("xformers")
def _get_ln_factory(
d_model: int,
residual_norm_style: Optional[ResidualNormStyle],
use_triton: bool,
residual: bool,
normalization: NormalizationType = NormalizationType.LayerNorm,
residual_scale: float = 1.0,
):
"""
Handle all the supported residual path configurations.
..Note: we return the appropriate constructor, not an actual layer
"""
def get_layer_wrapper(
d_model: int,
sublayer: nn.Module,
residual_norm_style: Optional[ResidualNormStyle],
residual: bool,
residual_scale: float,
):
if residual:
if residual_norm_style == ResidualNormStyle.Pre:
return Residual(
layer=PreNorm(d_model, sublayer, normalization, use_triton),
scale=None,
)
elif residual_norm_style == ResidualNormStyle.Post:
return PostNorm(
d_model,
Residual(layer=sublayer, scale=None),
normalization,
use_triton,
)
elif residual_norm_style == ResidualNormStyle.DeepNorm:
return PostNorm(
d_model,
Residual(layer=sublayer, scale=residual_scale),
normalization,
use_triton=use_triton,
)
else:
raise ValueError
return (
PreNorm(d_model, sublayer, normalization, use_triton)
if residual_norm_style == ResidualNormStyle.Pre
else PostNorm(d_model, sublayer, normalization, use_triton)
)
def ln_factory(sublayer: nn.Module):
return get_layer_wrapper(
d_model, sublayer, residual_norm_style, residual, residual_scale
)
return ln_factory
class xFormerEncoderBlock(torch.nn.Module):
r"""A vanilla Transformer Encoder block"""
def __init__(self, config: xFormerEncoderConfig, **kwargs):
super().__init__()
self.reversible_f = None
self.reversible_g = None
self.residual_norm_style = config.residual_norm_style
self.dim_model = config.dim_model
# If this layer is the first one, and a pose encoding has been requested
if (
config.position_encoding_config is not None
and config.layer_position.is_first()
):
self.pose_encoding = build_positional_embedding(
asdict(config.position_encoding_config)
)
pos_encoding_dim = config.position_encoding_config.dim_model
mha_dim = config.multi_head_config["dim_model"]
if pos_encoding_dim != mha_dim:
logger.warning(
f"The embedding dim and model dim do not match ({pos_encoding_dim} vs {mha_dim}), adding a projector layer." # noqa
)
self.embedding_projector = nn.Linear(pos_encoding_dim, mha_dim)
else:
self.pose_encoding = None
if config.residual_norm_style == ResidualNormStyle.DeepNorm:
# Just use the layer norm coefficient here,
# the init will be handled at the xformers level (knows about encoder and decoder blocks)
deep_norm_coefficients, _ = get_deepnorm_coefficients(
encoder_layers=config.num_layers, decoder_layers=0
)
assert deep_norm_coefficients is not None
residual_scale = deep_norm_coefficients.alpha
else:
residual_scale = 1.0
# mini helper, builds a normalization layer with the right Pre/Post config, residuals, and the right dimensions
ln_factory = _get_ln_factory(
config.dim_model,
config.residual_norm_style,
use_triton=config.use_triton,
residual=True,
residual_scale=residual_scale,
normalization=config.normalization,
)
mha = build_multi_head_attention(config.multi_head_config)
feedforward = build_feedforward(asdict(config.feedforward_config))
# Expose attention specific capabilities
self.supports_attention_mask = mha.attention.supports_attention_mask
self.requires_same_k_q_dimensions = mha.attention.requires_same_k_q_dimensions
self.causal = (
mha.attention.causal if hasattr(mha.attention, "causal") else False
)
# Wrappers handle the different layer norm styles (pre- and post-) and the residual path
self.wrap_att = ln_factory(mha)
self.wrap_ff: Union[Residual, PostNorm] = ln_factory(feedforward)
if (
config.residual_norm_style == ResidualNormStyle.Pre
and config.layer_position.is_last()
):
self.wrap_ff = PostNorm(
config.dim_model,
self.wrap_ff,
normalization=config.normalization,
use_triton=config.use_triton,
)
# Simplicial embeddings are only used if specified, and on the last layer
self.simplicial_embedding: Optional[SimplicialEmbedding] = None
if config.simplicial_embeddings is not None and config.layer_position.is_last():
self.simplicial_embedding = SimplicialEmbedding(
**config.simplicial_embeddings
)
# Optional patch embedding
self.patch_emb: Optional[nn.Module] = None
if config.patch_embedding_config is not None:
self.patch_emb = build_patch_embedding(
PatchEmbeddingConfig(**config.patch_embedding_config)
)
@classmethod
def from_config(cls, config: xFormerEncoderConfig):
return cls(config)
@staticmethod
def get_reversible_layer(config) -> Tuple[nn.Module, nn.Module]:
ln_factory = _get_ln_factory(
config.dim_model,
config.residual_norm_style,
residual=False,
use_triton=config.use_triton,
normalization=config.normalization,
)
mha = build_multi_head_attention(config.multi_head_config)
feedforward = build_feedforward(asdict(config.feedforward_config))
reversible_f = ln_factory(mha)
reversible_g = ln_factory(feedforward)
return reversible_f, reversible_g
def forward(
self,
x: torch.Tensor,
att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
input_mask: Optional[torch.Tensor] = None,
):
if self.patch_emb is not None:
x = self.patch_emb(x)
if self.pose_encoding is not None:
x = self.pose_encoding(x)
if hasattr(self, "embedding_projector"):
x = self.embedding_projector(x)
# Handle the optional input masking, differs on Q, K, V
if input_mask is not None:
q = x
k = x * input_mask.unsqueeze(-1)
v = k
else:
q, k, v = x, x, x
# Pre/Post norms and residual paths are already handled
x = self.wrap_att(inputs=[q, k, v], att_mask=att_mask)
x = self.wrap_ff(inputs=[x])
# Optional simplicial embeddings
if self.simplicial_embedding is not None:
x = self.simplicial_embedding(x)
return x
class xFormerDecoderBlock(torch.nn.Module):
r"""A vanilla Transformer Decoder block
... note: this implementation is not (yet ?) reversible"""
def __init__(self, config: xFormerDecoderConfig, **kwargs):
super().__init__()
        # If this layer is the first one, and a pose encoding has been requested
if (
config.position_encoding_config is not None
and config.layer_position.is_first()
):
self.pose_encoding = build_positional_embedding(
config.position_encoding_config
)
pos_encoding_dim = config.position_encoding_config.dim_model
mha_dim = config.multi_head_config_masked["dim_model"]
if pos_encoding_dim != mha_dim:
logger.warning(
f"The embedding dim and model dim do not match ({pos_encoding_dim} vs {mha_dim}), adding a projector layer." # noqa
)
self.embedding_projector = nn.Linear(pos_encoding_dim, mha_dim)
else:
self.pose_encoding = None
if config.residual_norm_style == ResidualNormStyle.DeepNorm:
# Just use the layer norm coefficient here,
# the init will be handled at the xformers level (knows about encoder and decoder blocks)
_, deep_norm_coefficients = get_deepnorm_coefficients(
encoder_layers=0, decoder_layers=config.num_layers
)
assert deep_norm_coefficients is not None
residual_scale = deep_norm_coefficients.alpha
else:
residual_scale = 1.0
# mini helper, builds a LayerNorm with the right Pre/Post config and the right dimensions
ln_factory = _get_ln_factory(
config.dim_model,
config.residual_norm_style,
use_triton=config.use_triton,
residual=True,
residual_scale=residual_scale,
normalization=config.normalization,
)
mha = build_multi_head_attention(config.multi_head_config_masked)
cross_mha = build_multi_head_attention(config.multi_head_config_cross)
feedforward = build_feedforward(config.feedforward_config)
# Expose attention or feedforward specific capabilities
self.supports_attention_mask = mha.attention.supports_attention_mask
self.requires_same_k_q_dimensions = mha.attention.requires_same_k_q_dimensions
self.requires_squared_context_length = (
feedforward.requires_squared_context
or mha.attention.requires_squared_context
)
self.causal_attention = (
mha.attention.causal if hasattr(mha.attention, "causal") else False
)
# Wrappers handle the different layer norm styles (pre- and post-) and the residual path
self.wrap_att = ln_factory(mha)
self.wrap_cross = ln_factory(cross_mha)
self.wrap_ff: Union[Residual, PostNorm] = ln_factory(feedforward)
if (
config.residual_norm_style == ResidualNormStyle.Pre
and config.layer_position.is_last()
):
self.wrap_ff = PostNorm(
config.dim_model,
self.wrap_ff,
normalization=NormalizationType.LayerNorm,
)
@classmethod
def from_config(cls, config: xFormerDecoderConfig):
return cls(config)
def forward(
self,
target: torch.Tensor,
memory: torch.Tensor,
encoder_att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
decoder_att_mask: Optional[Union[torch.Tensor, AttentionMask]] = None,
input_mask: Optional[torch.Tensor] = None,
):
if self.pose_encoding is not None:
target = self.pose_encoding(target)
if hasattr(self, "embedding_projector"):
target = self.embedding_projector(target)
# Handle the optional input masking, differs on Q, K, V
if input_mask is not None:
target_q = target
target_k = target * input_mask.unsqueeze(-1)
target_v = target_k
else:
target_q, target_k, target_v = target, target, target
x = self.wrap_att(
inputs=[target_q, target_k, target_v], att_mask=decoder_att_mask
)
x = self.wrap_cross(inputs=[x, memory, memory], att_mask=encoder_att_mask)
x = self.wrap_ff(inputs=[x])
return x
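# Hedged usage sketch (added for illustration, not part of the upstream file): build a single
# encoder block from a config and run a forward pass on CPU. The component names
# ("scaled_dot_product", "MLP") and sizes are assumptions; use_triton is disabled so the
# plain PyTorch layers are used.
if __name__ == "__main__":
    encoder_config = xFormerEncoderConfig(
        dim_model=96,
        multi_head_config={
            "num_heads": 4,
            "residual_dropout": 0.0,
            "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
        },
        feedforward_config={
            "name": "MLP",
            "dropout": 0.0,
            "activation": "relu",
            "hidden_layer_multiplier": 4,
        },
        use_triton=False,
    )
    block = xFormerEncoderBlock.from_config(encoder_config)
    x = torch.randn(2, 128, 96)
    print(block(x).shape)  # torch.Size([2, 128, 96])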
| EXA-1-master | exa/libraries/xformers/xformers/factory/block_factory.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import torch
from xformers.components import reversible as rv
from xformers.components.residual import ResidualNormStyle, get_deepnorm_coefficients
from xformers.factory.block_configs import (
xFormerBlockConfig,
xFormerDecoderConfig,
xFormerEncoderConfig,
)
from xformers.factory.block_factory import xFormerDecoderBlock, xFormerEncoderBlock
from xformers.factory.weight_init import get_weight_init_fn, xFormerWeightInit
logger = logging.getLogger("xformers")
@dataclass(init=False)
class xFormerConfig:
"""
The configuration structure to define a full Transformer.
This can include a stack of encoder layers, and a stack of decoder layers.
It is optionally possible to share the embedding weights in between
the encoder and decoder positional encoding, as proposed for instance by
`Using the Output Embedding to Improve Language Models`, Press et al.
A full config example is for instance as follows:
::
xformer_config = [
{
"reversible": False, # Turn on to test the effect of using reversible layers
"block_type": "encoder",
"num_layers": LAYERS,
"dim_model": EMB,
"residual_norm_style": "pre",
"position_encoding_config": {
"name": "vocab",
"seq_len": CONTEXT,
"vocab_size": VOCAB_SIZE,
},
"multi_head_config": {
"num_heads": NUM_HEADS,
"residual_dropout": RES_DROP,
"use_rotary_embeddings": True,
"attention": {
"name": ATTENTION_MECHANISM_STR,
"dropout": ATTN_DROP,
"causal": True,
"seq_len": CONTEXT,
},
},
"feedforward_config": {
"name": "FusedMLP", # Use MLP if Triton is not available
"dropout": MLP_DROP,
"activation": "gelu",
"hidden_layer_multiplier": MLP_MULTIPLIER,
},
}
]
.. _`Using the Output Embedding to Improve Language Models`: https://arxiv.org/pdf/1608.05859.pdf
"""
stack_configs: Union[List[xFormerBlockConfig], Dict[str, xFormerBlockConfig]]
tie_embedding_weights: bool = False
weight_init: xFormerWeightInit = xFormerWeightInit.ViT
def __init__(
self,
stack_configs: Union[List[Dict[str, Any]], Dict[str, Dict[str, Any]]],
tie_embedding_weights: bool = False,
weight_init: xFormerWeightInit = xFormerWeightInit.ViT,
):
# Type all the configurations. Possible typos are caught here
if isinstance(stack_configs, dict):
self.stack_configs = {}
for k, config in stack_configs.items():
if config["block_type"] == "encoder":
self.stack_configs[k] = xFormerEncoderConfig(**config)
else:
self.stack_configs[k] = xFormerDecoderConfig(**config)
else:
self.stack_configs = []
for config in stack_configs:
if config["block_type"] == "encoder":
self.stack_configs.append(xFormerEncoderConfig(**config))
else:
self.stack_configs.append(xFormerDecoderConfig(**config))
self.tie_embedding_weights = tie_embedding_weights
self.weight_init = weight_init
class xFormer(torch.nn.Module):
def __init__(
self,
stack_configs: Union[
xFormerBlockConfig, List[xFormerBlockConfig], Dict[str, xFormerBlockConfig]
],
tie_embedding_weights: bool = False,
weight_init: xFormerWeightInit = xFormerWeightInit.ViT,
):
"""
Given a serialized configuration, generate the corresponding model.
This is only a helper and can easily be bypassed
"""
super().__init__()
if isinstance(stack_configs, Dict):
stack_configs = list(stack_configs.values())
# Convenience, users can pass either a list of configs or a single one
if not isinstance(stack_configs, List):
stack_configs = [stack_configs]
# Sanity checks, some config combinations do not make sense
self._verify_reversible(stack_configs)
self._verify_deepnorm(stack_configs)
encoders: List[torch.nn.Module] = []
decoders: List[torch.nn.Module] = []
self.reversible_encoder = False
self.rev_enc_pose_encoding = None
# Unroll the configs and build the model
for config in stack_configs:
# Handle either Encoder or Decoder stacks
builder = (
xFormerEncoderBlock.from_config
if isinstance(config, xFormerEncoderConfig)
else xFormerDecoderBlock.from_config
)
recipient = (
encoders if isinstance(config, xFormerEncoderConfig) else decoders
)
# Build up the stack
for i in range(config.num_layers):
# Label where this layer is in the stack
# (for instance useful for the positional encoding, or late layer norm)
if len(recipient) > 0:
config.layer_position.mark_not_first()
if config != stack_configs[-1] or i < config.num_layers - 1:
config.layer_position.mark_not_last()
block = builder(config) # type: ignore
# If reversible: extract the reversible sub-parts, else append the block as-is
if config.reversible:
# WARNING: only one pose encoding is saved here (not Focal Transformer compatible for instance)
assert isinstance(config, xFormerEncoderConfig)
if block.pose_encoding is not None:
self.rev_enc_pose_encoding = block.pose_encoding
self.reversible_encoder = True
f, g = xFormerEncoderBlock.get_reversible_layer(config)
recipient.append(torch.nn.ModuleList([f, g]))
else:
recipient.append(block) # type: ignore
# Tie embedding weights, if requested and possible
assert (
not tie_embedding_weights or not self.reversible_encoder
), "Reversible layers and tied embeddings is not supported for now"
if (
tie_embedding_weights
and encoders
and encoders[0].pose_encoding
and decoders
and decoders[0].pose_encoding
and not config.reversible
):
logger.info("Tying encoder and decoder embeddings, as requested")
encoders[0].pose_encoding = decoders[0].pose_encoding
self.encoders: torch.nn.Module = (
rv.ReversibleSequence(torch.nn.ModuleList(encoders))
if self.reversible_encoder
else torch.nn.ModuleList(encoders)
)
self.decoders = torch.nn.ModuleList(decoders)
use_deepnorm = (
stack_configs[0].residual_norm_style == ResidualNormStyle.DeepNorm
)
assert (
not use_deepnorm or not self.reversible_encoder
), "Reversible layers and deepnorm is not supported for now"
self.init_weights(weight_init=weight_init, use_deep_norm=use_deepnorm)
@classmethod
def from_config(cls, config: xFormerConfig):
return cls(
config.stack_configs, config.tie_embedding_weights, config.weight_init
)
def _verify_reversible(self, stack_configs: List[xFormerBlockConfig]):
reversible = [
c.reversible
for c in filter(lambda x: x.block_type == "encoder", stack_configs)
]
assert all(reversible) or not any(reversible), (
"All layers need to have the same reversibility setting. "
+ f"Currently {reversible}"
)
def _verify_deepnorm(self, stack_configs: List[xFormerBlockConfig]):
deepnorm = [
c.residual_norm_style == ResidualNormStyle.DeepNorm for c in stack_configs
]
assert all(deepnorm) or not any(deepnorm), (
"All layers need to have the same deepnorm setting. "
+ f"Currently {deepnorm}"
)
def init_weights(self, weight_init: xFormerWeightInit, use_deep_norm: bool):
# The deepnorm weight initialization method requires different gain factors for the encoder
# and decoder, depending on the general model structure (number of respective layers)
if use_deep_norm:
encoder_coefficients, decoder_coefficients = get_deepnorm_coefficients(
encoder_layers=len(self.encoders), decoder_layers=len(self.decoders) # type: ignore
)
else:
encoder_coefficients, decoder_coefficients = None, None
encoder_gain = (
encoder_coefficients.beta if encoder_coefficients is not None else 1.0
)
decoder_gain = (
decoder_coefficients.beta if decoder_coefficients is not None else 1.0
)
# Pick the desired init function
init_fn = get_weight_init_fn(weight_init)
# Initialize all the encoder weights
for name, module in self.encoders.named_children():
init_fn(module=module, name=name, gain=encoder_gain)
for name, module in self.decoders.named_children():
init_fn(module=module, name=name, gain=decoder_gain)
def forward(
self,
src: torch.Tensor,
tgt: Optional[torch.Tensor] = None,
encoder_input_mask: Optional[torch.Tensor] = None,
decoder_input_mask: Optional[torch.Tensor] = None,
) -> Optional[torch.Tensor]:
# Encode to latent space if encoder is present
if len(list(self.encoders.parameters())) > 0:
encoders = self.encoders
memory = src.clone()
if isinstance(encoders, torch.nn.ModuleList):
for encoder in encoders:
memory = encoder(memory, input_mask=encoder_input_mask)
else:
if self.rev_enc_pose_encoding:
memory = self.rev_enc_pose_encoding(src)
# Reversible Encoder
x = torch.cat([memory, memory], dim=-1)
# Apply the optional input masking
if encoder_input_mask is not None:
if x.dim() - encoder_input_mask.dim() > 1:
                        encoder_input_mask = encoder_input_mask.unsqueeze(0)
x += encoder_input_mask.unsqueeze(-1)
x = encoders(x)
memory = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if not self.decoders:
return memory
        # If decoder: either use the encoder output, or just decode, both options are possible
if len(self.decoders) > 0:
tgt = src.clone() if tgt is None else tgt
for decoder in self.decoders:
tgt = decoder(
target=tgt,
# pyre-fixme[61]: `memory` is not always initialized here.
memory=memory,
input_mask=decoder_input_mask,
)
return tgt
return None
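# Hedged usage sketch (added for illustration, not part of the upstream file): a two-layer,
# encoder-only stack driven entirely by the serialized config, mirroring the documented usage
# of xFormerConfig. Attention/feedforward names, vocab size and dimensions are assumptions.
if __name__ == "__main__":
    my_config = [
        {
            "reversible": False,
            "block_type": "encoder",
            "num_layers": 2,
            "dim_model": 64,
            "residual_norm_style": "pre",
            "position_encoding_config": {
                "name": "vocab",
                "seq_len": 128,
                "vocab_size": 96,
                "dropout": 0.0,
            },
            "multi_head_config": {
                "num_heads": 4,
                "residual_dropout": 0.0,
                "attention": {
                    "name": "scaled_dot_product",
                    "dropout": 0.0,
                    "causal": False,
                    "seq_len": 128,
                },
            },
            "feedforward_config": {
                "name": "MLP",
                "dropout": 0.0,
                "activation": "gelu",
                "hidden_layer_multiplier": 4,
            },
        }
    ]
    model = xFormer.from_config(xFormerConfig(my_config))
    tokens = torch.randint(0, 96, (2, 128))
    print(model(tokens).shape)  # torch.Size([2, 128, 64])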
| EXA-1-master | exa/libraries/xformers/xformers/factory/model_factory.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional
from xformers.components import NormalizationType, ResidualNormStyle
from xformers.components.feedforward import FEEDFORWARD_REGISTRY, FeedforwardConfig
from xformers.components.positional_embedding import (
POSITION_EMBEDDING_REGISTRY,
PositionEmbeddingConfig,
)
from xformers.utils import generate_matching_config
class LayerPositionBitmask(int, Enum):
First = 0b01
Last = 0b10
Default = 0b11
class LayerPosition:
"""Bitmask to mark this layer as first, last, nothing or both"""
def __init__(self):
self.bitmask = LayerPositionBitmask.Default
def is_first(self):
return bool(self.bitmask & LayerPositionBitmask.First)
def is_last(self):
return bool(self.bitmask & LayerPositionBitmask.Last)
def mark_not_first(self):
self.bitmask &= ~LayerPositionBitmask.First
def mark_not_last(self):
self.bitmask &= ~LayerPositionBitmask.Last
class BlockType(str, Enum):
Encoder = "encoder"
Decoder = "decoder"
@dataclass(init=False) # handle constructors explicitly to force type changes
class xFormerBlockConfig:
"""
The configuration structure to define a Transformer block.
This base class is applicable to both encoder and decoder definitions.
This completely defines each of the blocks, for instance in terms of dimensions,
position encoding, pre or post layer norms or reversibility.
"""
dim_model: int
feedforward_config: FeedforwardConfig
position_encoding_config: Optional[PositionEmbeddingConfig]
block_type: BlockType
residual_norm_style: ResidualNormStyle
normalization: NormalizationType
layer_position: LayerPosition
use_triton: bool
reversible: bool
num_layers: int
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]],
block_type: BlockType,
residual_norm_style: ResidualNormStyle = ResidualNormStyle("post"),
normalization: NormalizationType = NormalizationType.LayerNorm,
reversible: bool = False,
num_layers: int = 1,
layer_position: Optional[LayerPosition] = None,
):
self.dim_model = dim_model
self.block_type = block_type
self.residual_norm_style = residual_norm_style
self.reversible = reversible
self.num_layers = num_layers
self.normalization = normalization
# Fill in possible gaps in the config for subparts of the block
self.feedforward_config = generate_matching_config(
feedforward_config,
FEEDFORWARD_REGISTRY[feedforward_config["name"]].config,
)
self.position_encoding_config = (
generate_matching_config(
position_encoding_config,
POSITION_EMBEDDING_REGISTRY[position_encoding_config["name"]].config,
)
if position_encoding_config is not None
else None
)
# Default is that this layer is the only one, so both first and last
if layer_position:
self.layer_position = layer_position
else:
self.layer_position = LayerPosition()
@dataclass(init=False)
class xFormerEncoderConfig(xFormerBlockConfig):
"""
The configuration structure for an encoder block
"""
multi_head_config: Dict[str, Any]
use_triton: bool
simplicial_embeddings: Optional[Dict[str, Any]]
patch_embedding_config: Optional[Dict[str, Any]]
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
multi_head_config: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]] = None,
residual_norm_style: str = "post",
normalization: NormalizationType = NormalizationType.LayerNorm,
use_triton: bool = True,
simplicial_embeddings: Optional[Dict[str, Any]] = None,
patch_embedding_config: Optional[Dict[str, Any]] = None,
**kwargs,
):
# Convenience, fill in duplicated fields
try:
if "dim_model" not in multi_head_config.keys():
multi_head_config["dim_model"] = dim_model
if "dim_model" not in feedforward_config.keys():
feedforward_config["dim_model"] = dim_model
if (
position_encoding_config is not None
and "dim_model" not in position_encoding_config.keys()
):
position_encoding_config["dim_model"] = dim_model
if (
patch_embedding_config is not None
and "out_channels" not in patch_embedding_config.keys()
):
patch_embedding_config["out_channels"] = dim_model
except AttributeError:
# A config instance was passed in, this is fine
pass
if "block_type" in kwargs:
assert kwargs["block_type"] == "encoder"
kwargs["block_type"] = BlockType("encoder")
super().__init__(
dim_model=dim_model,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style=ResidualNormStyle(residual_norm_style),
normalization=NormalizationType(normalization),
**kwargs,
)
self.multi_head_config = multi_head_config
self.use_triton = use_triton
self.simplicial_embeddings = simplicial_embeddings
self.patch_embedding_config = patch_embedding_config
@dataclass(init=False)
class xFormerDecoderConfig(xFormerBlockConfig):
"""
The configuration structure for a decoder block.
This specifically defines the masked and cross attention mechanisms,
on top of the settings defining all blocks.
"""
multi_head_config_masked: Dict[str, Any] # prior to encoder output
multi_head_config_cross: Dict[str, Any] # cross attention, takes encoder output
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
multi_head_config_masked: Dict[str, Any],
multi_head_config_cross: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]] = None,
residual_norm_style: str = "post",
normalization: NormalizationType = NormalizationType.LayerNorm,
use_triton: bool = True,
**kwargs,
):
# Convenience, fill in duplicated field
try:
if "dim_model" not in multi_head_config_masked.keys():
multi_head_config_masked["dim_model"] = dim_model
if "dim_model" not in multi_head_config_cross.keys():
multi_head_config_cross["dim_model"] = dim_model
if "dim_model" not in feedforward_config.keys():
feedforward_config["dim_model"] = dim_model
if (
position_encoding_config is not None
and "dim_model" not in position_encoding_config.keys()
):
position_encoding_config["dim_model"] = dim_model
except AttributeError:
# A config instance was passed in, this is fine
pass
if "block_type" in kwargs.keys():
assert kwargs["block_type"] == "decoder"
kwargs["block_type"] = BlockType("decoder")
super().__init__(
dim_model=dim_model,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
residual_norm_style=ResidualNormStyle(residual_norm_style),
normalization=NormalizationType(normalization),
**kwargs,
)
self.multi_head_config_masked = multi_head_config_masked
self.multi_head_config_cross = multi_head_config_cross
self.use_triton = use_triton
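# Hedged usage sketch (added for illustration, not part of the upstream file): the encoder
# config types the feedforward sub-config and fills in the shared dim_model, while the
# multi-head config stays a plain dict for the block factory to consume. Names and sizes
# below are assumptions.
if __name__ == "__main__":
    cfg = xFormerEncoderConfig(
        dim_model=64,
        multi_head_config={
            "num_heads": 4,
            "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
        },
        feedforward_config={
            "name": "MLP",
            "dropout": 0.0,
            "activation": "relu",
            "hidden_layer_multiplier": 4,
        },
    )
    print(cfg.feedforward_config.dim_model)  # 64, inherited from dim_model
    print(cfg.multi_head_config["dim_model"])  # 64, filled into the raw dict
    print(cfg.layer_position.is_first(), cfg.layer_position.is_last())  # True True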
| EXA-1-master | exa/libraries/xformers/xformers/factory/block_configs.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: Reusing a lot of code from the Timm repo
# main difference is probably the handling of deepnorm init, and adapting to some xformers specificities
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
import logging
import math
from enum import Enum
from typing import Callable
import torch
import torch.nn as nn
from torch.nn.init import (
_calculate_fan_in_and_fan_out,
_no_grad_trunc_normal_,
_no_grad_uniform_,
)
logger = logging.getLogger("xformers")
_assert_if_not_initialized = False
class xFormerWeightInit(str, Enum):
Timm = "timm"
ViT = "vit"
Moco = "moco"
Small = "small"
def get_weight_init_fn(init_choice: xFormerWeightInit):
"""
Provide the xFormers factory with weight init routines.
Supported initializations are:
- Small: follow the method outlined in `Transformer Without Tears`_
- ViT: follow the initialization in the reference ViT_ codebase
- Timm: follow the initialization in the reference Timm_ codebase
- Moco: follow the initialization in the reference MocoV3_ codebase
.. _ViT: https://github.com/google-research/vision_transformer
.. _Timm: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
.. _MocoV3: https://github.com/facebookresearch/moco-v3
"""
return {
xFormerWeightInit.Timm: _init_weights_vit_timm,
xFormerWeightInit.ViT: _init_weights_vit_jax,
xFormerWeightInit.Moco: _init_weights_vit_moco,
xFormerWeightInit.Small: _init_weights_small,
}[init_choice]
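# Illustrative sketch (added comment, not part of the original xformers file): how the
# selector above might be used. `model` is any nn.Module; the keyword arguments mirror
# the _init_weights_* signatures defined further down in this file.
def _example_apply_weight_init(model: nn.Module, init_choice: xFormerWeightInit = xFormerWeightInit.Timm) -> None:
    init_fn = get_weight_init_fn(init_choice)
    # Each init function recurses over child modules itself, so one call covers the tree.
    init_fn(model, name="", gain=1.0)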
# Define pattern matches
def is_ffn(n):
return "feedforward" in n or ("wrap_ff" in n and not n.endswith("norm"))
def is_mha_input_projection(n):
return "q_proj" in n or "k_proj" in n or "v_proj" in n
# Define distribution helpers
def _small_init_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
r"""Fills the input `Tensor` with values according to the method
described in `Transformer Without Tears`_, using a uniform distribution.
This is a variation of the Xavier init. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + 4 * \text{fan\_out}}}
    Xavier init is also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
.. _`Transformer Without Tears`: https://arxiv.org/abs/1910.05895
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + 4 * fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a)
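# Worked example (added comment): for a square nn.Linear with fan_in = fan_out = 512 and
# gain = 1.0, std = sqrt(2 / (512 + 4 * 512)) ≈ 0.028 and a = sqrt(3) * std ≈ 0.048,
# so weights are drawn uniformly from roughly (-0.048, 0.048).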
def _lecun_normal(tensor, gain=1.0):
fan_in, _ = _calculate_fan_in_and_fan_out(tensor)
denom = fan_in
variance = gain / denom
# constant is stddev of standard normal truncated to (-2, 2)
_no_grad_trunc_normal_(
tensor,
mean=0.0,
std=math.sqrt(variance) / 0.87962566103423978,
a=-2.0,
b=2.0,
)
# Helpers to keep all the functions typesafe, and handle corner cases and common behaviours in one place
def _maybe_init_tensor(module: nn.Module, attr: str, distribution_: Callable, **kwargs):
# Small helper to catch all the corner cases, while staying type safe
if hasattr(module, attr):
maybe_tensor = getattr(module, attr)
if maybe_tensor is not None and isinstance(maybe_tensor, torch.Tensor):
distribution_(maybe_tensor, **kwargs)
def _maybe_report_no_init(module, name):
if len(list(module.named_children())) == 0 and (
hasattr(module, "weight") or hasattr(module, "bias")
):
# Skip layer norm, this is ok
if isinstance(module, torch.nn.LayerNorm):
return
# Skip nn.Embedding, we typically initialize it one level up, else Pytorch has a valid default
if isinstance(module, torch.nn.Embedding):
return
# This is unexpected, warn about a possible unhandled weight
logger.warning(
f"Not initializing weights in {name}, this could be a mistake.\nModule {module}"
)
if _assert_if_not_initialized:
assert False, (
f"Uninitialized weight found in {module}."
+ " If you have a custom module, please provide a `init_weights()` method"
)
# Define the different initialization schemes
def _init_weights_vit_jax(
module: nn.Module,
name: str = "",
head_bias: float = 0.0,
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""ViT weight initialization, matching JAX (Flax) impl"""
if is_ffn(name):
_maybe_init_tensor(module, "bias", nn.init.normal_, std=1e-6)
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1.0
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif isinstance(module, nn.Conv2d):
_maybe_init_tensor(module, "weight", _lecun_normal, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights() # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_jax(child_module, f"{name}.{child_name}", head_bias, gain)
def _init_weights_vit_moco(
module: nn.Module,
name: str = "",
gain: float = 1.0,
**kwargs,
):
"""ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed"""
assert (
"deepnorm_style" not in kwargs.keys()
), "This initialization method does not support deepnorm"
if is_ffn(name):
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
if isinstance(module.weight, torch.Tensor):
val = (
math.sqrt(6.0 / float(module.weight.shape[0] + module.weight.shape[1]))
* gain
)
_maybe_init_tensor(module, "weight", nn.init.uniform_, a=-val, b=val)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights(gain=gain) # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_moco(child_module, child_name, gain)
def _init_weights_small(
module: nn.Module,
name: str = "",
head_bias: float = 0.0,
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""Follow the `Transformer Without Tears`_ initialization for self-attention"""
if is_ffn(name):
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.normal_, std=1e-6)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
# "small init" only scales the attention layers init, not the FFN
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1.0
_maybe_init_tensor(module, "weight", _small_init_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif isinstance(module, nn.Conv2d):
_maybe_init_tensor(module, "weight", _lecun_normal)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights() # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_small(child_module, f"{name}.{child_name}", head_bias, gain)
def _init_weights_vit_timm(
module: nn.Module,
name: str = "",
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""
ViT weight initialization, original timm impl (for reproducibility).
See DeepNet_ for all the DeepNorm specific codepaths
"""
if isinstance(module, nn.Linear):
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1
std = 0.02 * gain
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
_maybe_init_tensor(
module, "weight", _no_grad_trunc_normal_, mean=0.0, std=std, a=-a, b=a
)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights(gain=gain) # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_timm(child_module, child_name, gain)
| EXA-1-master | exa/libraries/xformers/xformers/factory/weight_init.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import argparse
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import minigpt4.tasks as tasks
from minigpt4.common.config import Config
from minigpt4.common.dist_utils import get_rank, init_distributed_mode
from minigpt4.common.logger import setup_logger
from minigpt4.common.optims import (
LinearWarmupCosineLRScheduler,
LinearWarmupStepLRScheduler,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import now
# imports modules for registration
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import *
def parse_args():
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--cfg-path", required=True, help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
        help="override some settings in the used config; the key-value pairs "
        "in xxx=yyy format will be merged into the config file "
        "(deprecated; use --cfg-options instead).",
)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
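# Typical invocation (illustrative; the config file name is an assumption):
#     python train.py --cfg-path train_configs/minigpt4_stage2_finetune.yaml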
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def get_runner_class(cfg):
"""
Get runner class from config. Default to epoch-based runner.
"""
runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "runner_base"))
return runner_cls
def main():
    # allow the automatic dataset download to complete on the main process without timing out when using the NCCL backend.
    # os.environ["NCCL_BLOCKING_WAIT"] = "1"
    # set before init_distributed_mode() to ensure the same job_id is shared across all ranks.
job_id = now()
cfg = Config(parse_args())
init_distributed_mode(cfg.run_cfg)
setup_seeds(cfg)
# set after init_distributed_mode() to only log on master.
setup_logger()
cfg.pretty_print()
task = tasks.setup_task(cfg)
datasets = task.build_datasets(cfg)
model = task.build_model(cfg)
runner = get_runner_class(cfg)(
cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets
)
runner.train()
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/MiniGPT-4-main/train.py |
import argparse
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import gradio as gr
from minigpt4.common.config import Config
from minigpt4.common.dist_utils import get_rank
from minigpt4.common.registry import registry
from minigpt4.conversation.conversation import Chat, CONV_VISION
# imports modules for registration
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import *
def parse_args():
parser = argparse.ArgumentParser(description="Demo")
parser.add_argument("--cfg-path", required=True, help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
        help="override some settings in the used config; the key-value pairs "
        "in xxx=yyy format will be merged into the config file "
        "(deprecated; use --cfg-options instead).",
)
args = parser.parse_args()
return args
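# Typical invocation (illustrative; the config file name is an assumption):
#     python demo.py --cfg-path eval_configs/minigpt4_eval.yaml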
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
# ========================================
# Model Initialization
# ========================================
print('Initializing Chat')
cfg = Config(parse_args())
model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to('cuda:0')
vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
chat = Chat(model, vis_processor)
print('Initialization Finished')
# ========================================
# Gradio Setting
# ========================================
def gradio_reset(chat_state, img_list):
if chat_state is not None:
chat_state.messages = []
if img_list is not None:
img_list = []
return None, gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list
def upload_img(gr_img, text_input, chat_state):
if gr_img is None:
return None, None, gr.update(interactive=True), chat_state, None
chat_state = CONV_VISION.copy()
img_list = []
llm_message = chat.upload_img(gr_img, chat_state, img_list)
return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list
def gradio_ask(user_message, chatbot, chat_state):
if len(user_message) == 0:
return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
chat.ask(user_message, chat_state)
chatbot = chatbot + [[user_message, None]]
return '', chatbot, chat_state
def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
llm_message = chat.answer(conv=chat_state, img_list=img_list, max_new_tokens=1000, num_beams=num_beams, temperature=temperature)[0]
chatbot[-1][1] = llm_message
return chatbot, chat_state, img_list
title = """<h1 align="center">Demo of MiniGPT-4</h1>"""
description = """<h3>This is the demo of MiniGPT-4. Upload your images and start chatting!</h3>"""
article = """<p><a href='https://minigpt-4.github.io'><img src='https://img.shields.io/badge/Project-Page-Green'></a></p><p><a href='https://github.com/Vision-CAIR/MiniGPT-4'><img src='https://img.shields.io/badge/Github-Code-blue'></a></p><p><a href='https://raw.githubusercontent.com/Vision-CAIR/MiniGPT-4/main/MiniGPT_4.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a></p>
"""
#TODO show examples below
with gr.Blocks() as demo:
gr.Markdown(title)
gr.Markdown(description)
gr.Markdown(article)
with gr.Row():
with gr.Column(scale=0.5):
image = gr.Image(type="pil")
upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary")
clear = gr.Button("Restart")
num_beams = gr.Slider(
minimum=1,
maximum=10,
value=1,
step=1,
interactive=True,
                label="beam search numbers",
)
temperature = gr.Slider(
minimum=0.1,
maximum=2.0,
value=1.0,
step=0.1,
interactive=True,
label="Temperature",
)
with gr.Column():
chat_state = gr.State()
img_list = gr.State()
chatbot = gr.Chatbot(label='MiniGPT-4')
text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False)
upload_button.click(upload_img, [image, text_input, chat_state], [image, text_input, upload_button, chat_state, img_list])
text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
)
clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list], queue=False)
demo.launch(share=True, enable_queue=True)
| EXA-1-master | exa/models/MiniGPT-4-main/demo.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import sys
from omegaconf import OmegaConf
from minigpt4.common.registry import registry
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.tasks import *
root_dir = os.path.dirname(os.path.abspath(__file__))
default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml"))
registry.register_path("library_root", root_dir)
repo_root = os.path.join(root_dir, "..")
registry.register_path("repo_root", repo_root)
cache_root = os.path.join(repo_root, default_cfg.env.cache_root)
registry.register_path("cache_root", cache_root)
registry.register("MAX_INT", sys.maxsize)
registry.register("SPLIT_NAMES", ["train", "val", "test"])
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import torch
import torch.distributed as dist
from minigpt4.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized
from minigpt4.common.logger import MetricLogger, SmoothedValue
from minigpt4.common.registry import registry
from minigpt4.datasets.data_utils import prepare_sample
class BaseTask:
def __init__(self, **kwargs):
super().__init__()
self.inst_id_key = "instance_id"
@classmethod
def setup_task(cls, **kwargs):
return cls()
def build_model(self, cfg):
model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
return model_cls.from_config(model_config)
def build_datasets(self, cfg):
"""
Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'.
Download dataset and annotations automatically if not exist.
Args:
            cfg (common.config.Config): configuration object specifying the datasets to build.
Returns:
dict: Dictionary of torch.utils.data.Dataset objects by split.
"""
datasets = dict()
datasets_config = cfg.datasets_cfg
assert len(datasets_config) > 0, "At least one dataset has to be specified."
for name in datasets_config:
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
dataset = builder.build_datasets()
dataset['train'].name = name
if 'sample_ratio' in dataset_config:
dataset['train'].sample_ratio = dataset_config.sample_ratio
datasets[name] = dataset
return datasets
def train_step(self, model, samples):
loss = model(samples)["loss"]
return loss
def valid_step(self, model, samples):
raise NotImplementedError
def before_evaluation(self, model, dataset, **kwargs):
model.before_evaluation(dataset=dataset, task_type=type(self))
def after_evaluation(self, **kwargs):
pass
def inference_step(self):
raise NotImplementedError
def evaluation(self, model, data_loader, cuda_enabled=True):
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation"
# TODO make it configurable
print_freq = 10
results = []
for samples in metric_logger.log_every(data_loader, print_freq, header):
samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
eval_output = self.valid_step(model=model, samples=samples)
results.extend(eval_output)
if is_dist_avail_and_initialized():
dist.barrier()
return results
def train_epoch(
self,
epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
cuda_enabled=False,
log_freq=50,
accum_grad_iters=1,
):
return self._train_inner_loop(
epoch=epoch,
iters_per_epoch=lr_scheduler.iters_per_epoch,
model=model,
data_loader=data_loader,
optimizer=optimizer,
scaler=scaler,
lr_scheduler=lr_scheduler,
log_freq=log_freq,
cuda_enabled=cuda_enabled,
accum_grad_iters=accum_grad_iters,
)
def train_iters(
self,
epoch,
start_iters,
iters_per_inner_epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
cuda_enabled=False,
log_freq=50,
accum_grad_iters=1,
):
return self._train_inner_loop(
epoch=epoch,
start_iters=start_iters,
iters_per_epoch=iters_per_inner_epoch,
model=model,
data_loader=data_loader,
optimizer=optimizer,
scaler=scaler,
lr_scheduler=lr_scheduler,
log_freq=log_freq,
cuda_enabled=cuda_enabled,
accum_grad_iters=accum_grad_iters,
)
def _train_inner_loop(
self,
epoch,
iters_per_epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
start_iters=None,
log_freq=50,
cuda_enabled=False,
accum_grad_iters=1,
):
"""
An inner training loop compatible with both epoch-based and iter-based training.
When using epoch-based, training stops after one epoch; when using iter-based,
training stops after #iters_per_epoch iterations.
"""
use_amp = scaler is not None
if not hasattr(data_loader, "__next__"):
# convert to iterator if not already
data_loader = iter(data_loader)
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}"))
# if iter-based runner, schedule lr based on inner epoch.
logging.info(
"Start training epoch {}, {} iters per inner epoch.".format(
epoch, iters_per_epoch
)
)
header = "Train: data epoch: [{}]".format(epoch)
if start_iters is None:
# epoch-based runner
inner_epoch = epoch
else:
# In iter-based runner, we schedule the learning rate based on iterations.
inner_epoch = start_iters // iters_per_epoch
header = header + "; inner epoch [{}]".format(inner_epoch)
for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header):
# if using iter-based runner, we stop after iters_per_epoch iterations.
if i >= iters_per_epoch:
break
samples = next(data_loader)
samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
samples.update(
{
"epoch": inner_epoch,
"num_iters_per_epoch": iters_per_epoch,
"iters": i,
}
)
lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i)
with torch.cuda.amp.autocast(enabled=use_amp):
loss = self.train_step(model=model, samples=samples)
# after_train_step()
if use_amp:
scaler.scale(loss).backward()
else:
loss.backward()
# update gradients every accum_grad_iters iterations
if (i + 1) % accum_grad_iters == 0:
if use_amp:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
optimizer.zero_grad()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# after train_epoch()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
logging.info("Averaged stats: " + str(metric_logger.global_avg()))
return {
k: "{:.3f}".format(meter.global_avg)
for k, meter in metric_logger.meters.items()
}
@staticmethod
def save_result(result, result_dir, filename, remove_duplicate=""):
import json
result_file = os.path.join(
result_dir, "%s_rank%d.json" % (filename, get_rank())
)
final_result_file = os.path.join(result_dir, "%s.json" % filename)
json.dump(result, open(result_file, "w"))
if is_dist_avail_and_initialized():
dist.barrier()
if is_main_process():
logging.warning("rank %d starts merging results." % get_rank())
# combine results from all processes
result = []
for rank in range(get_world_size()):
result_file = os.path.join(
result_dir, "%s_rank%d.json" % (filename, rank)
)
res = json.load(open(result_file, "r"))
result += res
if remove_duplicate:
result_new = []
id_list = []
for res in result:
if res[remove_duplicate] not in id_list:
id_list.append(res[remove_duplicate])
result_new.append(res)
result = result_new
json.dump(result, open(final_result_file, "w"))
print("result file saved to %s" % final_result_file)
return final_result_file
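# Note (added comment): in _train_inner_loop above, optimizer.step() runs only every
# accum_grad_iters iterations, so the effective batch size per parameter update is the
# data loader's batch size multiplied by accum_grad_iters (and by the number of
# distributed workers, if any).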
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/tasks/base_task.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from minigpt4.common.registry import registry
from minigpt4.tasks.base_task import BaseTask
from minigpt4.tasks.image_text_pretrain import ImageTextPretrainTask
def setup_task(cfg):
assert "task" in cfg.run_cfg, "Task name must be provided."
task_name = cfg.run_cfg.task
task = registry.get_task_class(task_name).setup_task(cfg=cfg)
assert task is not None, "Task {} not properly registered.".format(task_name)
return task
__all__ = [
"BaseTask",
"ImageTextPretrainTask",
]
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/tasks/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from minigpt4.common.registry import registry
from minigpt4.tasks.base_task import BaseTask
@registry.register_task("image_text_pretrain")
class ImageTextPretrainTask(BaseTask):
def __init__(self):
super().__init__()
def evaluation(self, model, data_loader, cuda_enabled=True):
pass
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/tasks/image_text_pretrain.py |
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/__init__.py
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import gzip
import logging
import os
import random as rnd
import tarfile
import zipfile
import random
from typing import List
from tqdm import tqdm
import decord
from decord import VideoReader
import webdataset as wds
import numpy as np
import torch
from torch.utils.data.dataset import IterableDataset
from minigpt4.common.registry import registry
from minigpt4.datasets.datasets.base_dataset import ConcatDataset
decord.bridge.set_bridge("torch")
MAX_INT = registry.get("MAX_INT")
class ChainDataset(wds.DataPipeline):
r"""Dataset for chaining multiple :class:`DataPipeline` s.
This class is useful to assemble different existing dataset streams. The
chaining operation is done on-the-fly, so concatenating large-scale
datasets with this class will be efficient.
Args:
datasets (iterable of IterableDataset): datasets to be chained together
"""
def __init__(self, datasets: List[wds.DataPipeline]) -> None:
super().__init__()
self.datasets = datasets
self.prob = []
self.names = []
for dataset in self.datasets:
if hasattr(dataset, 'name'):
self.names.append(dataset.name)
else:
self.names.append('Unknown')
if hasattr(dataset, 'sample_ratio'):
self.prob.append(dataset.sample_ratio)
else:
self.prob.append(1)
                logging.info("One of the datapipelines does not define a sample ratio; defaulting to 1.")
def __iter__(self):
datastreams = [iter(dataset) for dataset in self.datasets]
while True:
select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0]
yield next(select_datastream)
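# Illustrative sketch (added, not part of the original file): chaining two webdataset
# pipelines with a 2:1 sampling ratio. `sample_ratio` and `name` are the optional
# attributes read by ChainDataset.__init__ above.
def _example_chain_pipelines(pipeline_a: wds.DataPipeline, pipeline_b: wds.DataPipeline) -> ChainDataset:
    pipeline_a.sample_ratio = 2  # drawn twice as often as pipeline_b
    pipeline_b.sample_ratio = 1
    return ChainDataset([pipeline_a, pipeline_b])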
def apply_to_sample(f, sample):
if len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
else:
return x
return _apply(sample)
def move_to_cuda(sample):
def _move_to_cuda(tensor):
return tensor.cuda()
return apply_to_sample(_move_to_cuda, sample)
def prepare_sample(samples, cuda_enabled=True):
if cuda_enabled:
samples = move_to_cuda(samples)
# TODO fp16 support
return samples
def reorg_datasets_by_split(datasets):
"""
Organizes datasets by split.
Args:
datasets: dict of torch.utils.data.Dataset objects by name.
Returns:
Dict of datasets by split {split_name: List[Datasets]}.
"""
# if len(datasets) == 1:
# return datasets[list(datasets.keys())[0]]
# else:
reorg_datasets = dict()
# reorganize by split
for _, dataset in datasets.items():
for split_name, dataset_split in dataset.items():
if split_name not in reorg_datasets:
reorg_datasets[split_name] = [dataset_split]
else:
reorg_datasets[split_name].append(dataset_split)
return reorg_datasets
def concat_datasets(datasets):
"""
Concatenates multiple datasets into a single dataset.
    It supports map-style datasets and DataPipeline from WebDataset; generic IterableDataset is
    currently not supported because it would require creating separate samplers.
    Only training datasets are concatenated; validation and test splits are assumed to contain a
    single dataset each, because metrics should not be computed on concatenated datasets.
Args:
datasets: dict of torch.utils.data.Dataset objects by split.
Returns:
Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets,
"val" and "test" remain the same.
If the input training datasets contain both map-style and DataPipeline datasets, returns
a tuple, where the first element is a concatenated map-style dataset and the second
element is a chained DataPipeline dataset.
"""
# concatenate datasets in the same split
for split_name in datasets:
if split_name != "train":
assert (
len(datasets[split_name]) == 1
), "Do not support multiple {} datasets.".format(split_name)
datasets[split_name] = datasets[split_name][0]
else:
iterable_datasets, map_datasets = [], []
for dataset in datasets[split_name]:
if isinstance(dataset, wds.DataPipeline):
logging.info(
"Dataset {} is IterableDataset, can't be concatenated.".format(
dataset
)
)
iterable_datasets.append(dataset)
elif isinstance(dataset, IterableDataset):
raise NotImplementedError(
"Do not support concatenation of generic IterableDataset."
)
else:
map_datasets.append(dataset)
# if len(iterable_datasets) > 0:
# concatenate map-style datasets and iterable-style datasets separately
if len(iterable_datasets) > 1:
chained_datasets = (
ChainDataset(iterable_datasets)
)
elif len(iterable_datasets) == 1:
chained_datasets = iterable_datasets[0]
else:
chained_datasets = None
concat_datasets = (
ConcatDataset(map_datasets) if len(map_datasets) > 0 else None
)
train_datasets = concat_datasets, chained_datasets
train_datasets = tuple([x for x in train_datasets if x is not None])
train_datasets = (
train_datasets[0] if len(train_datasets) == 1 else train_datasets
)
datasets[split_name] = train_datasets
return datasets
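# Note (added comment): after concat_datasets(), datasets["train"] is a single ConcatDataset
# or ChainDataset, or a (ConcatDataset, ChainDataset) tuple when both map-style datasets and
# webdataset pipelines are present; the "val" and "test" entries keep their single dataset.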
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/data_utils.py |
"""
This file is from
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import shutil
import warnings
from omegaconf import OmegaConf
import torch.distributed as dist
from torchvision.datasets.utils import download_url
import minigpt4.common.utils as utils
from minigpt4.common.dist_utils import is_dist_avail_and_initialized, is_main_process
from minigpt4.common.registry import registry
from minigpt4.processors.base_processor import BaseProcessor
class BaseDatasetBuilder:
train_dataset_cls, eval_dataset_cls = None, None
def __init__(self, cfg=None):
super().__init__()
if cfg is None:
# help to create datasets from default config.
self.config = load_dataset_config(self.default_config_path())
elif isinstance(cfg, str):
self.config = load_dataset_config(cfg)
else:
# when called from task.build_dataset()
self.config = cfg
self.data_type = self.config.data_type
self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
def build_datasets(self):
# download, split, etc...
# only called on 1 GPU/TPU in distributed
if is_main_process():
self._download_data()
if is_dist_avail_and_initialized():
dist.barrier()
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
datasets = self.build() # dataset['train'/'val'/'test']
return datasets
def build_processors(self):
vis_proc_cfg = self.config.get("vis_processor")
txt_proc_cfg = self.config.get("text_processor")
if vis_proc_cfg is not None:
vis_train_cfg = vis_proc_cfg.get("train")
vis_eval_cfg = vis_proc_cfg.get("eval")
self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg)
self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg)
if txt_proc_cfg is not None:
txt_train_cfg = txt_proc_cfg.get("train")
txt_eval_cfg = txt_proc_cfg.get("eval")
self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg)
self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg)
@staticmethod
def _build_proc_from_cfg(cfg):
return (
registry.get_processor_class(cfg.name).from_config(cfg)
if cfg is not None
else None
)
@classmethod
def default_config_path(cls, type="default"):
return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])
def _download_data(self):
self._download_ann()
self._download_vis()
def _download_ann(self):
"""
Download annotation files if necessary.
        All the vision-language datasets should have annotations in a unified format.
storage_path can be:
(1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.
(2) basename/dirname: will be suffixed with base name of URL if dirname is provided.
Local annotation paths should be relative.
"""
anns = self.config.build_info.annotations
splits = anns.keys()
cache_root = registry.get_path("cache_root")
for split in splits:
info = anns[split]
urls, storage_paths = info.get("url", None), info.storage
if isinstance(urls, str):
urls = [urls]
if isinstance(storage_paths, str):
storage_paths = [storage_paths]
assert len(urls) == len(storage_paths)
for url_or_filename, storage_path in zip(urls, storage_paths):
# if storage_path is relative, make it full by prefixing with cache_root.
if not os.path.isabs(storage_path):
storage_path = os.path.join(cache_root, storage_path)
dirname = os.path.dirname(storage_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isfile(url_or_filename):
src, dst = url_or_filename, storage_path
if not os.path.exists(dst):
shutil.copyfile(src=src, dst=dst)
else:
logging.info("Using existing file {}.".format(dst))
else:
if os.path.isdir(storage_path):
# if only dirname is provided, suffix with basename of URL.
raise ValueError(
"Expecting storage_path to be a file path, got directory {}".format(
storage_path
)
)
else:
filename = os.path.basename(storage_path)
download_url(url=url_or_filename, root=dirname, filename=filename)
def _download_vis(self):
storage_path = self.config.build_info.get(self.data_type).storage
storage_path = utils.get_cache_path(storage_path)
if not os.path.exists(storage_path):
warnings.warn(
f"""
The specified path {storage_path} for visual inputs does not exist.
Please provide a correct path to the visual inputs or
refer to datasets/download_scripts/README.md for downloading instructions.
"""
)
def build(self):
"""
Create by split datasets inheriting torch.utils.data.Datasets.
# build() can be dataset-specific. Overwrite to customize.
"""
self.build_processors()
build_info = self.config.build_info
ann_info = build_info.annotations
vis_info = build_info.get(self.data_type)
datasets = dict()
for split in ann_info.keys():
if split not in ["train", "val", "test"]:
continue
is_train = split == "train"
# processors
vis_processor = (
self.vis_processors["train"]
if is_train
else self.vis_processors["eval"]
)
text_processor = (
self.text_processors["train"]
if is_train
else self.text_processors["eval"]
)
# annotation path
ann_paths = ann_info.get(split).storage
if isinstance(ann_paths, str):
ann_paths = [ann_paths]
abs_ann_paths = []
for ann_path in ann_paths:
if not os.path.isabs(ann_path):
ann_path = utils.get_cache_path(ann_path)
abs_ann_paths.append(ann_path)
ann_paths = abs_ann_paths
# visual data storage path
vis_path = os.path.join(vis_info.storage, split)
if not os.path.isabs(vis_path):
# vis_path = os.path.join(utils.get_cache_path(), vis_path)
vis_path = utils.get_cache_path(vis_path)
if not os.path.exists(vis_path):
warnings.warn("storage path {} does not exist.".format(vis_path))
# create datasets
dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
datasets[split] = dataset_cls(
vis_processor=vis_processor,
text_processor=text_processor,
ann_paths=ann_paths,
vis_root=vis_path,
)
return datasets
def load_dataset_config(cfg_path):
cfg = OmegaConf.load(cfg_path).datasets
cfg = cfg[list(cfg.keys())[0]]
return cfg
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/builders/base_dataset_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from minigpt4.datasets.builders.base_dataset_builder import load_dataset_config
from minigpt4.datasets.builders.image_text_pair_builder import (
CCSBUBuilder,
LaionBuilder,
CCSBUAlignBuilder
)
from minigpt4.common.registry import registry
__all__ = [
"CCSBUBuilder",
"LaionBuilder",
"CCSBUAlignBuilder"
]
def load_dataset(name, cfg_path=None, vis_path=None, data_type=None):
"""
Example
    >>> dataset = load_dataset("coco_caption", cfg_path=None)
>>> splits = dataset.keys()
>>> print([len(dataset[split]) for split in splits])
"""
if cfg_path is None:
cfg = None
else:
cfg = load_dataset_config(cfg_path)
try:
builder = registry.get_builder_class(name)(cfg)
except TypeError:
print(
f"Dataset {name} not found. Available datasets:\n"
+ ", ".join([str(k) for k in dataset_zoo.get_names()])
)
exit(1)
if vis_path is not None:
if data_type is None:
# use default data type in the config
data_type = builder.config.data_type
assert (
data_type in builder.config.build_info
), f"Invalid data_type {data_type} for {name}."
builder.config.build_info.get(data_type).storage = vis_path
dataset = builder.build_datasets()
return dataset
class DatasetZoo:
def __init__(self) -> None:
self.dataset_zoo = {
k: list(v.DATASET_CONFIG_DICT.keys())
for k, v in sorted(registry.mapping["builder_name_mapping"].items())
}
def get_names(self):
return list(self.dataset_zoo.keys())
dataset_zoo = DatasetZoo()
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/builders/__init__.py |
import os
import logging
import warnings
from minigpt4.common.registry import registry
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from minigpt4.datasets.datasets.laion_dataset import LaionDataset
from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset
@registry.register_builder("cc_sbu")
class CCSBUBuilder(BaseDatasetBuilder):
train_dataset_cls = CCSBUDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"}
def _download_ann(self):
pass
def _download_vis(self):
pass
def build(self):
self.build_processors()
build_info = self.config.build_info
datasets = dict()
split = "train"
# create datasets
# [NOTE] return inner_datasets (wds.DataPipeline)
dataset_cls = self.train_dataset_cls
datasets[split] = dataset_cls(
vis_processor=self.vis_processors[split],
text_processor=self.text_processors[split],
location=build_info.storage,
).inner_dataset
return datasets
@registry.register_builder("laion")
class LaionBuilder(BaseDatasetBuilder):
train_dataset_cls = LaionDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"}
def _download_ann(self):
pass
def _download_vis(self):
pass
def build(self):
self.build_processors()
build_info = self.config.build_info
datasets = dict()
split = "train"
# create datasets
# [NOTE] return inner_datasets (wds.DataPipeline)
dataset_cls = self.train_dataset_cls
datasets[split] = dataset_cls(
vis_processor=self.vis_processors[split],
text_processor=self.text_processors[split],
location=build_info.storage,
).inner_dataset
return datasets
@registry.register_builder("cc_sbu_align")
class CCSBUAlignBuilder(BaseDatasetBuilder):
train_dataset_cls = CCSBUAlignDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/cc_sbu/align.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
storage_path = build_info.storage
datasets = dict()
if not os.path.exists(storage_path):
warnings.warn("storage path {} does not exist.".format(storage_path))
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
vis_processor=self.vis_processors["train"],
text_processor=self.text_processors["train"],
ann_paths=[os.path.join(storage_path, 'filter_cap.json')],
vis_root=os.path.join(storage_path, 'image'),
)
return datasets
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/builders/image_text_pair_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import webdataset as wds
from minigpt4.datasets.datasets.base_dataset import BaseDataset
class LaionDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, location):
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
self.inner_dataset = wds.DataPipeline(
wds.ResampledShards(location),
wds.tarfile_to_samples(handler=wds.warn_and_continue),
wds.shuffle(1000, handler=wds.warn_and_continue),
wds.decode("pilrgb", handler=wds.warn_and_continue),
wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
wds.map(self.to_dict, handler=wds.warn_and_continue),
)
def to_dict(self, sample):
return {
"image": sample[0],
"text_input": self.text_processor(sample[1]["caption"]),
}
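# Note (added comment): `location` is a webdataset shard pattern, e.g. something like
# "/path/to/laion/{00000..01000}.tar" (the exact path is an assumption); ResampledShards
# resamples shards with replacement, so the resulting pipeline is effectively infinite.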
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/laion_dataset.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
from typing import Iterable
from torch.utils.data import Dataset, ConcatDataset
from torch.utils.data.dataloader import default_collate
class BaseDataset(Dataset):
def __init__(
self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[]
):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = []
for ann_path in ann_paths:
self.annotation.extend(json.load(open(ann_path, "r"))['annotations'])
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
def __len__(self):
return len(self.annotation)
def collater(self, samples):
return default_collate(samples)
def set_processors(self, vis_processor, text_processor):
self.vis_processor = vis_processor
self.text_processor = text_processor
def _add_instance_ids(self, key="instance_id"):
for idx, ann in enumerate(self.annotation):
ann[key] = str(idx)
class ConcatDataset(ConcatDataset):
def __init__(self, datasets: Iterable[Dataset]) -> None:
super().__init__(datasets)
def collater(self, samples):
# TODO For now only supports datasets with same underlying collater implementations
all_keys = set()
for s in samples:
all_keys.update(s)
shared_keys = all_keys
for s in samples:
shared_keys = shared_keys & set(s.keys())
samples_shared_keys = []
for s in samples:
samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys})
return self.datasets[0].collater(samples_shared_keys)
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/base_dataset.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import time
import random
import torch
from minigpt4.datasets.data_utils import move_to_cuda
from torch.utils.data import DataLoader
class MultiIterLoader:
"""
A simple wrapper for iterating over multiple iterators.
Args:
loaders (List[Loader]): List of Iterator loaders.
ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly.
"""
def __init__(self, loaders, ratios=None):
# assert all loaders has __next__ method
for loader in loaders:
assert hasattr(
loader, "__next__"
), "Loader {} has no __next__ method.".format(loader)
if ratios is None:
ratios = [1.0] * len(loaders)
else:
assert len(ratios) == len(loaders)
ratios = [float(ratio) / sum(ratios) for ratio in ratios]
self.loaders = loaders
self.ratios = ratios
def __next__(self):
# random sample from each loader by ratio
loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]
return next(self.loaders[loader_idx])
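# Illustrative sketch (added, not part of the original file): combining two infinite
# loaders with a 2:1 sampling ratio. Both arguments must implement __next__, e.g. the
# IterLoader / PrefetchLoader wrappers defined below.
def _example_multi_iter_loader(loader_a, loader_b) -> MultiIterLoader:
    return MultiIterLoader(loaders=[loader_a, loader_b], ratios=[2.0, 1.0])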
class PrefetchLoader(object):
"""
Modified from https://github.com/ChenRocks/UNITER.
overlap compute and cuda data transfer
(copied and then modified from nvidia apex)
"""
def __init__(self, loader):
self.loader = loader
self.stream = torch.cuda.Stream()
def __iter__(self):
loader_it = iter(self.loader)
self.preload(loader_it)
batch = self.next(loader_it)
while batch is not None:
is_tuple = isinstance(batch, tuple)
if is_tuple:
task, batch = batch
if is_tuple:
yield task, batch
else:
yield batch
batch = self.next(loader_it)
def __len__(self):
return len(self.loader)
def preload(self, it):
try:
self.batch = next(it)
except StopIteration:
self.batch = None
return
# if record_stream() doesn't work, another option is to make sure
# device inputs are created on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input,
# device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target,
# device='cuda')
# Need to make sure the memory allocated for next_* is not still in use
# by the main stream at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.batch = move_to_cuda(self.batch)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this
# side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
def next(self, it):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is not None:
record_cuda_stream(batch)
self.preload(it)
return batch
def __getattr__(self, name):
method = self.loader.__getattribute__(name)
return method
def record_cuda_stream(batch):
if isinstance(batch, torch.Tensor):
batch.record_stream(torch.cuda.current_stream())
elif isinstance(batch, list) or isinstance(batch, tuple):
for t in batch:
record_cuda_stream(t)
elif isinstance(batch, dict):
for t in batch.values():
record_cuda_stream(t)
else:
pass
class IterLoader:
"""
A wrapper to convert DataLoader as an infinite iterator.
Modified from:
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py
"""
def __init__(self, dataloader: DataLoader, use_distributed: bool = False):
self._dataloader = dataloader
self.iter_loader = iter(self._dataloader)
self._use_distributed = use_distributed
self._epoch = 0
@property
def epoch(self) -> int:
return self._epoch
def __next__(self):
try:
data = next(self.iter_loader)
except StopIteration:
self._epoch += 1
if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed:
self._dataloader.sampler.set_epoch(self._epoch)
time.sleep(2) # Prevent possible deadlock during epoch transition
self.iter_loader = iter(self._dataloader)
data = next(self.iter_loader)
return data
def __iter__(self):
return self
def __len__(self):
return len(self._dataloader)
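# Illustrative sketch (added, not part of the original file): wrapping a finite DataLoader
# so that iteration never raises StopIteration; the wrapper restarts the loader and tracks
# the epoch count instead.
def _example_consume_batches(dataloader: DataLoader, num_steps: int = 10):
    loader = IterLoader(dataloader, use_distributed=False)
    for _ in range(num_steps):
        yield next(loader)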
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/dataloader_utils.py |
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/__init__.py
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from minigpt4.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"caption": ann["caption"],
"image": sample["image"],
}
)
class CaptionDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["image_id"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
# TODO this assumes image input, not general enough
ann = self.annotation[index]
img_file = '{:0>12}.jpg'.format(ann["image_id"])
image_path = os.path.join(self.vis_root, img_file)
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = self.text_processor(ann["caption"])
return {
"image": image,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
}
class CaptionEvalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
return {
"image": image,
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/caption_datasets.py |
import os
from PIL import Image
import webdataset as wds
from minigpt4.datasets.datasets.base_dataset import BaseDataset
from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
class CCSBUDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, location):
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
self.inner_dataset = wds.DataPipeline(
wds.ResampledShards(location),
wds.tarfile_to_samples(handler=wds.warn_and_continue),
wds.shuffle(1000, handler=wds.warn_and_continue),
wds.decode("pilrgb", handler=wds.warn_and_continue),
wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
wds.map(self.to_dict, handler=wds.warn_and_continue),
)
def to_dict(self, sample):
return {
"image": sample[0],
"text_input": self.text_processor(sample[1]["caption"]),
}
class CCSBUAlignDataset(CaptionDataset):
def __getitem__(self, index):
# TODO this assumes image input, not general enough
ann = self.annotation[index]
img_file = '{}.jpg'.format(ann["image_id"])
image_path = os.path.join(self.vis_root, img_file)
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = ann["caption"]
return {
"image": image,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
} | EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/datasets/datasets/cc_sbu_dataset.py |
import logging
import random
import torch
from torch.cuda.amp import autocast as autocast
import torch.nn as nn
from minigpt4.common.registry import registry
from minigpt4.models.blip2 import Blip2Base, disabled_train
from minigpt4.models.modeling_llama import LlamaForCausalLM
from transformers import LlamaTokenizer
@registry.register_model("mini_gpt4")
class MiniGPT4(Blip2Base):
"""
BLIP2 GPT-LLAMA model.
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain_vicuna": "configs/models/minigpt4.yaml",
}
def __init__(
self,
vit_model="eva_clip_g",
q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth",
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
freeze_qformer=True,
num_query_token=32,
llama_model="",
prompt_path="",
prompt_template="",
max_txt_len=32,
low_resource=False, # use 8 bit and put vit in cpu
end_sym='\n',
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.low_resource = low_resource
print('Loading VIT')
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
for name, param in self.visual_encoder.named_parameters():
param.requires_grad = False
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
for name, param in self.ln_vision.named_parameters():
param.requires_grad = False
self.ln_vision = self.ln_vision.eval()
self.ln_vision.train = disabled_train
logging.info("freeze vision encoder")
print('Loading VIT Done')
print('Loading Q-Former')
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.cls = None
self.Qformer.bert.embeddings.word_embeddings = None
self.Qformer.bert.embeddings.position_embeddings = None
for layer in self.Qformer.bert.encoder.layer:
layer.output = None
layer.intermediate = None
self.load_from_pretrained(url_or_filename=q_former_model)
if freeze_qformer:
for name, param in self.Qformer.named_parameters():
param.requires_grad = False
self.Qformer = self.Qformer.eval()
self.Qformer.train = disabled_train
self.query_tokens.requires_grad = False
logging.info("freeze Qformer")
print('Loading Q-Former Done')
print('Loading LLAMA')
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model, use_fast=False)
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
if self.low_resource:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model,
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto"
)
else:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model,
torch_dtype=torch.float16,
)
for name, param in self.llama_model.named_parameters():
param.requires_grad = False
print('Loading LLAMA Done')
self.llama_proj = nn.Linear(
self.Qformer.config.hidden_size, self.llama_model.config.hidden_size
)
self.max_txt_len = max_txt_len
self.end_sym = end_sym
if prompt_path:
with open(prompt_path, 'r') as f:
raw_prompts = f.read().splitlines()
            filtered_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt]
            self.prompt_list = [prompt_template.format(p) for p in filtered_prompts]
print('Load {} training prompts'.format(len(self.prompt_list)))
print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
else:
self.prompt_list = []
def vit_to_cpu(self):
self.ln_vision.to("cpu")
self.ln_vision.float()
self.visual_encoder.to("cpu")
self.visual_encoder.float()
def encode_img(self, image):
device = image.device
if self.low_resource:
self.vit_to_cpu()
image = image.to("cpu")
with self.maybe_autocast():
image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
inputs_llama = self.llama_proj(query_output.last_hidden_state)
atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
return inputs_llama, atts_llama
def prompt_wrap(self, img_embeds, atts_img, prompt):
if prompt:
batch_size = img_embeds.shape[0]
p_before, p_after = prompt.split('<ImageHere>')
p_before_tokens = self.llama_tokenizer(
p_before, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
p_after_tokens = self.llama_tokenizer(
p_after, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1)
p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1)
wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds, p_after_embeds], dim=1)
wrapped_atts_img = atts_img[:, :1].expand(-1, wrapped_img_embeds.shape[1])
return wrapped_img_embeds, wrapped_atts_img
else:
return img_embeds, atts_img
def forward(self, samples):
image = samples["image"]
img_embeds, atts_img = self.encode_img(image)
if hasattr(samples, 'question_split'): # VQA dataset
print('VQA Batch')
vqa_prompt = '###Human: <Img><ImageHere></Img> '
img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, vqa_prompt)
elif self.prompt_list:
prompt = random.choice(self.prompt_list)
img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompt)
self.llama_tokenizer.padding_side = "right"
text = [t + self.end_sym for t in samples["text_input"]]
to_regress_tokens = self.llama_tokenizer(
text,
return_tensors="pt",
padding="longest",
truncation=True,
max_length=self.max_txt_len,
add_special_tokens=False
).to(image.device)
targets = to_regress_tokens.input_ids.masked_fill(
to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, -100
)
empty_targets = (
torch.ones([atts_img.shape[0], atts_img.shape[1]+1],
dtype=torch.long).to(image.device).fill_(-100) # plus one for bos
)
targets = torch.cat([empty_targets, targets], dim=1)
batch_size = img_embeds.shape[0]
bos = torch.ones([batch_size, 1],
dtype=to_regress_tokens.input_ids.dtype,
device=to_regress_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id
bos_embeds = self.llama_model.model.embed_tokens(bos)
atts_bos = atts_img[:, :1]
to_regress_embeds = self.llama_model.model.embed_tokens(to_regress_tokens.input_ids)
inputs_embeds = torch.cat([bos_embeds, img_embeds, to_regress_embeds], dim=1)
attention_mask = torch.cat([atts_bos, atts_img, to_regress_tokens.attention_mask], dim=1)
with self.maybe_autocast():
outputs = self.llama_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
return_dict=True,
labels=targets,
)
loss = outputs.loss
return {"loss": loss}
@classmethod
def from_config(cls, cfg):
vit_model = cfg.get("vit_model", "eva_clip_g")
q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth")
img_size = cfg.get("image_size")
num_query_token = cfg.get("num_query_token")
llama_model = cfg.get("llama_model")
drop_path_rate = cfg.get("drop_path_rate", 0)
use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
vit_precision = cfg.get("vit_precision", "fp16")
freeze_vit = cfg.get("freeze_vit", True)
freeze_qformer = cfg.get("freeze_qformer", True)
low_resource = cfg.get("low_resource", False)
prompt_path = cfg.get("prompt_path", "")
prompt_template = cfg.get("prompt_template", "")
max_txt_len = cfg.get("max_txt_len", 32)
end_sym = cfg.get("end_sym", '\n')
model = cls(
vit_model=vit_model,
q_former_model=q_former_model,
img_size=img_size,
drop_path_rate=drop_path_rate,
use_grad_checkpoint=use_grad_checkpoint,
vit_precision=vit_precision,
freeze_vit=freeze_vit,
freeze_qformer=freeze_qformer,
num_query_token=num_query_token,
llama_model=llama_model,
prompt_path=prompt_path,
prompt_template=prompt_template,
max_txt_len=max_txt_len,
low_resource=low_resource,
end_sym=end_sym
)
ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4
if ckpt_path:
print("Load BLIP2-LLM Checkpoint: {}".format(ckpt_path))
ckpt = torch.load(ckpt_path, map_location="cpu")
msg = model.load_state_dict(ckpt['model'], strict=False)
return model
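# A minimal, self-contained sketch of the label masking used in forward() above:
# a block of -100 labels is prepended so the bos token and every image/prompt
# embedding position is ignored by the language-modelling loss, and only the
# answer tokens are scored. The `_demo_*` helper, sizes, and pad id below are
# hypothetical and purely illustrative.
def _demo_label_masking():
    import torch
    pad_token_id = 0                                       # hypothetical pad id
    answer_ids = torch.tensor([[5, 6, 7, pad_token_id]])   # (batch=1, answer_len=4)
    # pad positions are ignored by the loss
    targets = answer_ids.masked_fill(answer_ids == pad_token_id, -100)
    num_visual_positions = 3                               # bos + image/prompt embeddings (made-up length)
    empty_targets = torch.full((1, num_visual_positions), -100, dtype=torch.long)
    full_targets = torch.cat([empty_targets, targets], dim=1)
    # -> tensor([[-100, -100, -100, 5, 6, 7, -100]]): only the answer tokens contribute
    return full_targets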
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/mini_gpt4.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.modeling_outputs import (
ModelOutput,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
)
@dataclass
class BlipSimilarity(ModelOutput):
sim_i2t: torch.FloatTensor = None
sim_t2i: torch.FloatTensor = None
sim_i2t_m: Optional[torch.FloatTensor] = None
sim_t2i_m: Optional[torch.FloatTensor] = None
sim_i2t_targets: Optional[torch.FloatTensor] = None
sim_t2i_targets: Optional[torch.FloatTensor] = None
@dataclass
class BlipIntermediateOutput(ModelOutput):
"""
Data class for intermediate outputs of BLIP models.
image_embeds (torch.FloatTensor): Image embeddings, shape (batch_size, num_patches, embed_dim).
text_embeds (torch.FloatTensor): Text embeddings, shape (batch_size, seq_len, embed_dim).
image_embeds_m (torch.FloatTensor): Image embeddings from momentum visual encoder, shape (batch_size, num_patches, embed_dim).
text_embeds_m (torch.FloatTensor): Text embeddings from momentum text encoder, shape (batch_size, seq_len, embed_dim).
encoder_output (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder.
encoder_output_neg (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder for negative pairs.
decoder_output (CausalLMOutputWithCrossAttentions): output from the image-grounded text decoder.
decoder_labels (torch.LongTensor): labels for the captioning loss.
itm_logits (torch.FloatTensor): logits for the image-text matching loss, shape (batch_size * 3, 2).
itm_labels (torch.LongTensor): labels for the image-text matching loss, shape (batch_size * 3,)
"""
# uni-modal features
image_embeds: torch.FloatTensor = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds_m: Optional[torch.FloatTensor] = None
text_embeds_m: Optional[torch.FloatTensor] = None
# intermediate outputs of multimodal encoder
encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
itm_logits: Optional[torch.FloatTensor] = None
itm_labels: Optional[torch.LongTensor] = None
# intermediate outputs of multimodal decoder
decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None
decoder_labels: Optional[torch.LongTensor] = None
@dataclass
class BlipOutput(ModelOutput):
# some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional.
sims: Optional[BlipSimilarity] = None
intermediate_output: BlipIntermediateOutput = None
loss: Optional[torch.FloatTensor] = None
loss_itc: Optional[torch.FloatTensor] = None
loss_itm: Optional[torch.FloatTensor] = None
loss_lm: Optional[torch.FloatTensor] = None
@dataclass
class BlipOutputFeatures(ModelOutput):
"""
Data class of features from BlipFeatureExtractor.
Args:
image_embeds: (torch.FloatTensor) of shape (batch_size, num_patches+1, embed_dim), optional
image_features: (torch.FloatTensor) of shape (batch_size, num_patches+1, feature_dim), optional
text_embeds: (torch.FloatTensor) of shape (batch_size, sequence_length+1, embed_dim), optional
text_features: (torch.FloatTensor) of shape (batch_size, sequence_length+1, feature_dim), optional
The first embedding or feature is for the [CLS] token.
Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space.
"""
image_embeds: Optional[torch.FloatTensor] = None
image_embeds_proj: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
text_embeds_proj: Optional[torch.FloatTensor] = None
multimodal_embeds: Optional[torch.FloatTensor] = None
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/blip2_outputs.py |
# Based on EVA, BEIT, timm and DeiT code bases
# https://github.com/baaivision/EVA
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from minigpt4.common.dist_utils import download_cached_file
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
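# A small behavioural sketch of DropPath (stochastic depth): in training, each
# sample's residual branch is zeroed with probability drop_prob (timm rescales
# the survivors by 1 / (1 - drop_prob)); in eval mode it is the identity. The
# `_demo_*` helper and sizes below are hypothetical and purely illustrative.
def _demo_drop_path():
    import torch
    dp = DropPath(drop_prob=0.5)
    x = torch.ones(4, 3)
    dp.eval()
    assert torch.equal(dp(x), x)  # identity at inference time
    dp.train()
    out = dp(x)
    for row in out:               # each sample is either dropped or kept (uniformly scaled)
        assert torch.all(row == 0) or torch.all(row == row[0])
    return out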
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
# dropout after fc1 is commented out to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
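# A quick shape check of the module above: the learned table holds one bias per
# head for every relative (dh, dw) offset plus three special entries for cls
# interactions, and forward() expands it into a per-head (tokens+1, tokens+1)
# matrix. The `_demo_*` helper and the tiny 2x2 window below are illustrative only.
def _demo_relative_position_bias():
    module = RelativePositionBias(window_size=(2, 2), num_heads=4)
    bias = module()
    # 2x2 patches plus the cls token -> a 5x5 bias matrix per head
    assert bias.shape == (4, 5, 5)
    return bias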
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
use_mean_pooling=True, init_scale=0.001, use_checkpoint=False):
super().__init__()
self.image_size = img_size
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
self.use_checkpoint = use_checkpoint
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
# self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
# self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
# if isinstance(self.head, nn.Linear):
# trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
# if isinstance(self.head, nn.Linear):
# self.head.weight.data.mul_(init_scale)
# self.head.bias.data.mul_(init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, rel_pos_bias)
else:
x = blk(x, rel_pos_bias)
return x
# x = self.norm(x)
# if self.fc_norm is not None:
# t = x[:, 1:, :]
# return self.fc_norm(t.mean(1))
# else:
# return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
def get_intermediate_layers(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
features = []
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias)
features.append(x)
return features
def interpolate_pos_embed(model, checkpoint_model):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed'].float()
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
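# A self-contained sketch of the interpolation above with dummy tensors: a
# checkpoint trained on a 4x4 patch grid is resized to a 6x6 grid while the
# single extra (cls) token is kept unchanged. The `_demo_*` helper and all sizes
# below are made up for illustration.
def _demo_interpolate_pos_embed():
    import torch
    embed_dim, num_extra_tokens = 8, 1
    old_grid, new_grid = 4, 6
    pos_embed = torch.randn(1, num_extra_tokens + old_grid ** 2, embed_dim)
    extra = pos_embed[:, :num_extra_tokens]
    patches = pos_embed[:, num_extra_tokens:].reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
    patches = torch.nn.functional.interpolate(
        patches, size=(new_grid, new_grid), mode="bicubic", align_corners=False
    )
    patches = patches.permute(0, 2, 3, 1).flatten(1, 2)
    new_pos_embed = torch.cat((extra, patches), dim=1)
    assert new_pos_embed.shape == (1, num_extra_tokens + new_grid ** 2, embed_dim)
    return new_pos_embed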
def convert_weights_to_fp16(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
# if isinstance(l, (nn.MultiheadAttention, Attention)):
# for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
# tensor = getattr(l, attr)
# if tensor is not None:
# tensor.data = tensor.data.half()
model.apply(_convert_weights_to_fp16)
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
img_size=img_size,
patch_size=14,
use_mean_pooling=False,
embed_dim=1408,
depth=39,
num_heads=1408//88,
mlp_ratio=4.3637,
qkv_bias=True,
drop_path_rate=drop_path_rate,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
use_checkpoint=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
# model.to("cuda")
convert_weights_to_fp16(model)
return model
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/eva_vit.py
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import torch
from omegaconf import OmegaConf
from minigpt4.common.registry import registry
from minigpt4.models.base_model import BaseModel
from minigpt4.models.blip2 import Blip2Base
from minigpt4.models.mini_gpt4 import MiniGPT4
from minigpt4.processors.base_processor import BaseProcessor
__all__ = [
"load_model",
"BaseModel",
"Blip2Base",
"MiniGPT4",
]
def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None):
"""
Load supported models.
To list all available models and types in registry:
>>> from minigpt4.models import model_zoo
>>> print(model_zoo)
Args:
name (str): name of the model.
model_type (str): type of the model.
is_eval (bool): whether the model is in eval mode. Default: False.
device (str): device to use. Default: "cpu".
checkpoint (str): path or URL to a checkpoint. Default: None.
Note that the checkpoint is expected to have the same state_dict keys as the model.
Returns:
model (torch.nn.Module): model.
"""
model = registry.get_model_class(name).from_pretrained(model_type=model_type)
if checkpoint is not None:
model.load_checkpoint(checkpoint)
if is_eval:
model.eval()
if device == "cpu":
model = model.float()
return model.to(device)
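# A hedged usage sketch for load_model(). The architecture/type names below are
# placeholders, not a guarantee of what is registered in this repo; inspect
# `print(model_zoo)` for the real options.
def _demo_load_model():
    model = load_model(
        name="mini_gpt4",              # hypothetical registry name
        model_type="pretrain_vicuna",  # hypothetical model type
        is_eval=True,
        device="cpu",
    )
    return model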
def load_preprocess(config):
"""
Load preprocessor configs and construct preprocessors.
If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing.
Args:
config (dict): preprocessor configs.
Returns:
vis_processors (dict): preprocessors for visual inputs.
txt_processors (dict): preprocessors for text inputs.
Key is "train" or "eval" for processors used in training and evaluation respectively.
"""
def _build_proc_from_cfg(cfg):
return (
registry.get_processor_class(cfg.name).from_config(cfg)
if cfg is not None
else BaseProcessor()
)
vis_processors = dict()
txt_processors = dict()
vis_proc_cfg = config.get("vis_processor")
txt_proc_cfg = config.get("text_processor")
if vis_proc_cfg is not None:
vis_train_cfg = vis_proc_cfg.get("train")
vis_eval_cfg = vis_proc_cfg.get("eval")
else:
vis_train_cfg = None
vis_eval_cfg = None
vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg)
vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg)
if txt_proc_cfg is not None:
txt_train_cfg = txt_proc_cfg.get("train")
txt_eval_cfg = txt_proc_cfg.get("eval")
else:
txt_train_cfg = None
txt_eval_cfg = None
txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg)
txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg)
return vis_processors, txt_processors
def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"):
"""
Load model and its related preprocessors.
List all available models and types in registry:
>>> from minigpt4.models import model_zoo
>>> print(model_zoo)
Args:
name (str): name of the model.
model_type (str): type of the model.
is_eval (bool): whether the model is in eval mode. Default: False.
device (str): device to use. Default: "cpu".
Returns:
model (torch.nn.Module): model.
vis_processors (dict): preprocessors for visual inputs.
txt_processors (dict): preprocessors for text inputs.
"""
model_cls = registry.get_model_class(name)
# load model
model = model_cls.from_pretrained(model_type=model_type)
if is_eval:
model.eval()
# load preprocess
cfg = OmegaConf.load(model_cls.default_config_path(model_type))
if cfg is not None:
preprocess_cfg = cfg.preprocess
vis_processors, txt_processors = load_preprocess(preprocess_cfg)
else:
vis_processors, txt_processors = None, None
logging.info(
f"""No default preprocess for model {name} ({model_type}).
This can happen if the model is not finetuned on downstream datasets,
or it is not intended for direct use without finetuning.
"""
)
if device == "cpu" or device == torch.device("cpu"):
model = model.float()
return model.to(device), vis_processors, txt_processors
class ModelZoo:
"""
A utility class to create string representation of available model architectures and types.
>>> from minigpt4.models import model_zoo
>>> # list all available models
>>> print(model_zoo)
>>> # show total number of models
>>> print(len(model_zoo))
"""
def __init__(self) -> None:
self.model_zoo = {
k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys())
for k, v in registry.mapping["model_name_mapping"].items()
}
def __str__(self) -> str:
return (
"=" * 50
+ "\n"
+ f"{'Architectures':<30} {'Types'}\n"
+ "=" * 50
+ "\n"
+ "\n".join(
[
f"{name:<30} {', '.join(types)}"
for name, types in self.model_zoo.items()
]
)
)
def __iter__(self):
return iter(self.model_zoo.items())
def __len__(self):
return sum([len(v) for v in self.model_zoo.values()])
model_zoo = ModelZoo()
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from minigpt4.common.utils import get_abs_path, is_url
from omegaconf import OmegaConf
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[0].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define its
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
assert pretrain_path is not None, "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
class BaseEncoder(nn.Module):
"""
Base class for primitive encoders, such as ViT, TimeSformer, etc.
"""
def __init__(self):
super().__init__()
def forward_features(self, samples, **kwargs):
raise NotImplementedError
@property
def device(self):
return list(self.parameters())[0].device
class SharedQueueMixin:
@torch.no_grad()
def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None):
# gather keys before updating queue
image_feats = concat_all_gather(image_feat)
text_feats = concat_all_gather(text_feat)
batch_size = image_feats.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.image_queue[:, ptr : ptr + batch_size] = image_feats.T
self.text_queue[:, ptr : ptr + batch_size] = text_feats.T
if idxs is not None:
idxs = concat_all_gather(idxs)
self.idx_queue[:, ptr : ptr + batch_size] = idxs.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr
class MomentumDistilationMixin:
@torch.no_grad()
def copy_params(self):
for model_pair in self.model_pairs:
for param, param_m in zip(
model_pair[0].parameters(), model_pair[1].parameters()
):
param_m.data.copy_(param.data) # initialize
param_m.requires_grad = False # not update by gradient
@torch.no_grad()
def _momentum_update(self):
for model_pair in self.model_pairs:
for param, param_m in zip(
model_pair[0].parameters(), model_pair[1].parameters()
):
param_m.data = param_m.data * self.momentum + param.data * (
1.0 - self.momentum
)
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [
torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
torch.distributed.all_reduce(all_gradients)
return all_gradients[torch.distributed.get_rank()]
def all_gather_with_grad(tensors):
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
# Queue the gathered tensors
world_size = torch.distributed.get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
# tensor_all = GatherLayer.apply(tensors)
tensor_all = GatherLayer.apply(tensors)
return torch.cat(tensor_all, dim=0)
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
# if use distributed training
if not is_dist_avail_and_initialized():
return tensor
tensors_gather = [
torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def tile(x, dim, n_tile):
init_dim = x.size(dim)
repeat_idx = [1] * x.dim()
repeat_idx[dim] = n_tile
x = x.repeat(*(repeat_idx))
order_index = torch.LongTensor(
np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])
)
return torch.index_select(x, dim, order_index.to(x.device))
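# tile() above repeats each slice of x consecutively along `dim` (like
# repeat_interleave), rather than appending whole copies as a plain .repeat()
# would. A tiny illustration; the `_demo_*` helper below is hypothetical.
def _demo_tile():
    import torch
    x = torch.tensor([[1, 2], [3, 4]])
    tiled = tile(x, dim=0, n_tile=2)
    # rows are repeated in place: [[1, 2], [1, 2], [3, 4], [3, 4]]
    assert torch.equal(tiled, x.repeat_interleave(2, dim=0))
    return tiled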
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/base_model.py |
# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
""" PyTorch LLaMA model."""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers.models.llama.configuration_llama import LlamaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LlamaConfig"
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for uni-directional (causal) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
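# For a 3-token sequence the helper above yields the usual lower-triangular
# pattern: position i may attend to positions <= i, and blocked entries hold the
# most negative representable value so they vanish after softmax. A tiny check;
# the `_demo_*` helper below is illustrative only.
def _demo_make_causal_mask():
    import torch
    mask = _make_causal_mask((1, 3), torch.float32, device=torch.device("cpu"))
    allowed = mask[0, 0] == 0.0  # 0.0 where attention is allowed
    assert torch.equal(allowed, torch.tril(torch.ones(3, 3, dtype=torch.bool)))
    return mask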
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
class LlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
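# RMSNorm normalizes by the root-mean-square of the features instead of
# centering: y = weight * x / sqrt(mean(x^2) + eps). A quick numerical check of
# the module above against that formula (a freshly initialized weight is all
# ones); the `_demo_*` helper and sizes below are illustrative only.
def _demo_rmsnorm():
    import torch
    norm = LlamaRMSNorm(hidden_size=4, eps=1e-6)
    x = torch.randn(2, 3, 4)
    expected = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), expected, atol=1e-6)
    return norm(x)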
class LlamaRotaryEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
self.register_buffer("inv_freq", inv_freq)
# Build here to make `torch.jit.trace` work.
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
# This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
if seq_len > self.max_seq_len_cached:
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
return (
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
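# Rotary embeddings rotate pairs of query/key channels by a position-dependent
# angle so that attention dot products depend only on relative positions. Shapes
# are preserved, and position 0 is rotated by angle 0 (cos=1, sin=0), i.e. left
# unchanged. The `_demo_*` helper and sizes below are illustrative only.
def _demo_rotary_embedding():
    import torch
    head_dim, seq_len = 8, 4
    rotary = LlamaRotaryEmbedding(head_dim, max_position_embeddings=16)
    q = torch.randn(1, 2, seq_len, head_dim)  # (bsz, num_heads, seq_len, head_dim)
    k = torch.randn(1, 2, seq_len, head_dim)
    cos, sin = rotary(q, seq_len=seq_len)
    position_ids = torch.arange(seq_len).unsqueeze(0)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    assert torch.allclose(q_rot[:, :, 0], q[:, :, 0], atol=1e-6)  # position 0 unchanged
    return q_rot, k_rot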
class LlamaMLP(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
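# LlamaMLP is the gated (SwiGLU-style) feed-forward:
# out = down_proj(act(gate_proj(x)) * up_proj(x)). A quick equivalence check with
# tiny sizes; using "silu" as the activation here is an assumption for the demo,
# and the `_demo_*` helper is illustrative only.
def _demo_llama_mlp():
    import torch
    mlp = LlamaMLP(hidden_size=8, intermediate_size=16, hidden_act="silu")
    x = torch.randn(2, 5, 8)
    expected = mlp.down_proj(torch.nn.functional.silu(mlp.gate_proj(x)) * mlp.up_proj(x))
    assert torch.allclose(mlp(x), expected)
    return mlp(x)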
class LlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: LlamaConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.max_position_embeddings = config.max_position_embeddings
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
raise ValueError(
f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights + attention_mask
attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
class LlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = LlamaAttention(config=config)
self.mlp = LlamaMLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
)
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LlamaConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class LlamaPreTrainedModel(PreTrainedModel):
config_class = LlamaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LlamaModel):
module.gradient_checkpointing = value
LLAMA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class LlamaModel(LlamaPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if query_embeds is not None:
inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
batch_size, seq_length, _ = inputs_embeds.shape
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
)
hidden_states = inputs_embeds
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, None)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
position_ids,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
class LlamaForCausalLM(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.model = LlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
query_embeds=query_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
query_embeds = None
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"query_embeds": query_embeds,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
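# Editorial sketch (not part of the upstream file): `_reorder_cache` above realigns every cached
# key/value tensor with the beams kept after a beam-search step. Shapes below are hypothetical.
def _sketch_reorder_cache_effect() -> torch.Size:
    num_layers, batch_beams, heads, seq, dim = 2, 4, 8, 5, 16
    past = tuple(
        (torch.randn(batch_beams, heads, seq, dim), torch.randn(batch_beams, heads, seq, dim))
        for _ in range(num_layers)
    )
    beam_idx = torch.tensor([2, 2, 0, 1])  # beams 2, 2, 0 and 1 survived this step
    reordered = LlamaForCausalLM._reorder_cache(past, beam_idx)
    return reordered[0][0].shape  # shape is unchanged; rows now follow beam_idx order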
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/modeling_llama.py |
"""
* Copyright (c) 2023, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By Junnan Li
* Based on huggingface code base
* https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
"""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Dict, Any
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
self.config = config
def forward(
self,
input_ids=None,
position_ids=None,
query_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[
:, past_key_values_length : seq_length + past_key_values_length
].clone()
if input_ids is not None:
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
if query_embeds is not None:
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
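# Editorial sketch (not part of the upstream file): BertEmbeddings.forward above prepends the
# learned query embeddings to the token embeddings, so the encoder sees a sequence of length
# num_query + num_text. Sizes below are hypothetical.
def _sketch_query_plus_text_length(batch: int = 2, num_query: int = 32, num_text: int = 8, hidden: int = 768) -> torch.Size:
    query_embeds = torch.zeros(batch, num_query, hidden)
    token_embeds = torch.zeros(batch, num_text, hidden)
    return torch.cat((query_embeds, token_embeds), dim=1).shape  # (batch, num_query + num_text, hidden)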
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, "embedding_size"
):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * config.max_position_embeddings - 1, self.attention_head_size
)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(-1, 1)
position_ids_r = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(
distance + self.max_position_embeddings - 1
)
positional_embedding = positional_embedding.to(
dtype=query_layer.dtype
) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
relative_position_scores_key = torch.einsum(
"bhrd,lrd->bhlr", key_layer, positional_embedding
)
attention_scores = (
attention_scores
+ relative_position_scores_query
+ relative_position_scores_key
)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (
(context_layer, attention_probs) if output_attentions else (context_layer,)
)
outputs = outputs + (past_key_value,)
return outputs
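# Editorial sketch (not part of the upstream file): `transpose_for_scores` above splits the hidden
# dimension into (num_heads, head_size) and moves the head axis forward so attention can be
# computed per head. Sizes below are hypothetical.
def _sketch_transpose_for_scores(batch: int = 2, seq: int = 5, heads: int = 12, head_size: int = 64) -> torch.Size:
    x = torch.zeros(batch, seq, heads * head_size)
    x = x.view(batch, seq, heads, head_size)
    return x.permute(0, 2, 1, 3).shape  # (batch, heads, seq, head_size)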
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads,
self.self.num_attention_heads,
self.self.attention_head_size,
self.pruned_heads,
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = (
self.self.attention_head_size * self.self.num_attention_heads
)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.layer_num = layer_num
if (
self.config.add_cross_attention
and layer_num % self.config.cross_attention_freq == 0
):
self.crossattention = BertAttention(
config, is_cross_attention=self.config.add_cross_attention
)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.intermediate_query = BertIntermediate(config)
self.output_query = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = (
past_key_value[:2] if past_key_value is not None else None
)
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
assert (
encoder_hidden_states is not None
), "encoder_hidden_states must be given for cross-attention layers"
cross_attention_outputs = self.crossattention(
query_attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
query_attention_output = cross_attention_outputs[0]
outputs = (
outputs + cross_attention_outputs[1:-1]
) # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk_query,
self.chunk_size_feed_forward,
self.seq_len_dim,
query_attention_output,
)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[:, query_length:, :],
)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
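# Editorial sketch (not part of the upstream file): in BertLayer.forward above, only the first
# `query_length` positions go through cross-attention and the query-specific feed-forward
# (`intermediate_query`/`output_query`); any remaining text positions use the ordinary
# feed-forward, and the two halves are concatenated back together.
def _sketch_query_text_split(hidden: torch.Tensor, query_length: int):
    query_part = hidden[:, :query_length, :]  # cross-attends to the image encoder
    text_part = hidden[:, query_length:, :]   # plain self-attention + standard FFN
    return query_part, text_part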
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config, i) for i in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(
*inputs, past_key_value, output_attentions, query_length
)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
query_length,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To be used as a decoder, the model needs to be initialized with the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=False):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
has_query: bool = False,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
if has_query: # UniLM style attention mask
causal_mask = torch.cat(
[
torch.zeros(
(batch_size, prefix_seq_len, seq_length),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=1,
)
causal_mask = torch.cat(
[
torch.ones(
(batch_size, causal_mask.shape[1], prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# use_cache = use_cache if use_cache is not None else self.config.use_cache
if input_ids is None:
assert (
query_embeds is not None
), "You have to specify query_embeds when input_ids is None"
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] - self.config.query_length
if past_key_values is not None
else 0
)
query_length = query_embeds.shape[1] if query_embeds is not None else 0
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
query_embeds=query_embeds,
past_key_values_length=past_key_values_length,
)
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if is_decoder:
extended_attention_mask = self.get_extended_attention_mask(
attention_mask,
input_ids.shape,
device,
is_decoder,
has_query=(query_embeds is not None),
)
else:
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
0
].size()
else:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [
self.invert_attention_mask(mask) for mask in encoder_attention_mask
]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
query_length=query_length,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
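# Editorial sketch (not part of the upstream file): the extended attention mask built above is
# additive. Kept positions contribute 0.0 to the raw attention scores and masked positions
# contribute -10000.0, which effectively removes them after the softmax.
def _sketch_additive_mask() -> torch.Tensor:
    attention_mask = torch.tensor([[1.0, 1.0, 0.0]])  # 1 = attend, 0 = ignore
    extended = attention_mask[:, None, None, :]       # broadcastable over heads and query positions
    return (1.0 - extended) * -10000.0                # 0 for kept positions, -10000 for masked ones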
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=True,
reduction="mean",
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if labels is not None:
use_cache = False
if past_key_values is not None:
query_embeds = None
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
query_embeds=query_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
)
sequence_output = outputs[0]
if query_embeds is not None:
sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
lm_loss = loss_fct(
shifted_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
if reduction == "none":
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(
self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
query_mask = input_ids.new_ones(query_embeds.shape[:-1])
attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"input_ids": input_ids,
"query_embeds": query_embeds,
"attention_mask": attention_mask,
"past_key_values": past,
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
"is_decoder": True,
}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
query_embeds=query_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
)
        # Default to the full sequence output; strip the query tokens when they were prepended.
        sequence_output = outputs[0]
        if query_embeds is not None:
            sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (
((masked_lm_loss,) + output) if masked_lm_loss is not None else output
)
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/Qformer.py |
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import contextlib
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import minigpt4.common.dist_utils as dist_utils
from minigpt4.common.dist_utils import download_cached_file
from minigpt4.common.utils import is_url
from minigpt4.common.logger import MetricLogger
from minigpt4.models.base_model import BaseModel
from minigpt4.models.Qformer import BertConfig, BertLMHeadModel
from minigpt4.models.eva_vit import create_eva_vit_g
from transformers import BertTokenizer
class Blip2Base(BaseModel):
@classmethod
def init_tokenizer(cls):
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"bos_token": "[DEC]"})
return tokenizer
def maybe_autocast(self, dtype=torch.float16):
# if on cpu, don't use autocast
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
enable_autocast = self.device != torch.device("cpu")
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
@classmethod
def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
encoder_config = BertConfig.from_pretrained("bert-base-uncased")
encoder_config.encoder_width = vision_width
# insert cross-attention layer every other block
encoder_config.add_cross_attention = True
encoder_config.cross_attention_freq = cross_attention_freq
encoder_config.query_length = num_query_token
Qformer = BertLMHeadModel(config=encoder_config)
query_tokens = nn.Parameter(
torch.zeros(1, num_query_token, encoder_config.hidden_size)
)
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
return Qformer, query_tokens
@classmethod
def init_vision_encoder(
cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision
):
assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
visual_encoder = create_eva_vit_g(
img_size, drop_path_rate, use_grad_checkpoint, precision
)
ln_vision = LayerNorm(visual_encoder.num_features)
return visual_encoder, ln_vision
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
msg = self.load_state_dict(state_dict, strict=False)
# logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
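# Editorial sketch (not part of the upstream file): the LayerNorm subclass above runs the
# normalization in float32 and casts the result back, which keeps half-precision activations
# numerically stable. Sizes below are hypothetical.
def _sketch_fp16_safe_layernorm() -> torch.dtype:
    ln = LayerNorm(8)
    x = torch.randn(2, 4, 8, dtype=torch.float16)
    return ln(x).dtype  # torch.float16 -- computed internally in fp32, returned in the input dtype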
def compute_sim_matrix(model, data_loader, **kwargs):
k_test = kwargs.pop("k_test")
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation:"
logging.info("Computing features for evaluation...")
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_ids = []
text_embeds = []
text_atts = []
for i in range(0, num_text, text_bs):
text = texts[i : min(num_text, i + text_bs)]
text_input = model.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=35,
return_tensors="pt",
).to(model.device)
text_feat = model.forward_text(text_input)
text_embed = F.normalize(model.text_proj(text_feat))
text_embeds.append(text_embed)
text_ids.append(text_input.input_ids)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds, dim=0)
text_ids = torch.cat(text_ids, dim=0)
text_atts = torch.cat(text_atts, dim=0)
vit_feats = []
image_embeds = []
for samples in data_loader:
image = samples["image"]
image = image.to(model.device)
image_feat, vit_feat = model.forward_image(image)
image_embed = model.vision_proj(image_feat)
image_embed = F.normalize(image_embed, dim=-1)
vit_feats.append(vit_feat.cpu())
image_embeds.append(image_embed)
vit_feats = torch.cat(vit_feats, dim=0)
image_embeds = torch.cat(image_embeds, dim=0)
sims_matrix = []
for image_embed in image_embeds:
sim_q2t = image_embed @ text_embeds.t()
sim_i2t, _ = sim_q2t.max(0)
sims_matrix.append(sim_i2t)
sims_matrix = torch.stack(sims_matrix, dim=0)
score_matrix_i2t = torch.full(
(len(data_loader.dataset.image), len(texts)), -100.0
).to(model.device)
num_tasks = dist_utils.get_world_size()
rank = dist_utils.get_rank()
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device)
score = model.compute_itm(
image_inputs=image_inputs,
text_ids=text_ids[topk_idx],
text_atts=text_atts[topk_idx],
).float()
score_matrix_i2t[start + i, topk_idx] = score + topk_sim
sims_matrix = sims_matrix.t()
score_matrix_t2i = torch.full(
(len(texts), len(data_loader.dataset.image)), -100.0
).to(model.device)
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
image_inputs = vit_feats[topk_idx.cpu()].to(model.device)
score = model.compute_itm(
image_inputs=image_inputs,
text_ids=text_ids[start + i].repeat(k_test, 1),
text_atts=text_atts[start + i].repeat(k_test, 1),
).float()
score_matrix_t2i[start + i, topk_idx] = score + topk_sim
if dist_utils.is_dist_avail_and_initialized():
dist.barrier()
torch.distributed.all_reduce(
score_matrix_i2t, op=torch.distributed.ReduceOp.SUM
)
torch.distributed.all_reduce(
score_matrix_t2i, op=torch.distributed.ReduceOp.SUM
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Evaluation time {}".format(total_time_str))
return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
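# Editorial sketch (not part of the upstream file): compute_sim_matrix above shards the rows of the
# similarity matrix across ranks and merges the partial score matrices with an all-reduce SUM.
# The row-range arithmetic for one hypothetical configuration:
def _sketch_rank_row_range(num_rows: int = 10, world_size: int = 3, rank: int = 1):
    step = num_rows // world_size + 1  # 4 rows per rank here; the last rank may get fewer
    start = rank * step                # 4
    end = min(num_rows, start + step)  # 8
    return start, end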
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/models/blip2.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import re
from minigpt4.common.registry import registry
from minigpt4.processors.base_processor import BaseProcessor
from minigpt4.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
@registry.register_processor("blip_caption")
class BlipCaptionProcessor(BaseProcessor):
def __init__(self, prompt="", max_words=50):
self.prompt = prompt
self.max_words = max_words
def __call__(self, caption):
caption = self.prompt + self.pre_caption(caption)
return caption
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
prompt = cfg.get("prompt", "")
max_words = cfg.get("max_words", 50)
return cls(prompt=prompt, max_words=max_words)
def pre_caption(self, caption):
caption = re.sub(
r"([.!\"()*#:;~])",
" ",
caption.lower(),
)
caption = re.sub(
r"\s{2,}",
" ",
caption,
)
caption = caption.rstrip("\n")
caption = caption.strip(" ")
# truncate caption
caption_words = caption.split(" ")
if len(caption_words) > self.max_words:
caption = " ".join(caption_words[: self.max_words])
return caption
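# Editorial sketch (not part of the upstream file): what `pre_caption` does to a raw caption.
# The input string is made up for illustration.
def _sketch_caption_cleaning() -> str:
    proc = BlipCaptionProcessor(prompt="a photo of ", max_words=50)
    return proc("A *dog*!! running;   fast")  # -> "a photo of a dog running fast"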
@registry.register_processor("blip2_image_train")
class Blip2ImageTrainProcessor(BlipImageBaseProcessor):
def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(
image_size,
scale=(min_scale, max_scale),
interpolation=InterpolationMode.BICUBIC,
),
transforms.ToTensor(),
self.normalize,
]
)
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 224)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
min_scale = cfg.get("min_scale", 0.5)
max_scale = cfg.get("max_scale", 1.0)
return cls(
image_size=image_size,
mean=mean,
std=std,
min_scale=min_scale,
max_scale=max_scale,
)
@registry.register_processor("blip2_image_eval")
class Blip2ImageEvalProcessor(BlipImageBaseProcessor):
def __init__(self, image_size=224, mean=None, std=None):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.Resize(
(image_size, image_size), interpolation=InterpolationMode.BICUBIC
),
transforms.ToTensor(),
self.normalize,
]
)
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 224)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
        return cls(image_size=image_size, mean=mean, std=std)
 | EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/processors/blip_processors.py
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from minigpt4.processors.base_processor import BaseProcessor
from minigpt4.processors.blip_processors import (
Blip2ImageTrainProcessor,
Blip2ImageEvalProcessor,
BlipCaptionProcessor,
)
from minigpt4.common.registry import registry
__all__ = [
"BaseProcessor",
"Blip2ImageTrainProcessor",
"Blip2ImageEvalProcessor",
"BlipCaptionProcessor",
]
def load_processor(name, cfg=None):
"""
Example
>>> processor = load_processor("alpro_video_train", cfg=None)
"""
processor = registry.get_processor_class(name).from_config(cfg)
return processor
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/processors/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from omegaconf import OmegaConf
class BaseProcessor:
def __init__(self):
self.transform = lambda x: x
return
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
return cls()
def build(self, **kwargs):
cfg = OmegaConf.create(kwargs)
return self.from_config(cfg)
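# Editorial sketch (not part of the upstream file): `build` wraps keyword arguments in an OmegaConf
# config and dispatches to `from_config`; the base class itself is an identity transform.
def _sketch_base_processor_identity() -> str:
    proc = BaseProcessor()
    return proc("unchanged input")  # the default transform returns its input untouched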
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/processors/base_processor.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import cv2
import numpy as np
import torch
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
"""
same output as PIL.ImageOps.autocontrast
"""
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def equalize_func(img):
"""
same output as PIL.ImageOps.equalize
    PIL's implementation is different from cv2.equalizeHist
"""
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0:
return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def rotate_func(img, degree, fill=(0, 0, 0)):
"""
like PIL, rotate by degree, not radians
"""
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out
def solarize_func(img, thresh=128):
"""
    same output as PIL.ImageOps.solarize
"""
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out
def color_func(img, factor):
"""
same output as PIL.ImageEnhance.Color
"""
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = np.float32(
[[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
) * factor + np.float32([[0.114], [0.587], [0.299]])
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = (
np.array([(el - mean) * factor + mean for el in range(256)])
.clip(0, 255)
.astype(np.uint8)
)
out = table[img]
return out
def brightness_func(img, factor):
"""
    same output as PIL.ImageEnhance.Brightness
"""
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def sharpness_func(img, factor):
"""
    The differences between this result and PIL's are all on the 4 boundaries; the center
    areas are the same
"""
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def posterize_func(img, bits):
"""
same output as PIL.ImageOps.posterize
"""
out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
return out
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
def level_to_args(level):
return ((level / MAX_LEVEL) * 1.8 + 0.1,)
return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 0.3
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * float(translate_const)
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = int((level / MAX_LEVEL) * cutout_const)
return (level, replace_value)
return level_to_args
def solarize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 256)
return (level,)
return level_to_args
def none_level_to_args(level):
return ()
def posterize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 4)
return (level,)
return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 30
if np.random.random() < 0.5:
level = -level
return (level, replace_value)
return level_to_args
func_dict = {
"Identity": identity_func,
"AutoContrast": autocontrast_func,
"Equalize": equalize_func,
"Rotate": rotate_func,
"Solarize": solarize_func,
"Color": color_func,
"Contrast": contrast_func,
"Brightness": brightness_func,
"Sharpness": sharpness_func,
"ShearX": shear_x_func,
"TranslateX": translate_x_func,
"TranslateY": translate_y_func,
"Posterize": posterize_func,
"ShearY": shear_y_func,
}
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
"Identity": none_level_to_args,
"AutoContrast": none_level_to_args,
"Equalize": none_level_to_args,
"Rotate": rotate_level_to_args(MAX_LEVEL, replace_value),
"Solarize": solarize_level_to_args(MAX_LEVEL),
"Color": enhance_level_to_args(MAX_LEVEL),
"Contrast": enhance_level_to_args(MAX_LEVEL),
"Brightness": enhance_level_to_args(MAX_LEVEL),
"Sharpness": enhance_level_to_args(MAX_LEVEL),
"ShearX": shear_level_to_args(MAX_LEVEL, replace_value),
"TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"Posterize": posterize_level_to_args(MAX_LEVEL),
"ShearY": shear_level_to_args(MAX_LEVEL, replace_value),
}
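# Minimal usage sketch (illustration only, not part of the original API): func_dict
# and arg_dict can be paired directly to apply a single named op at a chosen level;
# `apply_named_op` is a hypothetical helper added here for demonstration.
def apply_named_op(img, name, level=5):
    # look up the magnitude-to-arguments mapping, then call the matching op
    args = arg_dict[name](level)
    return func_dict[name](img, *args)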
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
class VideoRandomAugment(object):
def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):
self.N = N
self.M = M
self.p = p
self.tensor_in_tensor_out = tensor_in_tensor_out
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N, replace=False)
return [(op, self.M) for op in sampled_ops]
def __call__(self, frames):
assert (
frames.shape[-1] == 3
), "Expecting last dimension for 3-channels RGB (b, h, w, c)."
if self.tensor_in_tensor_out:
frames = frames.numpy().astype(np.uint8)
num_frames = frames.shape[0]
ops = num_frames * [self.get_random_ops()]
apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]
frames = torch.stack(
list(map(self._aug, frames, ops, apply_or_not)), dim=0
).float()
return frames
def _aug(self, img, ops, apply_or_not):
for i, (name, level) in enumerate(ops):
if not apply_or_not[i]:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return torch.from_numpy(img)
if __name__ == "__main__":
a = RandomAugment()
img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)  # the ops expect a uint8 HxWx3 image
a(img)
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/processors/randaugment.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import math
from minigpt4.common.registry import registry
@registry.register_lr_scheduler("linear_warmup_step_lr")
class LinearWarmupStepLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
min_lr,
init_lr,
decay_rate=1,
warmup_start_lr=-1,
warmup_steps=0,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.min_lr = min_lr
self.decay_rate = decay_rate
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
if cur_epoch == 0:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
step_lr_schedule(
epoch=cur_epoch,
optimizer=self.optimizer,
init_lr=self.init_lr,
min_lr=self.min_lr,
decay_rate=self.decay_rate,
)
@registry.register_lr_scheduler("linear_warmup_cosine_lr")
class LinearWarmupCosineLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
iters_per_epoch,
min_lr,
init_lr,
warmup_steps=0,
warmup_start_lr=-1,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.iters_per_epoch = iters_per_epoch
self.min_lr = min_lr
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
total_cur_step = cur_epoch * self.iters_per_epoch + cur_step
if total_cur_step < self.warmup_steps:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
cosine_lr_schedule(
epoch=total_cur_step,
optimizer=self.optimizer,
max_epoch=self.max_epoch * self.iters_per_epoch,
init_lr=self.init_lr,
min_lr=self.min_lr,
)
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
"""Decay the learning rate"""
lr = (init_lr - min_lr) * 0.5 * (
1.0 + math.cos(math.pi * epoch / max_epoch)
) + min_lr
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
"""Warmup the learning rate"""
lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
"""Decay the learning rate"""
lr = max(min_lr, init_lr * (decay_rate**epoch))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/optims.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import json
from typing import Dict
from omegaconf import OmegaConf
from minigpt4.common.registry import registry
class Config:
def __init__(self, args):
self.config = {}
self.args = args
# Register the config and configuration for setup
registry.register("configuration", self)
user_config = self._build_opt_list(self.args.options)
config = OmegaConf.load(self.args.cfg_path)
runner_config = self.build_runner_config(config)
model_config = self.build_model_config(config, **user_config)
dataset_config = self.build_dataset_config(config)
# Validate the user-provided runner configuration
# model and dataset configuration are supposed to be validated by the respective classes
# [TODO] validate the model/dataset configuration
# self._validate_runner_config(runner_config)
# Override the default configuration with user options.
self.config = OmegaConf.merge(
runner_config, model_config, dataset_config, user_config
)
def _validate_runner_config(self, runner_config):
"""
This method validates the configuration, such that
1) all the user specified options are valid;
2) no type mismatches between the user specified options and the config.
"""
runner_config_validator = create_runner_config_validator()
runner_config_validator.validate(runner_config)
def _build_opt_list(self, opts):
opts_dot_list = self._convert_to_dot_list(opts)
return OmegaConf.from_dotlist(opts_dot_list)
@staticmethod
def build_model_config(config, **kwargs):
model = config.get("model", None)
assert model is not None, "Missing model configuration file."
model_cls = registry.get_model_class(model.arch)
assert model_cls is not None, f"Model '{model.arch}' has not been registered."
model_type = kwargs.get("model.model_type", None)
if not model_type:
model_type = model.get("model_type", None)
# else use the model type selected by user.
assert model_type is not None, "Missing model_type."
model_config_path = model_cls.default_config_path(model_type=model_type)
model_config = OmegaConf.create()
# hierarchy override, customized config > default config
model_config = OmegaConf.merge(
model_config,
OmegaConf.load(model_config_path),
{"model": config["model"]},
)
return model_config
@staticmethod
def build_runner_config(config):
return {"run": config.run}
@staticmethod
def build_dataset_config(config):
datasets = config.get("datasets", None)
if datasets is None:
raise KeyError(
"Expecting 'datasets' as the root key for dataset configuration."
)
dataset_config = OmegaConf.create()
for dataset_name in datasets:
builder_cls = registry.get_builder_class(dataset_name)
dataset_config_type = datasets[dataset_name].get("type", "default")
dataset_config_path = builder_cls.default_config_path(
type=dataset_config_type
)
# hierarchy override, customized config > default config
dataset_config = OmegaConf.merge(
dataset_config,
OmegaConf.load(dataset_config_path),
{"datasets": {dataset_name: config["datasets"][dataset_name]}},
)
return dataset_config
def _convert_to_dot_list(self, opts):
if opts is None:
opts = []
if len(opts) == 0:
return opts
has_equal = opts[0].find("=") != -1
if has_equal:
return opts
return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
def get_config(self):
return self.config
@property
def run_cfg(self):
return self.config.run
@property
def datasets_cfg(self):
return self.config.datasets
@property
def model_cfg(self):
return self.config.model
def pretty_print(self):
logging.info("\n===== Running Parameters =====")
logging.info(self._convert_node_to_json(self.config.run))
logging.info("\n====== Dataset Attributes ======")
datasets = self.config.datasets
for dataset in datasets:
if dataset in self.config.datasets:
logging.info(f"\n======== {dataset} =======")
dataset_config = self.config.datasets[dataset]
logging.info(self._convert_node_to_json(dataset_config))
else:
logging.warning(f"No dataset named '{dataset}' in config. Skipping")
logging.info(f"\n====== Model Attributes ======")
logging.info(self._convert_node_to_json(self.config.model))
def _convert_node_to_json(self, node):
container = OmegaConf.to_container(node, resolve=True)
return json.dumps(container, indent=4, sort_keys=True)
def to_dict(self):
return OmegaConf.to_container(self.config)
def node_to_dict(node):
return OmegaConf.to_container(node)
class ConfigValidator:
"""
This is a preliminary implementation to centralize and validate the configuration.
May be altered in the future.
A helper class to validate configurations from yaml file.
This serves the following purposes:
1. Ensure all the options in the yaml are defined, raise error if not.
2. when type mismatches are found, the validator will raise an error.
3. a central place to store and display helpful messages for supported configurations.
"""
class _Argument:
def __init__(self, name, choices=None, type=None, help=None):
self.name = name
self.val = None
self.choices = choices
self.type = type
self.help = help
def __str__(self):
s = f"{self.name}={self.val}"
if self.type is not None:
s += f", ({self.type})"
if self.choices is not None:
s += f", choices: {self.choices}"
if self.help is not None:
s += f", ({self.help})"
return s
def __init__(self, description):
self.description = description
self.arguments = dict()
self.parsed_args = None
def __getitem__(self, key):
assert self.parsed_args is not None, "No arguments parsed yet."
return self.parsed_args[key]
def __str__(self) -> str:
return self.format_help()
def add_argument(self, *args, **kwargs):
"""
Assume the first argument is the name of the argument.
"""
self.arguments[args[0]] = self._Argument(*args, **kwargs)
def validate(self, config=None):
"""
Validate a yaml config (dict-like) against the registered arguments.
"""
for k, v in config.items():
assert (
k in self.arguments
), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}."""
if self.arguments[k].type is not None:
try:
self.arguments[k].val = self.arguments[k].type(v)
except ValueError:
raise ValueError(f"{k} is not a valid {self.arguments[k].type}.")
if self.arguments[k].choices is not None:
assert (
v in self.arguments[k].choices
), f"""{k} must be one of {self.arguments[k].choices}."""
return config
def format_arguments(self):
return str([f"{k}" for k in sorted(self.arguments.keys())])
def format_help(self):
# description + key-value pair string for each argument
help_msg = str(self.description)
return help_msg + ", available arguments: " + self.format_arguments()
def print_help(self):
# display help message
print(self.format_help())
def create_runner_config_validator():
validator = ConfigValidator(description="Runner configurations")
validator.add_argument(
"runner",
type=str,
choices=["runner_base", "runner_iter"],
help="""Runner to use. The "runner_base" uses epoch-based training while iter-based
runner runs based on iters. Default: runner_base""",
)
# add arguments for training dataset ratios
validator.add_argument(
"train_dataset_ratios",
type=Dict[str, float],
help="""Ratios of training dataset. This is used in iteration-based runner.
Not supported for the epoch-based runner because defining an epoch becomes tricky.
Default: None""",
)
validator.add_argument(
"max_iters",
type=float,
help="Maximum number of iterations to run.",
)
validator.add_argument(
"max_epoch",
type=int,
help="Maximum number of epochs to run.",
)
# add arguments for iters_per_inner_epoch
validator.add_argument(
"iters_per_inner_epoch",
type=float,
help="Number of iterations per inner epoch. This is required when runner is runner_iter.",
)
lr_scheds_choices = registry.list_lr_schedulers()
validator.add_argument(
"lr_sched",
type=str,
choices=lr_scheds_choices,
help="Learning rate scheduler to use, from {}".format(lr_scheds_choices),
)
task_choices = registry.list_tasks()
validator.add_argument(
"task",
type=str,
choices=task_choices,
help="Task to use, from {}".format(task_choices),
)
# add arguments for init_lr
validator.add_argument(
"init_lr",
type=float,
help="Initial learning rate. This will be the learning rate after warmup and before decay.",
)
# add arguments for min_lr
validator.add_argument(
"min_lr",
type=float,
help="Minimum learning rate (after decay).",
)
# add arguments for warmup_lr
validator.add_argument(
"warmup_lr",
type=float,
help="Starting learning rate for warmup.",
)
# add arguments for learning rate decay rate
validator.add_argument(
"lr_decay_rate",
type=float,
help="Learning rate decay rate. Required if using a decaying learning rate scheduler.",
)
# add arguments for weight decay
validator.add_argument(
"weight_decay",
type=float,
help="Weight decay rate.",
)
# add arguments for training batch size
validator.add_argument(
"batch_size_train",
type=int,
help="Training batch size.",
)
# add arguments for evaluation batch size
validator.add_argument(
"batch_size_eval",
type=int,
help="Evaluation batch size, including validation and testing.",
)
# add arguments for number of workers for data loading
validator.add_argument(
"num_workers",
help="Number of workers for data loading.",
)
# add arguments for warm up steps
validator.add_argument(
"warmup_steps",
type=int,
help="Number of warmup steps. Required if a warmup schedule is used.",
)
# add arguments for random seed
validator.add_argument(
"seed",
type=int,
help="Random seed.",
)
# add arguments for output directory
validator.add_argument(
"output_dir",
type=str,
help="Output directory to save checkpoints and logs.",
)
# add arguments for whether only use evaluation
validator.add_argument(
"evaluate",
help="Whether to only evaluate the model. If true, training will not be performed.",
)
# add arguments for splits used for training, e.g. ["train", "val"]
validator.add_argument(
"train_splits",
type=list,
help="Splits to use for training.",
)
# add arguments for splits used for validation, e.g. ["val"]
validator.add_argument(
"valid_splits",
type=list,
help="Splits to use for validation. If not provided, will skip the validation.",
)
# add arguments for splits used for testing, e.g. ["test"]
validator.add_argument(
"test_splits",
type=list,
help="Splits to use for testing. If not provided, will skip the testing.",
)
# add arguments for accumulating gradient for iterations
validator.add_argument(
"accum_grad_iters",
type=int,
help="Number of iterations to accumulate gradient for.",
)
# ====== distributed training ======
validator.add_argument(
"device",
type=str,
choices=["cpu", "cuda"],
help="Device to use. Support 'cuda' or 'cpu' as for now.",
)
validator.add_argument(
"world_size",
type=int,
help="Number of processes participating in the job.",
)
validator.add_argument("dist_url", type=str)
validator.add_argument("distributed", type=bool)
# add argument to opt in or out of using the distributed sampler during evaluation
validator.add_argument(
"use_dist_eval_sampler",
type=bool,
help="Whether to use distributed sampler during evaluation or not.",
)
# ====== task specific ======
# generation task specific arguments
# add arguments for maximal length of text output
validator.add_argument(
"max_len",
type=int,
help="Maximal length of text output.",
)
# add arguments for minimal length of text output
validator.add_argument(
"min_len",
type=int,
help="Minimal length of text output.",
)
# add arguments number of beams
validator.add_argument(
"num_beams",
type=int,
help="Number of beams used for beam search.",
)
# vqa task specific arguments
# add arguments for number of answer candidates
validator.add_argument(
"num_ans_candidates",
type=int,
help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""",
)
# add arguments for inference method
validator.add_argument(
"inference_method",
type=str,
choices=["genearte", "rank"],
help="""Inference method to use for question answering. If rank, requires a answer list.""",
)
# ====== model specific ======
validator.add_argument(
"k_test",
type=int,
help="Number of top k most similar samples from ITC/VTC selection to be tested.",
)
return validator
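if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file): the
    # validator accepts a plain dict of runner options and raises on unknown keys
    # or type mismatches. The option values below are hypothetical.
    _validator = create_runner_config_validator()
    _validator.validate({"max_epoch": 10, "init_lr": 1e-4, "batch_size_train": 8})
    _validator.print_help()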
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/config.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class Registry:
mapping = {
"builder_name_mapping": {},
"task_name_mapping": {},
"processor_name_mapping": {},
"model_name_mapping": {},
"lr_scheduler_name_mapping": {},
"runner_name_mapping": {},
"state": {},
"paths": {},
}
@classmethod
def register_builder(cls, name):
r"""Register a dataset builder to registry with key 'name'
Args:
name: Key with which the builder will be registered.
Usage:
from minigpt4.common.registry import registry
from minigpt4.datasets.base_dataset_builder import BaseDatasetBuilder
"""
def wrap(builder_cls):
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
assert issubclass(
builder_cls, BaseDatasetBuilder
), "All builders must inherit BaseDatasetBuilder class, found {}".format(
builder_cls
)
if name in cls.mapping["builder_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["builder_name_mapping"][name]
)
)
cls.mapping["builder_name_mapping"][name] = builder_cls
return builder_cls
return wrap
@classmethod
def register_task(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from minigpt4.common.registry import registry
"""
def wrap(task_cls):
from minigpt4.tasks.base_task import BaseTask
assert issubclass(
task_cls, BaseTask
), "All tasks must inherit BaseTask class"
if name in cls.mapping["task_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["task_name_mapping"][name]
)
)
cls.mapping["task_name_mapping"][name] = task_cls
return task_cls
return wrap
@classmethod
def register_model(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from minigpt4.common.registry import registry
"""
def wrap(model_cls):
from minigpt4.models import BaseModel
assert issubclass(
model_cls, BaseModel
), "All models must inherit BaseModel class"
if name in cls.mapping["model_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["model_name_mapping"][name]
)
)
cls.mapping["model_name_mapping"][name] = model_cls
return model_cls
return wrap
@classmethod
def register_processor(cls, name):
r"""Register a processor to registry with key 'name'
Args:
name: Key with which the processor will be registered.
Usage:
from minigpt4.common.registry import registry
"""
def wrap(processor_cls):
from minigpt4.processors import BaseProcessor
assert issubclass(
processor_cls, BaseProcessor
), "All processors must inherit BaseProcessor class"
if name in cls.mapping["processor_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["processor_name_mapping"][name]
)
)
cls.mapping["processor_name_mapping"][name] = processor_cls
return processor_cls
return wrap
@classmethod
def register_lr_scheduler(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from minigpt4.common.registry import registry
"""
def wrap(lr_sched_cls):
if name in cls.mapping["lr_scheduler_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["lr_scheduler_name_mapping"][name]
)
)
cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls
return lr_sched_cls
return wrap
@classmethod
def register_runner(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from minigpt4.common.registry import registry
"""
def wrap(runner_cls):
if name in cls.mapping["runner_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["runner_name_mapping"][name]
)
)
cls.mapping["runner_name_mapping"][name] = runner_cls
return runner_cls
return wrap
@classmethod
def register_path(cls, name, path):
r"""Register a path to registry with key 'name'
Args:
name: Key with which the path will be registered.
Usage:
from minigpt4.common.registry import registry
"""
assert isinstance(path, str), "All paths must be str."
if name in cls.mapping["paths"]:
raise KeyError("Name '{}' already registered.".format(name))
cls.mapping["paths"][name] = path
@classmethod
def register(cls, name, obj):
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from minigpt4.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
# @classmethod
# def get_trainer_class(cls, name):
# return cls.mapping["trainer_name_mapping"].get(name, None)
@classmethod
def get_builder_class(cls, name):
return cls.mapping["builder_name_mapping"].get(name, None)
@classmethod
def get_model_class(cls, name):
return cls.mapping["model_name_mapping"].get(name, None)
@classmethod
def get_task_class(cls, name):
return cls.mapping["task_name_mapping"].get(name, None)
@classmethod
def get_processor_class(cls, name):
return cls.mapping["processor_name_mapping"].get(name, None)
@classmethod
def get_lr_scheduler_class(cls, name):
return cls.mapping["lr_scheduler_name_mapping"].get(name, None)
@classmethod
def get_runner_class(cls, name):
return cls.mapping["runner_name_mapping"].get(name, None)
@classmethod
def list_runners(cls):
return sorted(cls.mapping["runner_name_mapping"].keys())
@classmethod
def list_models(cls):
return sorted(cls.mapping["model_name_mapping"].keys())
@classmethod
def list_tasks(cls):
return sorted(cls.mapping["task_name_mapping"].keys())
@classmethod
def list_processors(cls):
return sorted(cls.mapping["processor_name_mapping"].keys())
@classmethod
def list_lr_schedulers(cls):
return sorted(cls.mapping["lr_scheduler_name_mapping"].keys())
@classmethod
def list_datasets(cls):
return sorted(cls.mapping["builder_name_mapping"].keys())
@classmethod
def get_path(cls, name):
return cls.mapping["paths"].get(name, None)
@classmethod
def get(cls, name, default=None, no_warning=False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for MMF's
internal operations. Default: False
"""
original_name = name
name = name.split(".")
value = cls.mapping["state"]
for subname in name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].warning(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from mmf.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
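if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file): paths
    # and arbitrary objects go through the same singleton; the key names and the
    # "/tmp" path below are hypothetical demonstration values.
    registry.register_path("demo_root", "/tmp/minigpt4_demo")
    registry.register("config.run.seed", 42)
    assert registry.get_path("demo_root") == "/tmp/minigpt4_demo"
    assert registry.get("config.run.seed") == 42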
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/registry.py |
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
from minigpt4.common import dist_utils
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not dist_utils.is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def global_avg(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_msg.append("max mem: {memory:.0f}")
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def setup_logger():
logging.basicConfig(
level=logging.INFO if dist_utils.is_main_process() else logging.WARN,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()],
)
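if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file): track
    # smoothed metrics over a dummy loop and print periodic progress logs.
    setup_logger()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    for step in metric_logger.log_every(range(10), print_freq=5, header="Demo:"):
        # update accepts python scalars or 0-dim tensors
        metric_logger.update(loss=1.0 / (step + 1), lr=1e-4)
    print("Averaged stats:", metric_logger.global_avg())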
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/logger.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import io
import json
import logging
import os
import pickle
import re
import shutil
import urllib
import urllib.error
import urllib.request
from typing import Optional
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import yaml
from iopath.common.download import download
from iopath.common.file_io import file_lock, g_pathmgr
from minigpt4.common.registry import registry
from torch.utils.model_zoo import tqdm
from torchvision.datasets.utils import (
check_integrity,
download_file_from_google_drive,
extract_archive,
)
def now():
from datetime import datetime
return datetime.now().strftime("%Y%m%d%H%M")[:-1]
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def get_cache_path(rel_path):
return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path))
def get_abs_path(rel_path):
return os.path.join(registry.get_path("library_root"), rel_path)
def load_json(filename):
with open(filename, "r") as f:
return json.load(f)
# The following are adapted from torchvision and vissl
# torchvision: https://github.com/pytorch/vision
# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
print(f"Error creating directory: {dir_path}")
return is_success
def get_redirected_url(url: str):
"""
Given a URL, returns the URL it redirects to or the
original URL in case of no indirection
"""
import requests
with requests.Session() as session:
with session.get(url, stream=True, allow_redirects=True) as response:
if response.history:
return response.url
else:
return url
def to_google_drive_download_url(view_url: str) -> str:
"""
Utility function to transform a view URL of google drive
to a download URL for google drive
Example input:
https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view
Example output:
https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp
"""
splits = view_url.split("/")
assert splits[-1] == "view"
file_id = splits[-2]
return f"https://drive.google.com/uc?export=download&id={file_id}"
def download_google_drive_url(url: str, output_path: str, output_file_name: str):
"""
Download a file from google drive
Downloading a URL from google drive requires confirmation when
the size of the file is too big (google drive notifies that
anti-viral checks cannot be performed on such files)
"""
import requests
with requests.Session() as session:
# First get the confirmation token and append it to the URL
with session.get(url, stream=True, allow_redirects=True) as response:
for k, v in response.cookies.items():
if k.startswith("download_warning"):
url = url + "&confirm=" + v
# Then download the content of the file
with session.get(url, stream=True, verify=True) as response:
makedir(output_path)
path = os.path.join(output_path, output_file_name)
total_size = int(response.headers.get("Content-length", 0))
with open(path, "wb") as file:
from tqdm import tqdm
with tqdm(total=total_size) as progress_bar:
for block in response.iter_content(
chunk_size=io.DEFAULT_BUFFER_SIZE
):
file.write(block)
progress_bar.update(len(block))
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
with open(filename, "wb") as fh:
with urllib.request.urlopen(
urllib.request.Request(url, headers={"User-Agent": "vissl"})
) as response:
with tqdm(total=response.length) as pbar:
for chunk in iter(lambda: response.read(chunk_size), ""):
if not chunk:
break
pbar.update(chunk_size)
fh.write(chunk)
def download_url(
url: str,
root: str,
filename: Optional[str] = None,
md5: Optional[str] = None,
) -> None:
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under.
If None, use the basename of the URL.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir(root)
# check if file is already present locally
if check_integrity(fpath, md5):
print("Using downloaded and verified file: " + fpath)
return
# expand redirect chain if needed
url = get_redirected_url(url)
# check if file is located on Google Drive
file_id = _get_google_drive_file_id(url)
if file_id is not None:
return download_file_from_google_drive(file_id, root, filename, md5)
# download the file
try:
print("Downloading " + url + " to " + fpath)
_urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined]
if url[:5] == "https":
url = url.replace("https:", "http:")
print(
"Failed download. Trying https -> http instead."
" Downloading " + url + " to " + fpath
)
_urlretrieve(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def download_and_extract_archive(
url: str,
download_root: str,
extract_root: Optional[str] = None,
filename: Optional[str] = None,
md5: Optional[str] = None,
remove_finished: bool = False,
) -> None:
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def cache_url(url: str, cache_dir: str) -> str:
"""
This implementation downloads the remote resource and caches it locally.
The resource will only be downloaded if not previously requested.
"""
parsed_url = urlparse(url)
dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/")))
makedir(dirname)
filename = url.split("/")[-1]
cached = os.path.join(dirname, filename)
with file_lock(cached):
if not os.path.isfile(cached):
logging.info(f"Downloading {url} to {cached} ...")
cached = download(url, dirname, filename=filename)
logging.info(f"URL {url} cached in {cached}")
return cached
# TODO (prigoyal): convert this into RAII-style API
def create_file_symlink(file1, file2):
"""
Simply create a symlink from the given file1 to file2.
Useful during model checkpointing to symlink to the
latest successful checkpoint.
"""
try:
if g_pathmgr.exists(file2):
g_pathmgr.rm(file2)
g_pathmgr.symlink(file1, file2)
except Exception as e:
logging.info(f"Could NOT create symlink. Error: {e}")
def save_file(data, filename, append_to_json=True, verbose=True):
"""
Common i/o utility to handle saving data to various file formats.
Supported:
.pkl, .pickle, .npy, .json
Specifically for .json, users have the option to either append (default)
or rewrite by passing in Boolean value to append_to_json.
"""
if verbose:
logging.info(f"Saving data to file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "wb") as fopen:
pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
elif file_ext == ".npy":
with g_pathmgr.open(filename, "wb") as fopen:
np.save(fopen, data)
elif file_ext == ".json":
if append_to_json:
with g_pathmgr.open(filename, "a") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
else:
with g_pathmgr.open(filename, "w") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "w") as fopen:
dump = yaml.dump(data)
fopen.write(dump)
fopen.flush()
else:
raise Exception(f"Saving {file_ext} is not supported yet")
if verbose:
logging.info(f"Saved data to file: {filename}")
def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False):
"""
Common i/o utility to handle loading data from various file formats.
Supported:
.pkl, .pickle, .npy, .json
For the npy files, we support reading the files in mmap_mode.
If the mmap_mode of reading is not successful, we load data without the
mmap_mode.
"""
if verbose:
logging.info(f"Loading data from file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext == ".txt":
with g_pathmgr.open(filename, "r") as fopen:
data = fopen.readlines()
elif file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "rb") as fopen:
data = pickle.load(fopen, encoding="latin1")
elif file_ext == ".npy":
if mmap_mode:
try:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(
fopen,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
except ValueError as e:
logging.info(
f"Could not mmap {filename}: {e}. Trying without g_pathmgr"
)
data = np.load(
filename,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
logging.info("Successfully loaded without g_pathmgr")
except Exception:
logging.info("Could not mmap without g_pathmgr. Trying without mmap")
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
else:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
elif file_ext == ".json":
with g_pathmgr.open(filename, "r") as fopen:
data = json.load(fopen)
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "r") as fopen:
data = yaml.load(fopen, Loader=yaml.FullLoader)
elif file_ext == ".csv":
with g_pathmgr.open(filename, "r") as fopen:
data = pd.read_csv(fopen)
else:
raise Exception(f"Reading from {file_ext} is not supported yet")
return data
def abspath(resource_path: str):
"""
Make a path absolute, but take into account prefixes like
"http://" or "manifold://"
"""
regex = re.compile(r"^\w+://")
if regex.match(resource_path) is None:
return os.path.abspath(resource_path)
else:
return resource_path
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
logging.info(f"Error creating directory: {dir_path}")
return is_success
def is_url(input_url):
"""
Check if an input string is a url: look for http(s):// and ignore the case
"""
is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
return is_url
def cleanup_dir(dir):
"""
Utility for deleting a directory. Useful for cleaning the storage space
that contains various training artifacts like checkpoints, data etc.
"""
if os.path.exists(dir):
logging.info(f"Deleting directory: {dir}")
shutil.rmtree(dir)
logging.info(f"Deleted contents of directory: {dir}")
def get_file_size(filename):
"""
Given a file, get the size of file in MB
"""
size_in_mb = os.path.getsize(filename) / float(1024**2)
return size_in_mb
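if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file):
    # save_file/load_file dispatch on the file extension; here a dict is written as
    # JSON and read back. The "/tmp" path is a hypothetical temporary location.
    demo_path = "/tmp/minigpt4_io_demo.json"
    save_file({"split": "train", "num_records": 3}, demo_path, append_to_json=False)
    print(load_file(demo_path))
    print("size in MB:", get_file_size(demo_path))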
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/utils.py |
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import filters
from skimage import transform as skimage_transform
def getAttMap(img, attMap, blur=True, overlap=True):
attMap -= attMap.min()
if attMap.max() > 0:
attMap /= attMap.max()
attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
if blur:
attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
attMap -= attMap.min()
attMap /= attMap.max()
cmap = plt.get_cmap("jet")
attMapV = cmap(attMap)
attMapV = np.delete(attMapV, 3, 2)
if overlap:
attMap = (
1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
+ (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
)
return attMap
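if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file): blend
    # a random low-resolution attention map over a random RGB image in [0, 1];
    # real callers pass model attention maps instead of random data.
    rng = np.random.default_rng(0)
    img = rng.random((224, 224, 3))
    att_map = rng.random((14, 14))
    blended = getAttMap(img, att_map, blur=True, overlap=True)
    print(blended.shape)  # (224, 224, 3)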
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/gradcam.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import functools
import os
import torch
import torch.distributed as dist
import timm.models.hub as timm_hub
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}, world {}): {}".format(
args.rank, args.world_size, args.dist_url
),
flush=True,
)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
timeout=datetime.timedelta(
days=365
), # allow auto-downloading and de-compressing
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def get_dist_info():
if torch.__version__ < "1.0":
initialized = dist._initialized
else:
initialized = dist.is_initialized()
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else: # non-distributed training
rank = 0
world_size = 1
return rank, world_size
def main_process(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
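if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original file): when
    # torch.distributed has not been initialized, the helpers fall back to a
    # single-process view of the world. `log_once` is a hypothetical example.
    print("world size:", get_world_size())  # 1
    print("rank:", get_rank())  # 0
    print("is main process:", is_main_process())  # True
    @main_process
    def log_once(msg):
        print(msg)
    log_once("printed only on the rank-0 process")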
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/common/dist_utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from minigpt4.runners.runner_base import RunnerBase
__all__ = ["RunnerBase"]
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/runners/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import json
import logging
import os
import time
from pathlib import Path
import torch
import torch.distributed as dist
import webdataset as wds
from minigpt4.common.dist_utils import (
download_cached_file,
get_rank,
get_world_size,
is_main_process,
main_process,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import is_url
from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset
from minigpt4.datasets.datasets.dataloader_utils import (
IterLoader,
MultiIterLoader,
PrefetchLoader,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
@registry.register_runner("runner_base")
class RunnerBase:
"""
A runner class to train and evaluate a model given a task and datasets.
The runner uses pytorch distributed data parallel by default. Future release
will support other distributed frameworks.
"""
def __init__(self, cfg, task, model, datasets, job_id):
self.config = cfg
self.job_id = job_id
self.task = task
self.datasets = datasets
self._model = model
self._wrapped_model = None
self._device = None
self._optimizer = None
self._scaler = None
self._dataloaders = None
self._lr_sched = None
self.start_epoch = 0
# self.setup_seeds()
self.setup_output_dir()
@property
def device(self):
if self._device is None:
self._device = torch.device(self.config.run_cfg.device)
return self._device
@property
def use_distributed(self):
return self.config.run_cfg.distributed
@property
def model(self):
"""
A property to get the DDP-wrapped model on the device.
"""
# move model to device
if self._model.device != self.device:
self._model = self._model.to(self.device)
# distributed training wrapper
if self.use_distributed:
if self._wrapped_model is None:
self._wrapped_model = DDP(
self._model, device_ids=[self.config.run_cfg.gpu]
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
# TODO make optimizer class and configurations
if self._optimizer is None:
num_parameters = 0
p_wd, p_non_wd = [], []
for n, p in self.model.named_parameters():
if not p.requires_grad:
continue # frozen weights
print(n)
if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n:
p_non_wd.append(p)
else:
p_wd.append(p)
num_parameters += p.data.nelement()
logging.info("number of trainable parameters: %d" % num_parameters)
optim_params = [
{
"params": p_wd,
"weight_decay": float(self.config.run_cfg.weight_decay),
},
{"params": p_non_wd, "weight_decay": 0},
]
beta2 = self.config.run_cfg.get("beta2", 0.999)
self._optimizer = torch.optim.AdamW(
optim_params,
lr=float(self.config.run_cfg.init_lr),
weight_decay=float(self.config.run_cfg.weight_decay),
betas=(0.9, beta2),
)
return self._optimizer
@property
def scaler(self):
amp = self.config.run_cfg.get("amp", False)
if amp:
if self._scaler is None:
self._scaler = torch.cuda.amp.GradScaler()
return self._scaler
@property
def lr_scheduler(self):
"""
A property to get and create learning rate scheduler by split just in need.
"""
if self._lr_sched is None:
lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
# max_epoch = self.config.run_cfg.max_epoch
max_epoch = self.max_epoch
# min_lr = self.config.run_cfg.min_lr
min_lr = self.min_lr
# init_lr = self.config.run_cfg.init_lr
init_lr = self.init_lr
# optional parameters
decay_rate = self.config.run_cfg.get("lr_decay_rate", None)
warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1)
warmup_steps = self.config.run_cfg.get("warmup_steps", 0)
iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None)
if iters_per_epoch is None:
try:
iters_per_epoch = len(self.dataloaders['train'])
except (AttributeError, TypeError):
iters_per_epoch = 10000
self._lr_sched = lr_sched_cls(
optimizer=self.optimizer,
max_epoch=max_epoch,
iters_per_epoch=iters_per_epoch,
min_lr=min_lr,
init_lr=init_lr,
decay_rate=decay_rate,
warmup_start_lr=warmup_start_lr,
warmup_steps=warmup_steps,
)
return self._lr_sched
@property
def dataloaders(self) -> dict:
"""
A property to get and create dataloaders by split just in need.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
Currently do not support multiple datasets for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = datasets
# self.datasets = concat_datasets(datasets)
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
if hasattr(self.datasets[split_name], "__len__"):
# a single map-style dataset
num_records = len(self.datasets[split_name])
else:
# a single wds.DataPipeline
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
@property
def cuda_enabled(self):
return self.device.type == "cuda"
@property
def max_epoch(self):
return int(self.config.run_cfg.max_epoch)
@property
def log_freq(self):
log_freq = self.config.run_cfg.get("log_freq", 50)
return int(log_freq)
@property
def init_lr(self):
return float(self.config.run_cfg.init_lr)
@property
def min_lr(self):
return float(self.config.run_cfg.min_lr)
@property
def accum_grad_iters(self):
return int(self.config.run_cfg.get("accum_grad_iters", 1))
@property
def valid_splits(self):
valid_splits = self.config.run_cfg.get("valid_splits", [])
if len(valid_splits) == 0:
logging.info("No validation splits found.")
return valid_splits
@property
def test_splits(self):
test_splits = self.config.run_cfg.get("test_splits", [])
return test_splits
@property
def train_splits(self):
train_splits = self.config.run_cfg.get("train_splits", [])
if len(train_splits) == 0:
logging.info("Empty train splits.")
return train_splits
@property
def evaluate_only(self):
"""
Set to True to skip training.
"""
return self.config.run_cfg.evaluate
@property
def use_dist_eval_sampler(self):
return self.config.run_cfg.get("use_dist_eval_sampler", True)
@property
def resume_ckpt_path(self):
return self.config.run_cfg.get("resume_ckpt_path", None)
@property
def train_loader(self):
train_dataloader = self.dataloaders["train"]
return train_dataloader
def setup_output_dir(self):
lib_root = Path(registry.get_path("library_root"))
output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id
result_dir = output_dir / "result"
output_dir.mkdir(parents=True, exist_ok=True)
result_dir.mkdir(parents=True, exist_ok=True)
registry.register_path("result_dir", str(result_dir))
registry.register_path("output_dir", str(output_dir))
self.result_dir = result_dir
self.output_dir = output_dir
def train(self):
start_time = time.time()
best_agg_metric = 0
best_epoch = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for cur_epoch in range(self.start_epoch, self.max_epoch):
# training phase
if not self.evaluate_only:
logging.info("Start training")
train_stats = self.train_epoch(cur_epoch)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_epoch, best_agg_metric = cur_epoch, agg_metrics
self._save_checkpoint(cur_epoch, is_best=True)
val_log.update({"best_epoch": best_epoch})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each epoch.
if not self.evaluate_only:
self._save_checkpoint(cur_epoch, is_best=False)
if self.evaluate_only:
break
if self.config.run_cfg.distributed:
dist.barrier()
# testing phase
test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch
self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def evaluate(self, cur_epoch="best", skip_reload=False):
test_logs = dict()
if len(self.test_splits) > 0:
for split_name in self.test_splits:
test_logs[split_name] = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload
)
return test_logs
def train_epoch(self, epoch):
# train
self.model.train()
return self.task.train_epoch(
epoch=epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@torch.no_grad()
def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
"""
Evaluate the model on a given split.
Args:
split_name (str): name of the split to evaluate on.
cur_epoch (int): current epoch.
            skip_reload (bool): whether to skip reloading the best checkpoint.
                During training, we reload the best checkpoint for validation.
                During testing, we use the provided weights and skip reloading the best checkpoint.
"""
data_loader = self.dataloaders.get(split_name, None)
assert data_loader, "data_loader for split {} is None.".format(split_name)
# TODO In validation, you need to compute loss as well as metrics
# TODO consider moving to model.before_evaluation()
model = self.unwrap_dist_model(self.model)
if not skip_reload and cur_epoch == "best":
model = self._reload_best_model(model)
model.eval()
self.task.before_evaluation(
model=model,
dataset=self.datasets[split_name],
)
results = self.task.evaluation(model, data_loader)
if results is not None:
return self.task.after_evaluation(
val_result=results,
split_name=split_name,
epoch=cur_epoch,
)
def unwrap_dist_model(self, model):
if self.use_distributed:
return model.module
else:
return model
def create_loaders(
self,
datasets,
num_workers,
batch_sizes,
is_trains,
collate_fns,
dataset_ratios=None,
):
"""
Create dataloaders for training and validation.
"""
def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
# create a single dataloader for each split
if isinstance(dataset, ChainDataset) or isinstance(
dataset, wds.DataPipeline
):
                # wds.WebDataset instances are chained together
# webdataset.DataPipeline has its own sampler and collate_fn
loader = iter(
DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
)
)
else:
                # map-style datasets are concatenated together
# setup distributed sampler
if self.use_distributed:
sampler = DistributedSampler(
dataset,
shuffle=is_train,
num_replicas=get_world_size(),
rank=get_rank(),
)
if not self.use_dist_eval_sampler:
# e.g. retrieval evaluation
sampler = sampler if is_train else None
else:
sampler = None
loader = DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
sampler=sampler,
shuffle=sampler is None and is_train,
collate_fn=collate_fn,
drop_last=True if is_train else False,
)
loader = PrefetchLoader(loader)
if is_train:
loader = IterLoader(loader, use_distributed=self.use_distributed)
return loader
loaders = []
for dataset, bsz, is_train, collate_fn in zip(
datasets, batch_sizes, is_trains, collate_fns
):
if isinstance(dataset, list) or isinstance(dataset, tuple):
if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None:
dataset_ratios = [d.sample_ratio for d in dataset]
loader = MultiIterLoader(
loaders=[
_create_loader(d, num_workers, bsz, is_train, collate_fn[i])
for i, d in enumerate(dataset)
],
ratios=dataset_ratios,
)
else:
loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
loaders.append(loader)
return loaders
@main_process
def _save_checkpoint(self, cur_epoch, is_best=False):
"""
Save the checkpoint at the current epoch.
"""
model_no_ddp = self.unwrap_dist_model(self.model)
param_grad_dic = {
k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()
}
state_dict = model_no_ddp.state_dict()
for k in list(state_dict.keys()):
if k in param_grad_dic.keys() and not param_grad_dic[k]:
# delete parameters that do not require gradient
del state_dict[k]
save_obj = {
"model": state_dict,
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"epoch": cur_epoch,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_epoch),
)
logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to))
torch.save(save_obj, save_to)
def _reload_best_model(self, model):
"""
Load the best checkpoint for evaluation.
"""
checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth")
logging.info("Loading checkpoint from {}.".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location="cpu")
try:
model.load_state_dict(checkpoint["model"])
except RuntimeError as e:
logging.warning(
"""
Key mismatch when loading checkpoint. This is expected if only part of the model is saved.
Trying to load the model with strict=False.
"""
)
model.load_state_dict(checkpoint["model"], strict=False)
return model
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
            checkpoint = torch.load(cached_file, map_location=self.device)
        elif os.path.isfile(url_or_filename):
            checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_epoch = checkpoint["epoch"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@main_process
def log_stats(self, stats, split_name):
if isinstance(stats, dict):
log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}}
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(log_stats) + "\n")
elif isinstance(stats, list):
pass
@main_process
def log_config(self):
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/runners/runner_base.py |
import argparse
import time
from PIL import Image
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList
import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any
from minigpt4.common.registry import registry
class SeparatorStyle(Enum):
"""Different separator style."""
SINGLE = auto()
TWO = auto()
@dataclasses.dataclass
class Conversation:
"""A class that keeps all conversation history."""
system: str
roles: List[str]
messages: List[List[str]]
offset: int
# system_img: List[Image.Image] = []
sep_style: SeparatorStyle = SeparatorStyle.SINGLE
sep: str = "###"
sep2: str = None
skip_next: bool = False
conv_id: Any = None
def get_prompt(self):
if self.sep_style == SeparatorStyle.SINGLE:
ret = self.system + self.sep
for role, message in self.messages:
if message:
ret += role + ": " + message + self.sep
else:
ret += role + ":"
return ret
elif self.sep_style == SeparatorStyle.TWO:
seps = [self.sep, self.sep2]
ret = self.system + seps[0]
for i, (role, message) in enumerate(self.messages):
if message:
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def append_message(self, role, message):
self.messages.append([role, message])
def to_gradio_chatbot(self):
ret = []
for i, (role, msg) in enumerate(self.messages[self.offset:]):
if i % 2 == 0:
ret.append([msg, None])
else:
ret[-1][-1] = msg
return ret
def copy(self):
return Conversation(
system=self.system,
# system_img=self.system_img,
roles=self.roles,
messages=[[x, y] for x, y in self.messages],
offset=self.offset,
sep_style=self.sep_style,
sep=self.sep,
sep2=self.sep2,
conv_id=self.conv_id)
def dict(self):
return {
"system": self.system,
# "system_img": self.system_img,
"roles": self.roles,
"messages": self.messages,
"offset": self.offset,
"sep": self.sep,
"sep2": self.sep2,
"conv_id": self.conv_id,
}
class StoppingCriteriaSub(StoppingCriteria):
def __init__(self, stops=[], encounters=1):
super().__init__()
self.stops = stops
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
for stop in self.stops:
if torch.all((stop == input_ids[0][-len(stop):])).item():
return True
return False
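# StoppingCriteriaSub stops generation as soon as the tail of the generated sequence matches any of
# the provided stop token-id tensors; Chat below uses it with the two possible tokenizations of '###'.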
CONV_VISION = Conversation(
system="Give the following image: <Img>ImageContent</Img>. "
"You will be able to see the image once I provide it to you. Please answer my questions.",
roles=("Human", "Assistant"),
messages=[],
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
class Chat:
def __init__(self, model, vis_processor, device='cuda:0'):
self.device = device
self.model = model
self.vis_processor = vis_processor
stop_words_ids = [torch.tensor([835]).to(self.device),
torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways.
self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
def ask(self, text, conv):
if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[0] \
and conv.messages[-1][1][-6:] == '</Img>': # last message is image.
conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text])
else:
conv.append_message(conv.roles[0], text)
def answer(self, conv, img_list, max_new_tokens=200, num_beams=1, min_length=1, top_p=0.9,
repetition_penalty=1.0, length_penalty=1, temperature=1.0):
conv.append_message(conv.roles[1], None)
embs = self.get_context_emb(conv, img_list)
outputs = self.model.llama_model.generate(
inputs_embeds=embs,
max_new_tokens=max_new_tokens,
stopping_criteria=self.stopping_criteria,
num_beams=num_beams,
do_sample=True,
min_length=min_length,
top_p=top_p,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
temperature=temperature,
)
output_token = outputs[0]
if output_token[0] == 0:
output_token = output_token[1:]
output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False)
output_text = output_text.split('###')[0] # remove the stop sign '###'
output_text = output_text.split('Assistant:')[-1].strip()
conv.messages[-1][1] = output_text
return output_text, output_token.cpu().numpy()
def upload_img(self, image, conv, img_list):
if isinstance(image, str): # is a image path
raw_image = Image.open(image).convert('RGB')
image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
elif isinstance(image, Image.Image):
raw_image = image
image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
elif isinstance(image, torch.Tensor):
if len(image.shape) == 3:
image = image.unsqueeze(0)
image = image.to(self.device)
image_emb, _ = self.model.encode_img(image)
img_list.append(image_emb)
conv.append_message(conv.roles[0], "<Img><ImageHere></Img>")
msg = "Received."
# self.conv.append_message(self.conv.roles[1], msg)
return msg
def get_context_emb(self, conv, img_list):
prompt = conv.get_prompt()
prompt_segs = prompt.split('<ImageHere>')
assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images."
seg_tokens = [
self.model.llama_tokenizer(
seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids
# only add bos to the first seg
for i, seg in enumerate(prompt_segs)
]
seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens]
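        # interleave text-segment embeddings with image embeddings:
        # [seg_0, img_0, seg_1, img_1, ..., img_{n-1}, seg_n]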
mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]
mixed_embs = torch.cat(mixed_embs, dim=1)
return mixed_embs
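# A minimal usage sketch; `model` and `vis_processor` come from the MiniGPT-4 model/processor setup
# elsewhere in the repo and are not defined in this file:
#
#   chat = Chat(model, vis_processor, device='cuda:0')
#   chat_state = CONV_VISION.copy()
#   img_list = []
#   chat.upload_img('path/to/image.jpg', chat_state, img_list)
#   chat.ask('What is unusual about this image?', chat_state)
#   answer, _ = chat.answer(chat_state, img_list)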
| EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/conversation/conversation.py |
EXA-1-master | exa/models/MiniGPT-4-main/minigpt4/conversation/__init__.py |
|
import json
import csv
# specify input and output file paths
input_file = 'laion_synthetic_filtered_large.json'
output_file = 'laion_synthetic_filtered_large.tsv'
# load JSON data from input file
with open(input_file, 'r') as f:
data = json.load(f)
# extract header and data from JSON
header = data[0].keys()
rows = [x.values() for x in data]
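# NOTE: the header is taken from the first record, so this assumes every record in the JSON file
# has the same keys (in the same order).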
# write data to TSV file
with open(output_file, 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(header)
writer.writerows(rows)
| EXA-1-master | exa/models/MiniGPT-4-main/dataset/convert_laion.py |
import json
import csv
# specify input and output file paths
input_file = 'ccs_synthetic_filtered_large.json'
output_file = 'ccs_synthetic_filtered_large.tsv'
# load JSON data from input file
with open(input_file, 'r') as f:
data = json.load(f)
# extract header and data from JSON
header = data[0].keys()
rows = [x.values() for x in data]
# write data to TSV file
with open(output_file, 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(header)
writer.writerows(rows)
| EXA-1-master | exa/models/MiniGPT-4-main/dataset/convert_cc_sbu.py |
import os
import argparse
import torch
import json
from collections import defaultdict
def parse_args():
parser = argparse.ArgumentParser(description='Extract MMProjector weights')
parser.add_argument('--model_name_or_path', type=str, help='model folder')
parser.add_argument('--output', type=str, help='output file')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
model_indices = json.load(open(os.path.join(args.model_name_or_path, 'pytorch_model.bin.index.json')))
keys_to_match = ['mm_projector', 'embed_tokens']
ckpt_to_key = defaultdict(list)
for k, v in model_indices['weight_map'].items():
if any(key_match in k for key_match in keys_to_match):
ckpt_to_key[v].append(k)
loaded_weights = {}
for ckpt_name, weight_keys in ckpt_to_key.items():
ckpt = torch.load(os.path.join(args.model_name_or_path, ckpt_name), map_location='cpu')
for k in weight_keys:
loaded_weights[k] = ckpt[k]
torch.save(loaded_weights, args.output)
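# Usage sketch (the paths below are illustrative):
#   python scripts/extract_mm_projector.py --model_name_or_path ./checkpoints/llava-7b --output ./mm_projector.bin
# Only the shards listed in pytorch_model.bin.index.json that contain 'mm_projector' or 'embed_tokens'
# weights are loaded, so the projector can be extracted without materializing the full model.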
| EXA-1-master | exa/models/LLaVA-main/scripts/extract_mm_projector.py |
import dataclasses
from enum import auto, Enum
from typing import List, Tuple
class SeparatorStyle(Enum):
"""Different separator style."""
SINGLE = auto()
TWO = auto()
@dataclasses.dataclass
class Conversation:
"""A class that keeps all conversation history."""
system: str
roles: List[str]
messages: List[List[str]]
offset: int
sep_style: SeparatorStyle = SeparatorStyle.SINGLE
sep: str = "###"
sep2: str = None
skip_next: bool = False
def get_prompt(self):
if self.sep_style == SeparatorStyle.SINGLE:
ret = self.system + self.sep
for role, message in self.messages:
if message:
if type(message) is tuple:
message, _ = message
ret += role + ": " + message + self.sep
else:
ret += role + ":"
return ret
elif self.sep_style == SeparatorStyle.TWO:
seps = [self.sep, self.sep2]
ret = self.system + seps[0]
for i, (role, message) in enumerate(self.messages):
if message:
if type(message) is tuple:
message, _ = message
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def append_message(self, role, message):
self.messages.append([role, message])
def get_images(self):
images = []
for i, (role, msg) in enumerate(self.messages[self.offset:]):
if i % 2 == 0:
if type(msg) is tuple:
import base64
from io import BytesIO
msg, image = msg
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
max_len, min_len = 800, 400
shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
longest_edge = int(shortest_edge * aspect_ratio)
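                    # PIL's Image.size is (width, height), so the H/W names below are effectively swapped;
                    # resize((H, W)) still receives (width, height) as PIL expects, so the result is correct.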
H, W = image.size
if H > W:
H, W = longest_edge, shortest_edge
else:
H, W = shortest_edge, longest_edge
image = image.resize((H, W))
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_b64_str = base64.b64encode(buffered.getvalue()).decode()
images.append(img_b64_str)
return images
def to_gradio_chatbot(self):
ret = []
for i, (role, msg) in enumerate(self.messages[self.offset:]):
if i % 2 == 0:
if type(msg) is tuple:
import base64
from io import BytesIO
msg, image = msg
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
max_len, min_len = 800, 400
shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
longest_edge = int(shortest_edge * aspect_ratio)
H, W = image.size
if H > W:
H, W = longest_edge, shortest_edge
else:
H, W = shortest_edge, longest_edge
image = image.resize((H, W))
# image = image.resize((224, 224))
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                    img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
msg = msg.replace('<image>', img_str)
ret.append([msg, None])
else:
ret[-1][-1] = msg
return ret
def copy(self):
return Conversation(
system=self.system,
roles=self.roles,
messages=[[x, y] for x, y in self.messages],
offset=self.offset,
sep_style=self.sep_style,
sep=self.sep,
sep2=self.sep2)
def dict(self):
if len(self.get_images()) > 0:
return {
"system": self.system,
"roles": self.roles,
"messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
"offset": self.offset,
"sep": self.sep,
"sep2": self.sep2,
}
return {
"system": self.system,
"roles": self.roles,
"messages": self.messages,
"offset": self.offset,
"sep": self.sep,
"sep2": self.sep2,
}
conv_v1 = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
roles=("Human", "Assistant"),
messages=(
("Human", "Give three tips for staying healthy."),
("Assistant",
"Sure, here are three tips for staying healthy:\n"
"1. Exercise regularly: Regular physical activity can help improve your overall health and wellbeing. "
"It can also help reduce your risk of chronic conditions such as obesity, diabetes, heart disease, "
"and certain cancers. Aim for at least 150 minutes of moderate-intensity aerobic exercise or "
"75 minutes of vigorous-intensity aerobic exercise per week, along with muscle-strengthening "
"activities at least two days per week.\n"
"2. Eat a balanced diet: Eating a balanced diet that is rich in fruits, "
"vegetables, whole grains, lean proteins, and healthy fats can help support "
"your overall health. Try to limit your intake of processed and high-sugar foods, "
"and aim to drink plenty of water throughout the day.\n"
"3. Get enough sleep: Getting enough quality sleep is essential for your physical "
"and mental health. Adults should aim for seven to nine hours of sleep per night. "
"Establish a regular sleep schedule and try to create a relaxing bedtime routine to "
"help improve the quality of your sleep.")
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
conv_v1_2 = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
roles=("Human", "Assistant"),
messages=(
("Human", "What are the key differences between renewable and non-renewable energy sources?"),
("Assistant",
"Renewable energy sources are those that can be replenished naturally in a relatively "
"short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
"Non-renewable energy sources, on the other hand, are finite and will eventually be "
"depleted, such as coal, oil, and natural gas. Here are some key differences between "
"renewable and non-renewable energy sources:\n"
"1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
"energy sources are finite and will eventually run out.\n"
"2. Environmental impact: Renewable energy sources have a much lower environmental impact "
"than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
"and other negative effects.\n"
"3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
"have lower operational costs than non-renewable sources.\n"
"4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
"locations than non-renewable sources.\n"
"5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
"situations and needs, while non-renewable sources are more rigid and inflexible.\n"
"6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
"non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
conv_bair_v1 = Conversation(
system="BEGINNING OF CONVERSATION:",
roles=("USER", "GPT"),
messages=(),
offset=0,
sep_style=SeparatorStyle.TWO,
sep=" ",
sep2="</s>",
)
simple_conv = Conversation(
system="You are LLaVA, a large language model trained by UW Madison WAIV Lab, based on LLaMA architecture."
"You are designed to assist human with a variety of tasks using natural language."
"Follow the instructions carefully.",
roles=("Human", "Assistant"),
messages=(
("Human", "Hi!"),
("Assistant", "Hi there! How can I help you today?\n")
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
simple_conv_multimodal = Conversation(
system="You are LLaVA, a large multimodal model trained by UW Madison WAIV Lab, based on LLaMA architecture."
"You are able to understand the image and visual content that the user provides, and explain to human using natural language."
"You are designed to assist human with a variety of tasks using natural language."
"Follow the instructions carefully and explain your answers in detail. Provide examples when necessary.",
roles=("Human", "Assistant"),
messages=(
("Human", "Hi!"),
("Assistant", "Hi there! How can I help you today?\n")
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
simple_conv_legacy = Conversation(
system="You are LLaVA, a large language model trained by UW Madison WAIV Lab, based on LLaMA architecture."
"You are designed to assist human with a variety of tasks using natural language."
"Follow the instructions carefully.",
roles=("Human", "Assistant"),
messages=(
("Human", "Hi!\n\n### Response:"),
("Assistant", "Hi there! How can I help you today?\n")
),
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="###",
)
default_conversation = conv_v1_2
conv_templates = {
"default": conv_v1_2,
"simple": simple_conv,
"simple_legacy": simple_conv_legacy,
"multimodal": simple_conv_multimodal,
# fastchat
"v1": conv_v1_2,
"bair_v1": conv_bair_v1,
}
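# A minimal usage sketch (the strings below are illustrative):
#
#   conv = conv_templates["multimodal"].copy()
#   conv.append_message(conv.roles[0], "What is shown in this image?\n<image>")
#   conv.append_message(conv.roles[1], None)
#   prompt = conv.get_prompt()
#
# With SeparatorStyle.SINGLE the prompt ends with "Assistant:", so the language model continues
# from the assistant turn.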
if __name__ == "__main__":
print(default_conversation.get_prompt())
| EXA-1-master | exa/models/LLaVA-main/llava/conversation.py |
CONTROLLER_HEART_BEAT_EXPIRATION = 2 * 60
WORKER_HEART_BEAT_INTERVAL = 30
LOGDIR = "."
| EXA-1-master | exa/models/LLaVA-main/llava/constants.py |
EXA-1-master | exa/models/LLaVA-main/llava/__init__.py |
|
import datetime
import logging
import logging.handlers
import json
import os
import sys
import requests
from llava.constants import LOGDIR
server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
handler = None
def build_logger(logger_name, logger_filename):
global handler
formatter = logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
# Set the format of root handlers
if not logging.getLogger().handlers:
logging.basicConfig(level=logging.INFO)
logging.getLogger().handlers[0].setFormatter(formatter)
# Redirect stdout and stderr to loggers
stdout_logger = logging.getLogger("stdout")
stdout_logger.setLevel(logging.INFO)
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger("stderr")
stderr_logger.setLevel(logging.ERROR)
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
# Get logger
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
# Add a file handler for all loggers
if handler is None:
os.makedirs(LOGDIR, exist_ok=True)
filename = os.path.join(LOGDIR, logger_filename)
handler = logging.handlers.TimedRotatingFileHandler(
filename, when='D', utc=True)
handler.setFormatter(formatter)
for name, item in logging.root.manager.loggerDict.items():
if isinstance(item, logging.Logger):
item.addHandler(handler)
return logger
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.terminal = sys.stdout
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def __getattr__(self, attr):
return getattr(self.terminal, attr)
def write(self, buf):
temp_linebuf = self.linebuf + buf
self.linebuf = ''
for line in temp_linebuf.splitlines(True):
# From the io.TextIOWrapper docs:
# On output, if newline is None, any '\n' characters written
# are translated to the system default line separator.
# By default sys.stdout.write() expects '\n' newlines and then
# translates them so this is still cross platform.
if line[-1] == '\n':
self.logger.log(self.log_level, line.rstrip())
else:
self.linebuf += line
def flush(self):
if self.linebuf != '':
self.logger.log(self.log_level, self.linebuf.rstrip())
self.linebuf = ''
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def violates_moderation(text):
"""
Check whether the text violates OpenAI moderation API.
"""
url = "https://api.openai.com/v1/moderations"
headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
    text = text.replace("\n", "")
    # build the request body with json.dumps so quotes and other special characters are escaped correctly
    data = json.dumps({"input": text}).encode("utf-8")
    try:
        ret = requests.post(url, headers=headers, data=data, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except (requests.exceptions.RequestException, KeyError):
        flagged = False
    return flagged
def pretty_print_semaphore(semaphore):
if semaphore is None:
return "None"
return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
| EXA-1-master | exa/models/LLaVA-main/llava/utils.py |
"""
Usage:
python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta ~/model_weights/vicuna-7b-delta --hub-repo-id lmsys/vicuna-7b-delta
"""
import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
print("Loading base model")
base = AutoModelForCausalLM.from_pretrained(
base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
print("Loading target model")
target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
DEFAULT_PAD_TOKEN = "[PAD]"
base_tokenizer = AutoTokenizer.from_pretrained(base_model_path)
num_new_tokens = base_tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN))
base.resize_token_embeddings(len(base_tokenizer))
input_embeddings = base.get_input_embeddings().weight.data
output_embeddings = base.get_output_embeddings().weight.data
    if num_new_tokens > 0:
        # guard against num_new_tokens == 0: a [-0:] slice would select (and zero out) every row
        input_embeddings[-num_new_tokens:] = 0
        output_embeddings[-num_new_tokens:] = 0
print("Calculating delta")
for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
if name not in base.state_dict():
assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
continue
if param.data.shape == base.state_dict()[name].shape:
param.data -= base.state_dict()[name]
else:
assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
bparam = base.state_dict()[name]
param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam
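    # After this loop, `target` holds (target - base) for every shared weight; embedding rows that only
    # exist in the target (extra vocabulary entries) keep their absolute values and are stored as-is.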
print("Saving delta")
if hub_repo_id:
kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
else:
kwargs = {}
target.save_pretrained(delta_path, **kwargs)
target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
target_tokenizer.save_pretrained(delta_path, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--base-model-path", type=str, required=True)
parser.add_argument("--target-model-path", type=str, required=True)
parser.add_argument("--delta-path", type=str, required=True)
parser.add_argument("--hub-repo-id", type=str, default=None)
args = parser.parse_args()
make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id)
| EXA-1-master | exa/models/LLaVA-main/llava/model/make_delta.py |
EXA-1-master | exa/models/LLaVA-main/llava/model/__init__.py |
|
"""
Usage:
python3 -m llava.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
"""
import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
def apply_delta(base_model_path, target_model_path, delta_path):
print("Loading base model")
base = AutoModelForCausalLM.from_pretrained(
base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
print("Loading delta")
delta = AutoModelForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
print("Applying delta")
for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
if name not in base.state_dict():
assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
continue
if param.data.shape == base.state_dict()[name].shape:
param.data += base.state_dict()[name]
else:
assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \
f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
bparam = base.state_dict()[name]
param.data[:bparam.shape[0], :bparam.shape[1]] += bparam
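    # Shared weights are reconstructed as base + delta; delta-only rows (extra vocabulary entries)
    # already store absolute values and are left untouched here.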
print("Saving target model")
delta.save_pretrained(target_model_path)
delta_tokenizer.save_pretrained(target_model_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--base-model-path", type=str, required=True)
parser.add_argument("--target-model-path", type=str, required=True)
parser.add_argument("--delta-path", type=str, required=True)
args = parser.parse_args()
apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
| EXA-1-master | exa/models/LLaVA-main/llava/model/apply_delta.py |
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
# Need to call this before importing transformers.
from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
replace_llama_attn_with_flash_attn()
from llava.train.train import train
if __name__ == "__main__":
train()
| EXA-1-master | exa/models/LLaVA-main/llava/train/train_mem.py |
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
from typing import List, Optional, Tuple
import torch
from torch import nn
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
def forward(
self,
hidden_states: torch.Tensor,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor],
Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel
attention_mask: [bsz, q_len]
"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
# [bsz, q_len, nh, hd]
# [bsz, nh, q_len, hd]
kv_seq_len = key_states.shape[-2]
offset = 0
if past_key_value is not None:
offset = past_key_value[0].shape[-2]
kv_seq_len += offset
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states,
key_states,
cos,
sin,
offset=offset)
# [bsz, nh, t, hd]
assert not output_attentions, "output_attentions is not supported"
assert not use_cache, "use_cache is not supported"
assert past_key_value is None, "past_key_value is not supported"
# Flash attention codes from
# https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
# transform the data into the format required by flash attention
qkv = torch.stack([query_states, key_states, value_states], dim=2) # [bsz, nh, 3, q_len, hd]
qkv = qkv.transpose(1, 3) # [bsz, q_len, 3, nh, hd]
# We have disabled _prepare_decoder_attention_mask in LlamaModel
# the attention_mask should be the same as the key_padding_mask
key_padding_mask = attention_mask
if key_padding_mask is None:
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
max_s = q_len
cu_q_lens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32,
device=qkv.device)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_q_lens, max_s, 0.0,
softmax_scale=None, causal=True
)
output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
else:
nheads = qkv.shape[-2]
x = rearrange(qkv, 'b s three h d -> b s (three h d)')
x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
output_unpad = flash_attn_unpadded_qkvpacked_func(
x_unpad, cu_q_lens, max_s, 0.0,
softmax_scale=None, causal=True
)
output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
indices, bsz, q_len),
'b s (h d) -> b s h d', h=nheads)
return self.o_proj(rearrange(output,
'b s h d -> b s (h d)')), None, None
# Disable the transformation of the attention mask in LlamaModel as the flash attention
# requires the attention mask to be the same as the key_padding_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape,
inputs_embeds, past_key_values_length):
# [bsz, seq_len]
return attention_mask
def replace_llama_attn_with_flash_attn():
transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
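# Usage note: train_mem.py in this repo calls replace_llama_attn_with_flash_attn() before importing the
# training code, so LlamaAttention.forward is already patched when the model is constructed. The patched
# forward assumes no KV cache, no attention-weight outputs and no past_key_value, as the asserts above enforce.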
| EXA-1-master | exa/models/LLaVA-main/llava/train/llama_flash_attn_monkey_patch.py |
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from dataclasses import dataclass, field
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence
import torch
import transformers
from torch.utils.data import Dataset
from transformers import Trainer
from llava import conversation as conversation_lib
from PIL import Image
import torch.nn as nn
# TODO: import and use code from ../data/dataset.py
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "</s>"
DEFAULT_UNK_TOKEN = "<unk>"
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
freeze_backbone: bool = field(default=False)
tune_mm_mlp_adapter: bool = field(default=False)
vision_tower: Optional[str] = field(default=None)
mm_vision_select_layer: Optional[int] = field(default=-1) # default to the last layer
pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
mm_use_im_start_end: bool = field(default=False)
@dataclass
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
lazy_preprocess: bool = False
is_multimodal: bool = False
image_token_len: int = 0
image_folder: Optional[str] = field(default=None)
image_aspect_ratio: str = 'square'
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
remove_unused_columns: bool = field(default=False)
model_max_length: int = field(
default=512,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def _tokenize_fn(strings: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
) for text in strings
]
input_ids = labels = [
tokenized.input_ids[0] for tokenized in tokenized_list
]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def _mask_targets(target, tokenized_lens, speakers):
# cur_idx = 0
cur_idx = tokenized_lens[0]
tokenized_lens = tokenized_lens[1:]
target[:cur_idx] = IGNORE_INDEX
for tokenized_len, speaker in zip(tokenized_lens, speakers):
if speaker == "human":
target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"
conversation = header
for sentence in source:
from_str = sentence["from"]
if from_str.lower() == "human":
from_str = conversation_lib.default_conversation.roles[0]
elif from_str.lower() == "gpt":
from_str = conversation_lib.default_conversation.roles[1]
else:
from_str = 'unknown'
sentence["value"] = (BEGIN_SIGNAL + from_str + ": " +
sentence["value"] + END_SIGNAL)
if get_conversation:
conversation += sentence["value"]
conversation += BEGIN_SIGNAL
return conversation
def preprocess_multimodal(
sources: Sequence[str],
multimodal_cfg: dict,
cur_token_len: int,
) -> Dict:
is_multimodal = multimodal_cfg['is_multimodal']
# image_token_len = multimodal_cfg['image_token_len']
image_token_len = cur_token_len
if not is_multimodal:
return sources
for source in sources:
for sentence in source:
replace_token = DEFAULT_IMAGE_PATCH_TOKEN * image_token_len
if multimodal_cfg['use_im_start_end']:
replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)
return sources
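# Worked example: with cur_token_len = 256 (a 224x224 crop and 14-pixel patches) and use_im_start_end=True,
# a human turn such as "<image>\nWhat is in the photo?" becomes
# "<im_start>" + "<im_patch>" * 256 + "<im_end>" + "\nWhat is in the photo?",
# i.e. one placeholder patch token per visual token, which the model later fills with image features.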
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
"""
    Given a list of sources, each of which is a conversation list, this transform:
    1. Adds the signal '### ' at the beginning of each sentence, with the end signal '\n';
    2. Concatenates the conversations together;
    3. Tokenizes the concatenated conversation;
    4. Makes a deep copy as the target and masks the human words with IGNORE_INDEX.
"""
# add end signal and concatenate together
    header = f"{conversation_lib.default_conversation.system}\n\n"
    conversations = []
    for source in sources:
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)
# tokenize conversations
conversations_tokenized = _tokenize_fn(conversations, tokenizer)
input_ids = conversations_tokenized["input_ids"]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source],
tokenizer)["input_ids_lens"]
speakers = [sentence["from"] for sentence in source]
_mask_targets(target, tokenized_lens, speakers)
return dict(input_ids=input_ids, labels=targets)
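# A single formatted conversation produced above looks roughly like:
#   "<system prompt>\n\n### Human: <question>\n### Assistant: <answer>\n### "
# and only the assistant turns keep their labels; the header and human turns are masked to IGNORE_INDEX
# so they do not contribute to the loss.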
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer):
super(SupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...")
sources = [example["conversations"] for example in list_data_dict]
data_dict = preprocess(sources, tokenizer)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer,
multimodal_cfg: dict):
super(LazySupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.list_data_dict = list_data_dict
self.multimodal_cfg = multimodal_cfg
def __len__(self):
return len(self.list_data_dict)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
if 'image' in sources[0]:
image_file = self.list_data_dict[i]['image']
image_folder = self.multimodal_cfg['image_folder']
processor = self.multimodal_cfg['image_processor']
image = Image.open(os.path.join(image_folder, image_file))
if self.multimodal_cfg['image_aspect_ratio'] == 'keep':
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
max_len, min_len = 448, 224
shortest_edge = int(min(max_len / aspect_ratio, min_len))
image = processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={"shortest_edge": shortest_edge})['pixel_values'][0]
else:
image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
cur_token_len = (image.shape[1]//14) * (image.shape[2]//14) # FIXME: 14 is hardcoded patch size
sources = preprocess_multimodal(
copy.deepcopy([e["conversations"] for e in sources]),
self.multimodal_cfg, cur_token_len)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
if 'image' in self.list_data_dict[i]:
data_dict['image'] = image
elif self.multimodal_cfg['is_multimodal']:
# image does not exist in the data, but the model is multimodal
crop_size = self.multimodal_cfg['image_processor'].crop_size
data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
return data_dict
@dataclass
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
return batch
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (LazySupervisedDataset
if data_args.lazy_preprocess else SupervisedDataset)
train_dataset = dataset_cls(tokenizer=tokenizer,
data_path=data_args.data_path,
multimodal_cfg=dict(
is_multimodal=data_args.is_multimodal,
image_token_len=data_args.image_token_len,
image_folder=data_args.image_folder,
image_aspect_ratio=data_args.image_aspect_ratio,
use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False),
image_processor=getattr(data_args, 'image_processor', None)))
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def train():
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model = transformers.LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
)
if model_args.freeze_backbone:
model.model.requires_grad_(False)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
if tokenizer.pad_token is None:
smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),
tokenizer=tokenizer,
model=model,
)
if "llama" in model_args.model_name_or_path:
tokenizer.add_special_tokens({
"eos_token": DEFAULT_EOS_TOKEN,
"bos_token": DEFAULT_BOS_TOKEN,
"unk_token": DEFAULT_UNK_TOKEN,
})
if model_args.vision_tower is not None:
model.config.mm_vision_tower = model_args.vision_tower
from transformers import CLIPVisionModel, CLIPImageProcessor
dtype = torch.float32
if training_args.fp16:
dtype = torch.float16
if training_args.bf16:
dtype = torch.bfloat16
if not hasattr(model.model, 'vision_tower'):
vision_tower = CLIPVisionModel.from_pretrained(model_args.vision_tower)
else:
vision_tower = model.model.vision_tower[0]
image_processor = CLIPImageProcessor.from_pretrained(model_args.vision_tower)
vision_config = vision_tower.config
num_patches = (vision_config.image_size // vision_config.patch_size) ** 2
data_args.image_token_len = num_patches
data_args.image_processor = image_processor
data_args.is_multimodal = True
vision_tower.requires_grad_(False)
# model.model.vision_tower = vision_tower
# HACK: for FSDP
vision_tower.to(dtype=dtype, device=training_args.device)
model.model.vision_tower = [vision_tower]
model.config.use_mm_proj = True
model.config.mm_hidden_size = vision_config.hidden_size
model.config.mm_vision_select_layer = model_args.mm_vision_select_layer
if not hasattr(model.model, 'mm_projector'):
mm_projector = nn.Linear(vision_config.hidden_size, model.config.hidden_size)
else:
mm_projector = model.model.mm_projector
model.model.mm_projector = mm_projector
if model_args.pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
mm_projector.load_state_dict({k.split('.')[-1]: v for k, v in mm_projector_weights.items()})
model.config.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
if model_args.tune_mm_mlp_adapter:
model.requires_grad_(False)
for p in mm_projector.parameters():
p.requires_grad = True
model.config.mm_use_im_start_end = model_args.mm_use_im_start_end
data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
vision_config.use_im_start_end = model_args.mm_use_im_start_end
if model_args.mm_use_im_start_end:
num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
vision_config.im_start_token, vision_config.im_end_token = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
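                # the new <im_start>/<im_end> rows are initialized to the mean of all pre-existing
                # embedding rows (input and output embeddings separately)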
if model_args.tune_mm_mlp_adapter:
model.model.orig_embeds_params = [model.get_input_embeddings().weight.data.clone().to(device=training_args.device)]
for p in model.get_input_embeddings().parameters():
p.requires_grad = True
for p in model.get_output_embeddings().parameters():
p.requires_grad = False
if model_args.pretrain_mm_mlp_adapter:
mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
assert input_embeddings.shape == embed_tokens_weight.shape
assert num_new_tokens == 2
input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
data_module = make_supervised_data_module(tokenizer=tokenizer,
data_args=data_args)
trainer = Trainer(model=model,
tokenizer=tokenizer,
args=training_args,
**data_module)
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
safe_save_model_for_hf_trainer(trainer=trainer,
output_dir=training_args.output_dir)
if __name__ == "__main__":
train()
| EXA-1-master | exa/models/LLaVA-main/llava/train/train.py |