from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Albert", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='efficientnet_b0', device=device,
batch_size=batch_size, extra_args=extra_args)
|
"""
Mask R-CNN model from torchvision
"""
import torch
import os
import itertools
import random
import numpy as np
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from pathlib import Path
from typing import Tuple
# Model specific imports
import torchvision
from .coco_utils import ConvertCocoPolysToMask
from torchvision.datasets.coco import CocoDetection
# silence some spam
from pycocotools import coco
coco.print = lambda *args: None
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
COCO_DATA_KEY = "coco_2017_val_100"
COCO_DATA = {
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json")
}
def _collate_fn(batch):
return tuple(zip(*batch))
def _prefetch(loader, device):
items = []
for images, targets in loader:
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
items.append((images, targets))
return items
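# A minimal, hedged illustration (not used by the benchmark): _collate_fn simply transposes
# a batch of (image, target) pairs into (images, targets) tuples, matching the list-style
# inputs that MaskRCNN expects (see the class comment below). The toy shapes are made up.
def _collate_example():
    toy_batch = [
        (torch.rand(3, 4, 4), {"boxes": torch.zeros(0, 4)}),
        (torch.rand(3, 4, 4), {"boxes": torch.zeros(0, 4)}),
    ]
    images, targets = _collate_fn(toy_batch)
    return len(images), len(targets)  # 2 image tensors, 2 target dicts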
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
# MaskRCNN doesn't actually take the inputs in batches; it takes a list
# of tensors which individually are CHW
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
NUM_OF_BATCHES = 1
ALLOW_CUSTOMIZE_BSIZE = False
def __init__(self, test, device, batch_size=None, extra_args=[], model_kwargs={}):
# reduce the eval batch size when running on CPU
# see: https://github.com/pytorch/benchmark/issues/895
if device == "cpu":
self.DEFAULT_EVAL_BSIZE = 1
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(
weights=torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights.COCO_V1, **model_kwargs
).to(self.device)
# setup optimizer
# optimizer parameters copied from
# https://github.com/pytorch/vision/blob/30f4d108319b0cd28ae5662947e300aad98c32e9/references/detection/train.py#L77
lr = 0.02
momentum = 0.9
weight_decay = 1e-4
params = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
transforms = ConvertCocoPolysToMask()
dataset = CocoDetection(root=os.path.join(DATA_DIR, COCO_DATA[COCO_DATA_KEY][0]),
annFile=os.path.join(DATA_DIR, COCO_DATA[COCO_DATA_KEY][1]),
transforms=transforms)
sampler = torch.utils.data.SequentialSampler(dataset)
self.data_loader = _prefetch(torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
sampler=sampler,
collate_fn=_collate_fn), self.device)
def get_module(self):
self.model.eval()
for (example_inputs, _example_targets) in self.data_loader:
return self.model, (example_inputs, )
def train(self):
self.model.train()
for _batch_id, (images, targets) in zip(range(self.NUM_OF_BATCHES), self.data_loader):
# images = list(image.to(self.device) for image in images)
# targets = [{k: v.to(self.device) for k, v in t.items()} for t in targets]
loss_dict = self.model(images, targets)
losses = sum(loss for loss in loss_dict.values())
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
self.model.eval()
with torch.no_grad():
for _batch_id, (images, _targets) in zip(range(self.NUM_OF_BATCHES), self.data_loader):
out = self.model(images)
out = list(map(lambda x: x.values(), out))
return tuple(itertools.chain(*out))
|
import sys
import subprocess
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "coco2017-minimal.tar.gz", decompress=True)
pip_install_requirements()
|
import torch
from pycocotools import mask as coco_mask
from torchvision.transforms import functional as F
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
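# Illustrative sketch with made-up coordinates: a single square polygon becomes one
# (1, H, W) binary mask. Only shapes matter here; nothing below touches the COCO data.
def _poly_to_mask_example():
    square = [[1.0, 1.0, 4.0, 1.0, 4.0, 4.0, 1.0, 4.0]]  # one polygon as [x0, y0, x1, y1, ...]
    masks = convert_coco_poly_to_mask([square], height=6, width=6)
    return masks.shape  # torch.Size([1, 6, 6])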
class ConvertCocoPolysToMask:
def __call__(self, image, target):
w, h = image.size
image_id = target[0]["image_id"] if target else []
image_id = torch.tensor([image_id])
anno = target
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
# Convert image from PIL to tensor
image = F.pil_to_tensor(image)
image = F.convert_image_dtype(image)
return image, target
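# A small, self-contained usage sketch of the transform above. The annotation fields are
# hypothetical (a single toy box/polygon), but they follow the COCO layout the transform expects.
def _convert_transform_example():
    from PIL import Image
    toy_image = Image.new("RGB", (8, 8))
    toy_anno = [{
        "image_id": 0,
        "bbox": [1.0, 1.0, 4.0, 4.0],  # COCO convention: x, y, width, height
        "category_id": 1,
        "segmentation": [[1.0, 1.0, 5.0, 1.0, 5.0, 5.0, 1.0, 5.0]],
        "area": 16.0,
        "iscrowd": 0,
    }]
    img_tensor, target = ConvertCocoPolysToMask()(toy_image, toy_anno)
    return img_tensor.shape, target["boxes"], target["masks"].shape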
|
import os
import logging
import torch
from pathlib import Path
from contextlib import suppress
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
# effdet imports
from effdet import create_model, create_loader
from effdet.data import resolve_input_config
# timm imports
from timm.models.layers import set_layer_config
from timm.optim import create_optimizer
from timm.utils import ModelEmaV2, NativeScaler
from timm.scheduler import create_scheduler
# local imports
from .args import get_args
from .train import train_epoch, validate
from .loader import create_datasets_and_loaders
from torch.utils._pytree import tree_map
from typing import Tuple
# setup coco2017 input path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal", "coco")
def prefetch(loader, device, num_of_batches):
prefetched_loader = []
for _bid, (input, target) in zip(range(num_of_batches), loader):
prefetched_loader.append((tree_map(lambda x: x.to(device, dtype=torch.float32) if isinstance(x, torch.Tensor) else x, input),
tree_map(lambda x: x.to(device, dtype=torch.float32) if isinstance(x, torch.Tensor) else x, target)))
return prefetched_loader
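# Hedged illustration of the tree_map pattern used in prefetch(): the callable is applied to
# every tensor leaf in a nested container while non-tensor leaves pass through unchanged.
# The nested dict below is hypothetical and unrelated to the effdet loader format.
def _tree_map_example(device="cpu"):
    nested = {"img": torch.zeros(2, 3), "meta": {"scale": torch.ones(2), "id": 7}}
    return tree_map(lambda x: x.to(device) if isinstance(x, torch.Tensor) else x, nested)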
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
# Original Train batch size 32 on 2x RTX 3090 (24 GB cards)
# Downscale to batch size 16 on single GPU
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 128
# prefetch only 1 batch
NUM_OF_BATCHES = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
if device != "cuda":
# Only implemented on CUDA because the original model code explicitly calls the `Tensor.cuda()` API
# https://github.com/rwightman/efficientdet-pytorch/blob/9cb43186711d28bd41f82f132818c65663b33c1f/effdet/data/loader.py#L114
raise NotImplementedError("The original model code forces the use of CUDA.")
# generate arguments
args = get_args()
# setup train and eval batch size
args.batch_size = self.batch_size
# Disable distributed
args.distributed = False
args.device = self.device
args.torchscript = False
args.world_size = 1
args.rank = 0
args.pretrained_backbone = not args.no_pretrained_backbone
args.prefetcher = not args.no_prefetcher
args.root = DATA_DIR
with set_layer_config(scriptable=args.torchscript):
timm_extra_args = {}
if args.img_size is not None:
timm_extra_args = dict(image_size=(args.img_size, args.img_size))
if test == "train":
model = create_model(
model_name=args.model,
bench_task='train',
num_classes=args.num_classes,
pretrained=args.pretrained,
pretrained_backbone=args.pretrained_backbone,
redundant_bias=args.redundant_bias,
label_smoothing=args.smoothing,
legacy_focal=args.legacy_focal,
jit_loss=args.jit_loss,
soft_nms=args.soft_nms,
bench_labeler=args.bench_labeler,
checkpoint_path=args.initial_checkpoint,
)
elif test == "eval":
model = create_model(
model_name=args.model,
bench_task='predict',
num_classes=args.num_classes,
pretrained=args.pretrained,
redundant_bias=args.redundant_bias,
soft_nms=args.soft_nms,
checkpoint_path=args.checkpoint,
checkpoint_ema=args.use_ema,
**timm_extra_args,
)
model_config = model.config # grab before we obscure with DP/DDP wrappers
self.model = model.to(device)
if args.channels_last:
self.model = self.model.to(memory_format=torch.channels_last)
self.loader_train, self.loader_eval, self.evaluator, _, dataset_eval = create_datasets_and_loaders(args, model_config)
self.amp_autocast = suppress
if test == "train":
self.optimizer = create_optimizer(args, model)
self.loss_scaler = None
self.model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
self.model_ema = ModelEmaV2(model, decay=args.model_ema_decay)
self.lr_scheduler, self.num_epochs = create_scheduler(args, self.optimizer)
if model_config.num_classes < self.loader_train.dataset.parser.max_label:
logging.error(
f'Model {model_config.num_classes} has fewer classes than dataset {self.loader_train.dataset.parser.max_label}.')
exit(1)
if model_config.num_classes > self.loader_train.dataset.parser.max_label:
logging.warning(
f'Model {model_config.num_classes} has more classes than dataset {self.loader_train.dataset.parser.max_label}.')
self.loader_train = prefetch(self.loader_train, self.device, self.NUM_OF_BATCHES)
self.loader_eval = prefetch(self.loader_eval, self.device, self.NUM_OF_BATCHES)
self.loader = self.loader_train
elif test == "eval":
# Create eval loader
input_config = resolve_input_config(args, model_config)
self.loader = create_loader(
dataset_eval,
input_size=input_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=args.eval_interpolation,
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
pin_mem=args.pin_mem)
self.loader = prefetch(self.loader, self.device, self.NUM_OF_BATCHES)
self.args = args
# Only run 1 epoch
self.num_epochs = 1
def get_module(self):
for _, (input, target) in zip(range(self.NUM_OF_BATCHES), self.loader):
return self.model, (input, target)
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.lr_scheduler, self.num_epochs = create_scheduler(self.args, self.optimizer)
def enable_amp(self):
if self.device == "cuda":
self.amp_autocast = torch.cuda.amp.autocast
elif self.device == "cpu":
self.amp_autocast = torch.cpu.amp.autocast
self.loss_scaler = NativeScaler()
def train(self):
eval_metric = self.args.eval_metric
for epoch in range(self.num_epochs):
train_metrics = train_epoch(
epoch, self.model, self.loader_train,
self.optimizer, self.args,
lr_scheduler=self.lr_scheduler, amp_autocast = self.amp_autocast,
loss_scaler=self.loss_scaler, model_ema=self.model_ema,
num_batch=self.NUM_OF_BATCHES,
)
# TorchBench: skip validation step in train
# the overhead of evaluating with coco style datasets is fairly high, so just ema or non, not both
# if self.model_ema is not None:
# eval_metrics = validate(self.model_ema.module, self.loader_eval, self.args, self.evaluator, log_suffix=' (EMA)', num_batch=self.NUM_OF_BATCHES)
# else:
# eval_metrics = validate(self.model, self.loader_eval, self.args, self.evaluator, num_batch=self.NUM_OF_BATCHES)
# if self.lr_scheduler is not None:
# # step LR for next epoch
# self.lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
def eval(self) -> Tuple[torch.Tensor]:
with torch.no_grad():
for input, target in self.loader:
with self.amp_autocast():
output = self.model(input, img_info=target)
self.evaluator.add_predictions(output, target)
return (output, )
|
from effdet.data import resolve_input_config, SkipSubset
from effdet import create_loader, create_dataset, create_evaluator
from effdet.anchors import Anchors, AnchorLabeler
from effdet.data.dataset_config import CocoCfg
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class Coco2017MinimalCfg(CocoCfg):
variant: str = '2017-minimal'
splits: Dict[str, dict] = field(default_factory=lambda: dict(
train=dict(ann_filename='annotations/instances_val2017_100.json', img_dir='val2017', has_labels=True),
val=dict(ann_filename='annotations/instances_val2017_100.json', img_dir='val2017', has_labels=True),
))
def create_datasets_and_loaders(
args,
model_config,
transform_train_fn=None,
transform_eval_fn=None,
collate_fn=None,
):
""" Setup datasets, transforms, loaders, evaluator.
Args:
args: Command line args / config for training
model_config: Model specific configuration dict / struct
transform_train_fn: Override default image + annotation transforms (see note in loaders.py)
transform_eval_fn: Override default image + annotation transforms (see note in loaders.py)
collate_fn: Override default fast collate function
Returns:
Train loader, validation loader, evaluator
"""
input_config = resolve_input_config(args, model_config=model_config)
dataset_train, dataset_eval = create_dataset(args.dataset, args.root, custom_dataset_cfg=Coco2017MinimalCfg())
# setup labeler in loader/collate_fn if not enabled in the model bench
labeler = None
if not args.bench_labeler:
labeler = AnchorLabeler(
Anchors.from_config(model_config), model_config.num_classes, match_threshold=0.5)
loader_train = create_loader(
dataset_train,
input_size=input_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
# color_jitter=args.color_jitter,
# auto_augment=args.aa,
interpolation=args.train_interpolation or input_config['interpolation'],
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
anchor_labeler=labeler,
transform_fn=transform_train_fn,
collate_fn=collate_fn,
)
if args.val_skip > 1:
dataset_eval = SkipSubset(dataset_eval, args.val_skip)
loader_eval = create_loader(
dataset_eval,
input_size=input_config['input_size'],
batch_size=args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=input_config['interpolation'],
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
anchor_labeler=labeler,
transform_fn=transform_eval_fn,
collate_fn=collate_fn,
)
evaluator = create_evaluator(args.dataset, loader_eval.dataset, distributed=args.distributed, pred_yxyx=False)
return loader_train, loader_eval, evaluator, dataset_train, dataset_eval
|
import torch
from collections import OrderedDict
from contextlib import suppress
from timm.utils import AverageMeter, reduce_tensor
def train_epoch(
epoch, model, loader, optimizer, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None,
num_batch=1):
# batch_time_m = AverageMeter()
# data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
# end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in zip(range(num_batch), loader):
last_batch = batch_idx == last_idx
# data_time_m.update(time.time() - end)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input, target)
loss = output['loss']
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters())
else:
loss.backward()
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
# batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
# if args.distributed:
# reduced_loss = reduce_tensor(loss.data, args.world_size)
# losses_m.update(reduced_loss.item(), input.size(0))
#
# if args.local_rank == 0:
# logging.info(
# 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
# 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
# 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
# '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
# 'LR: {lr:.3e} '
# 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
# epoch,
# batch_idx, len(loader),
# 100. * batch_idx / last_idx,
# loss=losses_m,
# batch_time=batch_time_m,
# rate=input.size(0) * args.world_size / batch_time_m.val,
# rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
# lr=lr,
# data_time=data_time_m))
# if args.save_images and output_dir:
# torchvision.utils.save_image(
# input,
# os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
# padding=0,
# normalize=True)
# if saver is not None and args.recovery_interval and (
# last_batch or (batch_idx + 1) % args.recovery_interval == 0):
# saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
# end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, args, evaluator=None, log_suffix='',
num_batch=1):
# batch_time_m = AverageMeter()
losses_m = AverageMeter()
model.eval()
# end = time.time()
# last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in zip(range(num_batch), loader):
# last_batch = batch_idx == last_idx
output = model(input, target)
loss = output['loss']
if evaluator is not None:
evaluator.add_predictions(output['detections'], target)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
# batch_time_m.update(time.time() - end)
# end = time.time()
# if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
# log_name = 'Test' + log_suffix
# logging.info(
# '{0}: [{1:>4d}/{2}] '
# 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
# 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '.format(
# log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m))
metrics = OrderedDict([('loss', losses_m.avg)])
if evaluator is not None:
metrics['map'] = evaluator.evaluate()
return metrics
|
import yaml
import argparse
from timm.utils import add_bool_arg
def get_args(config_file=None):
def _parse_args():
if config_file:
with open(config_file, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# There may be remaining unrecognized options
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args, _ = parser.parse_known_args()
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
# parser.add_argument('root', metavar='DIR',
# help='path to dataset')
parser.add_argument('--dataset', default='coco', type=str, metavar='DATASET',
help='Name of dataset to train (default: "coco")')
parser.add_argument('--model', default='tf_efficientdet_d1', type=str, metavar='MODEL',
help='Name of model to train (default: "tf_efficientdet_d1")')
add_bool_arg(parser, 'redundant-bias', default=None, help='override model config for redundant bias')
add_bool_arg(parser, 'soft-nms', default=None, help='override model config for soft-nms')
parser.add_argument('--val-skip', type=int, default=0, metavar='N',
help='Skip every N validation samples.')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='Override num_classes in model config if set. For fine-tuning from pretrained.')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--no-pretrained-backbone', action='store_true', default=False,
help='Do not start with pretrained backbone weights, fully random.')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default=None, type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--clip-grad', type=float, default=10.0, metavar='NORM',
help='Clip gradient norm (default: 10.0)')
# Optimizer parameters
parser.add_argument('--opt', default='momentum', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "momentum")')
parser.add_argument('--opt-eps', default=1e-3, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-3)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=4e-5,
help='weight decay (default: 0.00004)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
# loss
parser.add_argument('--smoothing', type=float, default=None, help='override model config label smoothing')
add_bool_arg(parser, 'jit-loss', default=None, help='override model config for torchscript jit loss fn')
add_bool_arg(parser, 'legacy-focal', default=None, help='override model config to use legacy focal loss')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=0, metavar='N',
help='how many training processes to use (default: 0)')
parser.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
add_bool_arg(parser, 'bench-labeler', default=False,
help='label targets in model bench, increases GPU load at expense of loader processes')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='map', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "map")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
# Evaluation parameters
parser.add_argument('--eval-interpolation', default='bilinear', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
args, _ = _parse_args()
return args
|
import os
import sys
import patch
from pathlib import Path
import subprocess
from utils import s3_utils
def patch_effdet():
import effdet
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "effdet.patch")
target_dir = os.path.dirname(effdet.__file__)
p = patch.fromfile(patch_file)
if not p.apply(strip=1, root=target_dir):
print("Failed to patch effdet. Exit.")
exit(1)
def patch_pycocotools():
import pycocotools
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "pycocotools.patch")
target_dir = os.path.dirname(os.path.abspath(pycocotools.__file__))
p = patch.fromfile(patch_file)
if not p.apply(strip=1, root=target_dir):
print("Failed to patch pycocotools. Exit.")
exit(1)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "coco2017-minimal.tar.gz", decompress=True)
pip_install_requirements()
patch_effdet()
patch_pycocotools()
|
import os
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel, RowwiseParallel
from torchbenchmark.tasks import NLP
from ...util.model import BenchmarkModel
from .model import LLaMA
class Model(BenchmarkModel):
task = NLP.GENERATION
DEFAULT_EVAL_BSIZE = 1
def validate_environment(self):
if not torch.cuda.is_available() or "cuda" not in self.device:
return NotImplementedError("Model requires CUDA")
if not torch.cuda.is_bf16_supported():
return NotImplementedError("Model requires BF16")
if not hasattr(self, "_world_size"):
return NotImplementedError("Model needs to be run via dynamo torchbench and be provided distributed parameters")
if self._world_size != torch.cuda.device_count():
return NotImplementedError(
f"DTensor and all local GPUs to be within the device mesh. {torch.cuda.device_count()} local GPUs, but only world size is only {self._world_size}"
)
return None
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(
test=test,
device=device,
batch_size=batch_size,
extra_args=extra_args,
)
error = self.validate_environment()
if error:
raise error
self.model = LLaMA.from_name("7B", self._world_size).to(device=device, dtype=torch.bfloat16)
# Tensor parallelism using DTensor
mesh = DeviceMesh("cuda", list(range(self._world_size)))
for block in self.model.transformer.h:
# prepare attention weights to be parallelized
block.attn.prepare_qkv_for_dtensor_tp()
parallelize_module(
module=block,
device_mesh=mesh,
parallelize_plan={
"attn.c_attn_q": ColwiseParallel(),
"attn.c_attn_k": ColwiseParallel(),
"attn.c_attn_v": ColwiseParallel(),
"attn.c_proj": RowwiseParallel(),
"mlp.c_fc1": ColwiseParallel(),
"mlp.c_fc2": ColwiseParallel(),
"mlp.c_proj": RowwiseParallel(),
},
tp_mesh_dim=0,
)
max_batch_size = self.DEFAULT_EVAL_BSIZE
self.model.setup_caches(
max_batch_size=max_batch_size, max_seq_length=self.model.config.block_size
)
prompt_size = 10
idx = torch.randint(
self.model.config.vocab_size,
(max_batch_size, prompt_size),
dtype=torch.int32,
device=device,
)
input_pos = torch.arange(prompt_size, device=device)
self.example_inputs = [idx, input_pos]
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Training not supported for this model")
def eval(self):
raise NotImplementedError("Model needs to be run via dynamo torchbench and be provided distributed parameters")
|
"""Full definition of a LLaMA Language Model, all of it in this single file.
Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
"""
# mypy: ignore-errors
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from typing_extensions import Self
import torch
import torch.nn as nn
from torch.nn import functional as F
MaskCache = torch.Tensor
RoPECache = torch.Tensor
KVCache = Tuple[torch.Tensor, torch.Tensor]
def find_multiple(n: int, k: int) -> int:
if n % k == 0:
return n
return n + k - (n % k)
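# For example, find_multiple(32000, 64) == 32000 and find_multiple(32001, 64) == 32064;
# LLaMAConfig uses it below to pad the vocabulary size up to a multiple of 64.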
class LinearInt8(torch.nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: torch.Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.register_buffer("weight", torch.empty((out_features, in_features), dtype=torch.int8))
# if bias:
# self.register_buffer("bias", torch.empty(out_features, **factory_kwargs, dtype=torch.int8))
# else:
# self.bias('bias', None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.weight.to(dtype=input.dtype))
# nn.Linear = LinearInt8
@dataclass
class LLaMAConfig:
block_size: int = 2048
vocab_size: int = 32000
padded_vocab_size: Optional[int] = None
n_layer: int = 32
n_head: int = 32
n_embd: int = 4096
def __post_init__(self):
if self.padded_vocab_size is None:
self.padded_vocab_size = find_multiple(self.vocab_size, 64)
@classmethod
def from_name(cls, name: str) -> Self:
return cls(**llama_configs[name])
llama_configs = {
"7B": dict(n_layer=32, n_head=32, n_embd=4096),
"13B": dict(n_layer=40, n_head=40, n_embd=5120),
"30B": dict(n_layer=60, n_head=52, n_embd=6656),
"65B": dict(n_layer=80, n_head=64, n_embd=8192),
}
class KVCache(nn.Module):
@torch.no_grad()
def __init__(self, max_batch_size, max_seq_length, n_heads, head_size, device='cuda', dtype=torch.bfloat16):
super().__init__()
cache_shape = (max_batch_size, n_heads, max_seq_length, head_size)
self.k_cache = torch.nn.Parameter(torch.zeros(cache_shape, device=device, dtype=dtype))
self.v_cache = torch.nn.Parameter(torch.zeros(cache_shape, device=device, dtype=dtype))
@torch.no_grad()
def update(self, input_pos, k_val, v_val):
# input_pos: [S], k_val: [B, H, S, D]
assert input_pos.shape[0] == k_val.shape[2]
self.k_cache[:, :, input_pos] = k_val
self.v_cache[:, :, input_pos] = v_val
return self.k_cache, self.v_cache
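# Minimal sketch of the cache update above (CPU tensors, toy sizes): one new time step is
# written into position 3 of a (B=1, H=2, S=8, D=4) cache. Purely illustrative.
def _kv_cache_example():
    cache = KVCache(max_batch_size=1, max_seq_length=8, n_heads=2, head_size=4,
                    device="cpu", dtype=torch.float32)
    pos = torch.tensor([3])
    k_new = torch.randn(1, 2, 1, 4)
    v_new = torch.randn(1, 2, 1, 4)
    k, v = cache.update(pos, k_new, v_new)
    return k.shape, v.shape  # both torch.Size([1, 2, 8, 4])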
class KVCacheAggregator(nn.Module):
def __init__(self):
super().__init__()
self.kv_caches = nn.ModuleList([])
def initialize(self, layers, max_batch_size, max_seq_length, n_heads, head_size, device='cuda', dtype=torch.bfloat16):
self.kv_caches = nn.ModuleList([KVCache(max_batch_size, max_seq_length, n_heads, head_size, device=device, dtype=dtype) for _ in range(layers)])
def __getitem__(self, idx):
return self.kv_caches[idx]
def clear(self):
self.kv_caches = nn.ParameterList([])
class LLaMA(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.world_size = world_size
assert config.padded_vocab_size is not None
self.config = config
self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=False)
self.transformer = nn.ModuleDict(
dict(
wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
h=nn.ModuleList(Block(config, self.world_size) for _ in range(config.n_layer)),
ln_f=RMSNorm(config.n_embd),
)
)
self.rope_cache: Optional[RoPECache] = None
self.mask_cache: Optional[MaskCache] = None
self.kv_caches = KVCacheAggregator()
self.max_batch_size = None
self.max_seq_length = None
def setup_caches(self, max_batch_size, max_seq_length, device='cuda', dtype=torch.bfloat16):
n_embd = self.config.n_embd // self.world_size
n_head = self.config.n_head // self.world_size
head_size = n_embd // n_head
self.max_seq_length = max_seq_length
self.max_batch_size = max_batch_size
self.kv_caches.initialize(layers=self.config.n_layer, max_batch_size=max_batch_size, max_seq_length=max_seq_length, n_heads=n_head, head_size=head_size)
self.rope_cache = build_rope_cache(
seq_len=self.config.block_size,
n_elem=head_size,
dtype=dtype,
device=device,
)
ones = torch.ones((self.config.block_size, self.config.block_size), device=device, dtype=torch.bool)
self.mask_cache = torch.tril(ones).unsqueeze(0).unsqueeze(0)
def _init_weights(self, module: nn.Module) -> None:
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
def forward(
self, idx: torch.Tensor, input_pos: Optional[torch.Tensor] = None
) -> Union[torch.Tensor, Tuple[torch.Tensor, List[KVCache]]]:
B, T = idx.size()
assert self.rope_cache is not None, "Caches must be initialized first"
block_size = self.config.block_size
max_seq_length = self.max_seq_length
if max_seq_length is None:
max_seq_length = block_size
assert T <= max_seq_length, f"Cannot forward sequence of length {T}, max seq length is only {max_seq_length}"
assert max_seq_length <= block_size, f"Cannot attend to {max_seq_length}, block size is only {block_size}"
assert T <= block_size, f"Cannot forward sequence of length {T}, block size is only {block_size}"
rope = self.rope_cache.index_select(0, input_pos)
mask = self.mask_cache.index_select(2, input_pos)
mask = mask[:, :, :, :max_seq_length]
# forward the model itself
x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
for i, block in enumerate(self.transformer.h):
x, new_kv_cache = block(x, rope, mask, max_seq_length, input_pos, self.kv_caches[i])
x = self.transformer.ln_f(x)
logits = self.lm_head(x) # (b, t, vocab_size)
return logits
@classmethod
def from_name(cls, name: str, world_size: int) -> Self:
return cls(LLaMAConfig.from_name(name), world_size)
def reset_cache(self) -> None:
self.kv_caches.clear()
class Block(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.rms_1 = RMSNorm(config.n_embd)
self.attn = CausalSelfAttention(config, world_size)
self.rms_2 = RMSNorm(config.n_embd)
self.mlp = MLP(config)
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: MaskCache,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache]]:
h, new_kv_cache = self.attn(self.rms_1(x), rope, mask, max_seq_length, input_pos, kv_cache)
x = x + h
x = x + self.mlp(self.rms_2(x))
return x, new_kv_cache
class CausalSelfAttention(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.world_size = world_size
assert config.n_embd % config.n_head == 0
self.config = config
# key, query, value projections for all heads, but in a batch
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
self.n_head = config.n_head
self.n_embd = config.n_embd
self.block_size = config.block_size
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: MaskCache,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache]]:
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
_C = C // self.world_size
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
q = self.c_attn_q(x)
k = self.c_attn_k(x)
v = self.c_attn_v(x)
n_head = self.n_head // self.world_size
head_size = _C // n_head
k = k.view(B, T, n_head, head_size)
q = q.view(B, T, n_head, head_size)
v = v.view(B, T, n_head, head_size)
q = apply_rope(q, rope)
k = apply_rope(k, rope)
k = k.transpose(1, 2) # (B, nh, T, hs)
q = q.transpose(1, 2) # (B, nh, T, hs)
v = v.transpose(1, 2) # (B, nh, T, hs)
if kv_cache is not None:
k, v = kv_cache.update(input_pos, k, v)
# efficient attention using Flash Attention CUDA kernels
# y = F.scaled_dot_product_attention(q, k, v)
y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
y = y.transpose(1, 2).contiguous().view(B, T, _C) # re-assemble all head outputs side by side
# output projection
y = self.c_proj(y)
return y, kv_cache
def prepare_qkv_for_dtensor_tp(self):
attn = self.c_attn
assert attn.in_features % self.world_size == 0 # q, k, v must be shardable
attn.out_features = attn.out_features // self.world_size
# Shard on dim 0 since attn.weight is transposed
# Shard q, k, v separately
q, k, v = attn.weight.split(self.config.n_embd, dim=0) # (C, C)
self.c_attn_q = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_q.weight = nn.Parameter(q)
self.c_attn_k = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_k.weight = nn.Parameter(k)
self.c_attn_v = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_v.weight = nn.Parameter(v)
del self.c_attn
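# Hedged sketch of prepare_qkv_for_dtensor_tp() with a tiny hypothetical config and
# world_size=1: the fused (3*C, C) c_attn weight is split into three separate (C, C)
# projections so each can later be sharded column-wise by the tensor-parallel plan.
def _qkv_split_example():
    cfg = LLaMAConfig(block_size=16, vocab_size=32, n_layer=1, n_head=2, n_embd=8)
    attn = CausalSelfAttention(cfg, world_size=1)
    attn.prepare_qkv_for_dtensor_tp()
    return attn.c_attn_q.weight.shape, attn.c_attn_k.weight.shape, attn.c_attn_v.weight.shape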
class MLP(nn.Module):
def __init__(self, config: LLaMAConfig) -> None:
super().__init__()
hidden_dim = 4 * config.n_embd
n_hidden = int(2 * hidden_dim / 3)
n_hidden = find_multiple(n_hidden, 256)
self.c_fc1 = nn.Linear(config.n_embd, n_hidden, bias=False)
self.c_fc2 = nn.Linear(config.n_embd, n_hidden, bias=False)
self.c_proj = nn.Linear(n_hidden, config.n_embd, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = F.silu(self.c_fc1(x)) * self.c_fc2(x)
x = self.c_proj(x)
return x
class RMSNorm(nn.Module):
"""Root Mean Square Layer Normalization.
Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License:
https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE.
"""
def __init__(self, size: int, dim: int = -1, eps: float = 1e-5) -> None:
super().__init__()
self.scale = nn.Parameter(torch.ones(size))
self.eps = eps
self.dim = dim
def forward(self, x: torch.Tensor) -> torch.Tensor:
# NOTE: the original RMSNorm paper implementation is not equivalent
# norm_x = x.norm(2, dim=self.dim, keepdim=True)
# rms_x = norm_x * d_x ** (-1. / 2)
# x_normed = x / (rms_x + self.eps)
norm_x = torch.mean(x * x, dim=self.dim, keepdim=True)
x_normed = x * torch.rsqrt(norm_x + self.eps)
return self.scale * x_normed
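# Quick sanity sketch (illustrative only): with the default unit scale, RMSNorm output has
# approximately unit root-mean-square along the normalized dimension.
def _rmsnorm_example():
    x = torch.randn(2, 5, 16)
    y = RMSNorm(16)(x)
    return y.pow(2).mean(dim=-1).sqrt()  # values close to 1.0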
def build_rope_cache(
seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
) -> RoPECache:
"""Enhanced Transformer with Rotary Position Embedding.
Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
transformers/rope/__init__.py. MIT License:
https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
"""
# $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))
# Create position indexes `[0, 1, ..., seq_len - 1]`
seq_idx = torch.arange(seq_len, dtype=dtype, device=device)
# Calculate the product of position index and $\theta_i$
idx_theta = torch.outer(seq_idx, theta).float()
cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
# this is to mimic the behaviour of complex32, else we will get different results
if dtype in (torch.float16, torch.bfloat16, torch.int8):
cache = cache.half()
return cache
def apply_rope(x: torch.Tensor, rope_cache: RoPECache) -> torch.Tensor:
# truncate to support variable sizes
T = x.size(1)
rope_cache = rope_cache[:T]
# cast because the reference does
xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
rope_cache = rope_cache.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
x_out2 = torch.stack(
[
xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
],
-1,
)
x_out2 = x_out2.flatten(3)
return x_out2.type_as(x)
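# Minimal CPU sketch of the RoPE helpers above: build a small cache and apply it to a
# random (B, T, n_head, head_size) tensor; the output keeps the input shape. Sizes are made up.
def _rope_example():
    head_size = 8
    cache = build_rope_cache(seq_len=16, n_elem=head_size, dtype=torch.float32,
                             device=torch.device("cpu"))
    x = torch.randn(1, 4, 2, head_size)  # (B, T, n_head, head_size)
    return apply_rope(x, cache).shape  # same shape as x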
|
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import SequenceGenerator, create_model
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
embed_dim = 1536
beam_size = 1
# This is quite a bit smaller than, e.g., T5, because this model is
# quite a bit slower to run
generate_size = 64
self.model = SequenceGenerator(
create_model(embed_dim),
beam_size,
generate_size,
).eval().to(self.device)
prompt_size = 64
vocab_size = 128 # cribbed from original script
self.example_inputs = (
torch.randint(1, vocab_size, (self.batch_size, prompt_size)).to(self.device),
)
def get_module(self):
return self.model, self.example_inputs
# The code included here is specialized for eval
def train(self):
raise NotImplementedError("training script not published")
def eval(self):
with torch.no_grad():
out = self.model(*self.example_inputs)
return (out,)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Portions of this code are derived from https://github.com/facebookresearch/metaseq
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.benchmark as benchmark
from torch import Tensor
from typing import Optional, Dict, Any
from tqdm import tqdm
# torch.set_float32_matmul_precision("high")
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def make_positions(tensor, padding_idx: int):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
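# Illustrative sketch: with padding_idx=1, padding symbols keep position 1 and real tokens
# are numbered from padding_idx + 1 onward.
def _make_positions_example():
    tokens = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is the padding symbol here
    return make_positions(tokens, padding_idx=1)  # tensor([[2, 3, 4, 1, 1]])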
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
# we cannot use incremental state here because we must be aware of
# padding.
if positions is None and self.padding_idx is not None:
positions = make_positions(input, self.padding_idx)
assert positions is not None
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
learned_sinusoidal: bool = False,
full_megatron_init=False,
pos_init_scalar=1.0,
megatron_init_sigma=None,
truncate_init=False,
):
def _init_emb(tensor, sigma):
if sigma <= 1e-8: # effectively 0
return nn.init.zeros_(tensor)
if truncate_init:
return nn.init.trunc_normal_(
tensor, mean=0.0, std=sigma, a=-3 * sigma, b=3 * sigma
)
else:
return nn.init.normal_(tensor, mean=0.0, std=sigma)
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
if full_megatron_init:
_init_emb(m.weight, megatron_init_sigma * pos_init_scalar)
else:
_init_emb(m.weight, embedding_dim**-0.5 * pos_init_scalar)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
elif learned_sinusoidal:
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
with torch.no_grad():
m.weight.copy_(
SinusoidalPositionalEmbedding.get_embedding(
num_embeddings,
embedding_dim,
padding_idx,
)
)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
from typing import Tuple
from torch.nn import Parameter, init
import math
import uuid
def softmax(x, dim: int):
return F.softmax(x, dim=dim, dtype=torch.float32)
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Linear(nn.Module):
"""
Exact same as pytorch nn.Linear but with option to initialize weight and bias directly on GPU
"""
__constants__ = ["in_features", "out_features"]
in_features: int
out_features: int
weight: Tensor
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
initialize_params_on_gpu: bool = False,
dtype: torch.dtype = None,
) -> None:
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
device = torch.cuda.current_device() if initialize_params_on_gpu else None
if dtype is None:
dtype = torch.float
self.weight = Parameter(
torch.empty(out_features, in_features, device=device, dtype=dtype)
)
if bias:
self.bias = Parameter(torch.empty(out_features, device=device, dtype=dtype))
else:
self.register_parameter("bias", None)
def forward(self, input: Tensor) -> Tensor:
return F.linear(input, self.weight, self.bias)
def extra_repr(self) -> str:
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None
)
class Dropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def extra_repr(self) -> str:
return "p={}".format(self.p)
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def init_incremental_state(self):
self._incremental_state_id = "5" # str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
initialize_params_on_gpu=False,
dtype: Optional[torch.dtype] = None,
):
self.init_incremental_state()
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
random_state = torch.get_rng_state()
# random_state_cuda = torch.cuda.get_rng_state()
self.k_proj = Linear(
self.kdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.v_proj = Linear(
self.vdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.q_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.out_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
torch.set_rng_state(random_state)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
attn_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if (
incremental_state is None
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
False,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
assert v is not None
v = torch.cat([prev_value, v], dim=1)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
# Replace any non-finite values with finite equivalents, since otherwise
# we may get NaN when adding attn_mask or computing softmax.
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights_float = softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
return attn, None # To match return type of F.multi_head_attention_forward
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
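# Hedged usage sketch (illustrative assumption, not part of the benchmark): feed a
# sequence one timestep at a time and let the incremental_state dict cache
# prev_key/prev_value between calls, as a decoder does during generation.
if __name__ == "__main__":
    mha = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
    x = torch.randn(5, 2, 16)            # (time, batch, channel)
    state = {}                           # filled in-place by _set_input_buffer
    for t in range(x.size(0)):
        step = x[t:t + 1]
        out, _ = mha(step, step, step, incremental_state=state)
    print(out.shape)                     # torch.Size([1, 2, 16])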
from typing import Callable, List
class ActivationFn(nn.Module):
def __init__(self, name, embed_dim, ffn_dim):
super().__init__()
self.fn = self.__get_fn(name)
def forward(self, fc1_in, fc1_out, model_parallel: bool):
return self.fn(fc1_out)
def __get_fn(self, name: str) -> Callable:
"""Returns the activation function corresponding to the arg passed in the run"""
if name == "relu":
return F.relu
elif name == "relu_squared":
return relu_squared
elif name == "gelu":
return gelu
elif name == "tanh":
return torch.tanh
elif name == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(name))
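# Hedged illustration (assumption): ActivationFn only dispatches on the name; the
# fc1_in and model_parallel arguments exist for interface compatibility and are ignored.
if __name__ == "__main__":
    act = ActivationFn("relu", embed_dim=8, ffn_dim=32)
    h = torch.randn(2, 32)
    print(bool((act(h, h, model_parallel=False) >= 0).all()))   # True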
class TransformerDecoderLayer(nn.Module):
"""Pre-norm Decoder layer block.
Note that we have found model training to require pre-norm to remain stable.
Args:
embed_dim (int): dimension of the model embedding
decoder_embed_dim (int): dimension of the decoder embedding
dropout (float): dropout probability
decoder_attention_heads (int): number of decoder attention heads
attention_dropout (float): dropout probability for attention weights
decoder_ffn_embed_dim (int): dimension of the decoder feedforward network embedding
activation_fn (str): activation function name
add_bias_kv (bool): whether to add bias to the key and value projections
add_zero_attn (bool): whether to add a zero attention vector for padding tokens
disable_affine_ln (bool): whether to disable affine layer normalization
disable_bias (bool): whether to disable bias in linear layers
tensor_parallel_init_model_on_gpu (bool): whether to initialize model on GPU for tensor parallelism
full_megatron_init (bool): whether to use full Megatron initialization
megatron_init_sigma (float): sigma value for Megatron initialization
truncate_init (bool): whether to truncate the initialization values
"""
def __init__(
self,
embed_dim,
decoder_embed_dim,
dropout=0.1,
decoder_attention_heads=8,
attention_dropout=0.1,
decoder_ffn_embed_dim=2048,
activation_fn="relu",
add_bias_kv=False,
add_zero_attn=False,
disable_affine_ln=False,
disable_bias=False,
tensor_parallel_init_model_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.self_attn = self.build_self_attention(
decoder_embed_dim,
decoder_attention_heads,
attention_dropout,
add_bias_kv,
add_zero_attn,
tensor_parallel_init_model_on_gpu,
disable_bias,
megatron_init_sigma,
truncate_init,
)
self.nh = decoder_attention_heads
self.head_dim = int(decoder_embed_dim / self.nh)
affine_ln = not disable_affine_ln
self.self_attn_layer_norm = LayerNorm(
decoder_embed_dim, elementwise_affine=affine_ln
)
self.fc1 = self.build_fc1(
decoder_embed_dim,
decoder_ffn_embed_dim,
tensor_parallel_init_model_on_gpu,
full_megatron_init,
megatron_init_sigma,
truncate_init,
disable_bias,
)
self.activation_fn = ActivationFn(
activation_fn,
decoder_embed_dim,
decoder_ffn_embed_dim,
)
self.fc2 = self.build_fc2(
decoder_ffn_embed_dim,
decoder_embed_dim,
tensor_parallel_init_model_on_gpu,
full_megatron_init,
megatron_init_sigma,
truncate_init,
disable_bias,
)
self.final_layer_norm = LayerNorm(
decoder_embed_dim, elementwise_affine=affine_ln
)
def build_fc1(
self,
input_dim,
output_dim,
initialize_params_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
disable_bias=False,
):
return Linear(
input_dim,
output_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
bias=not disable_bias,
)
def build_fc2(
self,
input_dim,
output_dim,
initialize_params_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
disable_bias=False,
):
return Linear(
input_dim,
output_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
bias=not disable_bias,
)
def build_self_attention(
self,
embed_dim,
decoder_attention_heads,
attention_dropout,
add_bias_kv,
add_zero_attn,
tensor_parallel_init_model_on_gpu,
disable_bias,
megatron_init_sigma,
truncate_init,
):
return MultiheadAttention(
embed_dim,
decoder_attention_heads,
dropout=attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True,
initialize_params_on_gpu=tensor_parallel_init_model_on_gpu,
bias=not disable_bias,
)
def forward_attention(
self,
query,
key,
value,
residual,
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
attn_mask: Optional[Tensor] = None,
):
x, _ = self.self_attn(
query=query,
key=key,
value=value,
key_padding_mask=key_padding_mask,
incremental_state=incremental_state,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = residual + x
return x
def forward(
self,
x,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
self_attn_mask: Optional[Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.self_attn_layer_norm(x)
x = self.forward_attention(
query=x,
key=x,
value=x,
residual=residual,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
)
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(x, self.fc1(x), model_parallel=False)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
return x
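# Hedged usage sketch (illustrative, not part of the benchmark): run a single
# pre-norm decoder layer over a short sequence; inputs are (seq_len, batch, embed_dim)
# as documented in forward() above.
if __name__ == "__main__":
    layer = TransformerDecoderLayer(
        embed_dim=16,
        decoder_embed_dim=16,
        decoder_attention_heads=4,
        decoder_ffn_embed_dim=32,
    )
    x = torch.randn(6, 2, 16)
    y = layer(x, incremental_state=None)
    print(y.shape)                       # torch.Size([6, 2, 16])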
class TransformerDecoder(nn.Module):
def __init__(
self,
embed_tokens,
decoder_attention_heads,
decoder_ffn_embed_dim,
activation_fn="relu",
dropout=0.1,
attention_dropout=0.1,
no_emb_dropout=False,
share_decoder_input_output_embed=False,
embed_dim=512,
max_target_positions=1024,
no_scale_embedding=False,
decoder_learned_pos=False,
decoder_learned_sinusoidal=False,
full_megatron_init=False,
pos_init_scalar=1.0,
megatron_init_sigma=0.006,
truncate_init=False,
decoder_layers=6,
self_attn_doc_sep=-1,
initialize_params_on_gpu=False,
dtype=torch.float32,
add_bias_kv=False,
add_zero_attn=False,
disable_affine_ln=False,
disable_bias=False,
tensor_parallel_init_model_on_gpu=False,
):
super().__init__()
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.tensor_parallel_init_model_on_gpu = tensor_parallel_init_model_on_gpu
self.megatron_init_sigma = megatron_init_sigma
self.full_megatron_init = full_megatron_init
self.activation_fn = activation_fn
self.attention_dropout = attention_dropout
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.dropout = dropout
self.truncate_init = truncate_init
if no_emb_dropout:
self.dropout_module = None
self.add_bias_kv = add_bias_kv
self.add_zero_attn = add_zero_attn
self.disable_affine_ln = disable_affine_ln
self.disable_bias = disable_bias
self.decoder_attention_heads = decoder_attention_heads
self.share_input_output_embed = share_decoder_input_output_embed
self.embed_dim = embed_dim
self.padding_idx: int = embed_tokens.padding_idx
assert self.padding_idx is not None
self.max_target_positions = max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if no_scale_embedding else math.sqrt(self.embed_dim)
self.decoder_ffn_embed_dim = decoder_ffn_embed_dim
# default value
device = torch.cuda.current_device() if initialize_params_on_gpu else None
# default value
self.self_attn_doc_sep = self_attn_doc_sep
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
self.embed_dim,
self.padding_idx,
learned=decoder_learned_pos,
learned_sinusoidal=decoder_learned_sinusoidal,
full_megatron_init=full_megatron_init,
pos_init_scalar=pos_init_scalar,
megatron_init_sigma=megatron_init_sigma,
truncate_init=truncate_init,
)
if decoder_learned_pos
else None
)
        if self.embed_positions is not None:
            self.embed_positions.to(device).to(dtype)
layers = []
for i in range(decoder_layers):
layers.append(self.build_decoder_layer())
self.layers = nn.ModuleList(layers)
self.num_layers = len(self.layers)
self.layer_norm = LayerNorm(
self.embed_dim,
elementwise_affine=not disable_affine_ln,
)
self.layer_norm.to(device).to(dtype)
self.output_projection = None
if self.share_input_output_embed:
self.output_projection = Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = Linear(
self.embed_dim,
                self.embed_tokens.num_embeddings,  # vocabulary size; the original referenced an undefined `dictionary`
bias=False,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.embed_dim**-0.5
)
def build_base_decoder_layer(self):
return TransformerDecoderLayer(
self.embed_dim,
self.embed_dim,
self.dropout,
self.decoder_attention_heads,
self.attention_dropout,
self.decoder_ffn_embed_dim,
self.activation_fn,
self.add_bias_kv,
self.add_zero_attn,
self.disable_affine_ln,
self.disable_bias,
self.tensor_parallel_init_model_on_gpu,
self.full_megatron_init,
self.megatron_init_sigma,
self.truncate_init,
)
def build_decoder_layer(self):
layer = self.build_base_decoder_layer()
return layer
def forward_embedding(
self,
tokens,
token_embedding: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
# embed tokens and positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state, positions=positions
)
# see BaseDecoder for important information about
# incremental state
if incremental_state is not None:
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.dropout_module is not None:
x = self.dropout_module(x)
# Returning in T x B x C format as that makes integrating sequence parallelism easier.
x = x.transpose(0, 1).contiguous()
return x, embed, positions
# forward for TransformerDecoder
def forward(
self,
prev_output_tokens,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
src_lengths: Optional[Any] = None,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
self_attn_padding_mask (torch.Tensor, optional): precomputed padding
mask for self-attention (default None will recompute mask)
        Returns:
            the decoder's output of shape `(batch, tgt_len, vocab)`, or
            `(batch, tgt_len, embed_dim)` when `features_only` is True
"""
# see BaseDecoder for important information about
# incremental state
x = self.extract_features(
prev_output_tokens,
incremental_state=incremental_state,
token_embeddings=token_embeddings,
self_attn_padding_mask=self_attn_padding_mask,
)
if not features_only:
x = self.output_layer(x)
# Transposing back to B x T x C, so that the interface stays the same.
x = x.transpose(0, 1).contiguous()
return x
def extract_features(
self,
prev_output_tokens: torch.Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
) -> torch.Tensor:
# compute self-attention padding mask (involves device-to-host transfer,
# so put it at the top of the forward)
assert prev_output_tokens is not None
assert self.padding_idx is not None
if (
self_attn_padding_mask is None
and prev_output_tokens.eq(self.padding_idx).any()
):
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# assert self_attn_padding_mask is not None
# embed tokens and positions
# x is T x B x C
x, tok, pos = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
# see BaseDecoder for important information about
# incremental state. Note that it may be an empty dictionary.
        if incremental_state is None:
            self_attn_mask = self.buffered_future_mask(x, prev_output_tokens)
        else:
            self_attn_mask = None
# decoder layers
# store other representations for instrumentation in VocabParallelCrossEntCrit
# Note: we are only storing the embeddings output and output of final transformer block
# instead of all inner representations, as thats the only thing being logged and storing
# all intermediate representation causes OOM for large models during validation.
for idx, layer in enumerate(self.layers):
x = layer(
x,
incremental_state=incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
if self.layer_norm is not None:
x = self.layer_norm(x)
# Returned x is T x B x C here, as sequence_parallel requires T to be first dim
return x
def output_layer(self, features):
"""Project features to the vocabulary size."""
return self.output_projection(features)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor, input_tokens=None) -> torch.Tensor:
cur_seq_len, batch_size = tensor.size(0), tensor.size(1)
max_seq_len = self.max_positions()
need_to_make_new_mask = (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(1) < max_seq_len
or (
self._future_mask.size(0) != (batch_size * self.decoder_attention_heads)
)
)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if need_to_make_new_mask:
self._future_mask = torch.triu(
fill_with_neg_inf(
torch.zeros([max_seq_len, max_seq_len], device=tensor.device)
),
1,
)
self._future_mask = self._future_mask.to(tensor)
if self.self_attn_doc_sep != -1:
return self._future_mask
else:
return self._future_mask[:cur_seq_len, :cur_seq_len]
def _sample_topp(temperature: float, sampling_topp: float, lprobs: torch.Tensor):
if temperature == 0.0 or sampling_topp == 0.0:
# greedy search
return tuple(lprobs.max(dim=-1))
probs = lprobs.exp()
sprobs, sinds = probs.sort(dim=-1, descending=True)
mask = (sprobs.cumsum(dim=-1) - sprobs) >= sampling_topp
trunc_sprobs = sprobs.detach().clone()
trunc_sprobs[mask] = 0
trunc_sprobs.div_(trunc_sprobs.sum(dim=-1).unsqueeze(-1))
choices = torch.multinomial(trunc_sprobs, 1)[:, 0]
hyp_ids = torch.arange(lprobs.size(0)).to(lprobs.device)
tok_ids = sinds[hyp_ids, choices]
scores = sprobs[hyp_ids, choices].log()
return scores, tok_ids
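# Hedged illustration (assumption, not wired into the harness): nucleus sampling over a
# tiny batch of log-probabilities. With sampling_topp=0.9 only the smallest set of tokens
# whose cumulative probability reaches 0.9 stays eligible; a temperature or topp of 0
# falls back to the greedy argmax branch above.
if __name__ == "__main__":
    lprobs = torch.log_softmax(torch.randn(2, 10), dim=-1)       # (batch, vocab)
    scores, tok_ids = _sample_topp(temperature=1.0, sampling_topp=0.9, lprobs=lprobs)
    print(tok_ids.shape, scores.shape)                           # two tensors of shape (2,)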
class SequenceGenerator(nn.Module):
def __init__(
self, model, beam_size: int, generate_size: int, use_incremental: bool = True
) -> None:
super().__init__()
self.model = model
self.beam_size = beam_size
self.generate_size = generate_size
self.use_incremental = use_incremental
def forward(self, src_tokens):
with torch.no_grad():
incremental_states = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
max_len = src_len + self.generate_size
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
tokens = (
torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
)
start_step = src_tokens.shape[1]
tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
model_out = self.model(
tokens[:, :start_step],
incremental_state=incremental_states if self.use_incremental else None,
)
            model_predictions = F.log_softmax(model_out.float()[:, -1, :], dim=-1)
for step in range(start_step, max_len):
tokens[:, step] = model_predictions.max(-1)[1]
# forward through the next pass
model_out = self.model(
tokens[:, : step + 1],
incremental_state=incremental_states
if self.use_incremental
else None,
)
# see above for why this must remain float
                model_predictions = F.log_softmax(model_out.float()[:, -1, :], dim=-1)
return tokens
class SequenceGeneratorFixedSize(nn.Module):
def __init__(self, model, beam_size: int, generate_size: int) -> None:
super().__init__()
self.model = model
self.beam_size = beam_size
self.generate_size = generate_size
def forward(self, src_tokens):
with torch.no_grad():
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
max_len = src_len + self.generate_size
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
start_step = src_tokens.shape[1]
tokens = (
torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
)
tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
model_out = self.model(tokens)
            # use the output at the last prompt position to predict the first generated token
            model_predictions = F.log_softmax(model_out.float()[:, start_step - 1, :], dim=-1)
for step in range(start_step, max_len):
tokens[:, step] = model_predictions.max(-1)[1]
model_out = self.model(
tokens,
)
# see above for why this must remain float
                model_predictions = F.log_softmax(model_out.float()[:, step, :], dim=-1)
return tokens
def create_model(embed_dim=1536):
embed_tokens = torch.nn.Embedding(2048, embed_dim, padding_idx=-1)
return (
TransformerDecoder(
embed_tokens,
decoder_layers=24,
decoder_attention_heads=16,
max_target_positions=2048,
embed_dim=embed_dim,
decoder_ffn_embed_dim=embed_dim * 4,
no_scale_embedding=True,
share_decoder_input_output_embed=True,
decoder_learned_pos=True,
dropout=0.1,
)
)
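# Hedged usage sketch (illustrative, not part of the benchmark): any module that maps
# token ids of shape (batch, seq_len) to logits of shape (batch, seq_len, vocab) can be
# wrapped by SequenceGeneratorFixedSize; the toy model below is an assumption used only
# to keep the example fast, whereas the benchmark wraps create_model().
if __name__ == "__main__":
    class _ToyLM(nn.Module):
        def __init__(self, vocab_size=2048, dim=32):
            super().__init__()
            self.embed = nn.Embedding(vocab_size, dim)
            self.proj = nn.Linear(dim, vocab_size)

        def forward(self, tokens, incremental_state=None):
            return self.proj(self.embed(tokens))

    prompt = torch.randint(1, 2048, (2, 8))                      # (batch, prompt_len)
    generator = SequenceGeneratorFixedSize(_ToyLM(), beam_size=1, generate_size=4)
    out = generator(prompt)                                      # (batch * beam, prompt_len + 4)
    print(out.shape)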
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='vit_giant_patch14_224', device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Bart", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import numpy as np
import random
import time
import torch
from argparse import Namespace
from .meta import Meta
from pathlib import Path
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
CANNOT_SET_CUSTOM_OPTIMIZER = True
    # Skip the correctness check, because maml runs backward and the optimizer in eval(),
    # which returns non-deterministic results.
SKIP_CORRECTNESS_CHECK = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# load from disk or synthesize data
use_data_file = False
debug_print = False
root = str(Path(__file__).parent)
args = Namespace(**{
'n_way': 5,
'k_spt': 1,
'k_qry': 15,
'imgsz': 28,
'imgc': 1,
'task_num': 32,
'meta_lr': 1e-3,
'update_lr': 0.4,
'update_step': 5,
'update_step_test': 10
})
config = [
('conv2d', [64, args.imgc, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, 64])
]
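        # With 28x28 inputs, the three stride-2 3x3 convs give 13 -> 6 -> 2 spatial maps and
        # the final 2x2 conv reduces to 1x1, so flatten feeds 64 features into the linear head.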
self.module = Meta(args, config).to(device)
if use_data_file:
self.example_inputs = torch.load(f'{root}/batch.pt')
self.example_inputs = tuple([torch.from_numpy(i).to(self.device) for i in self.example_inputs])
else:
# synthesize data parameterized by arg values
self.example_inputs = (
torch.randn(args.task_num, args.n_way, args.imgc, args.imgsz, args.imgsz).to(device),
torch.randint(0, args.n_way, [args.task_num, args.n_way], dtype=torch.long).to(device),
torch.randn(args.task_num, args.n_way * args.k_qry, args.imgc, args.imgsz, args.imgsz).to(device),
torch.randint(0, args.n_way, [args.task_num, args.n_way * args.k_qry], dtype=torch.long).to(device))
# print input shapes
if debug_print:
for i in range(len(self.example_inputs)):
print(self.example_inputs[i].shape)
def get_module(self):
return self.module, self.example_inputs
def eval(self) -> Tuple[torch.Tensor]:
out = self.module(*self.example_inputs)
return (out, )
def train(self):
raise NotImplementedError("MAML model doesn't support train.")
def eval_in_nograd(self):
return False
|
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from .learner import Learner
from copy import deepcopy
class Meta(nn.Module):
"""
Meta Learner
"""
def __init__(self, args, config):
"""
:param args:
"""
super(Meta, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry):
if self.training:
return self.forward_train(x_spt, y_spt, x_qry, y_qry)
else:
return self.finetunning(x_spt[0], y_spt[0], x_qry[0], y_qry[0])
def forward_train(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [b, setsz, c_, h, w]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, c_, h, w]
:param y_qry: [b, querysz]
:return:
"""
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
# 1. run the i-th task and compute loss for k=0
logits = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
            fast_weights = [p[1] - self.update_lr * p[0] for p in zip(grad, self.net.parameters())]
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = [p[1] - self.update_lr * p[0] for p in zip(grad, fast_weights)]
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
                # query loss at this step; accumulated into losses_q[k + 1]
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()  # convert to a Python scalar
corrects[k + 1] = corrects[k + 1] + correct
        # end of all tasks
        # average the query loss from the final update step over all tasks
        loss_q = losses_q[-1] / task_num
# optimize theta parameters
self.meta_optim.zero_grad()
loss_q.backward()
# print('meta update')
# for p in self.net.parameters()[:5]:
# print(torch.norm(p).item())
self.meta_optim.step()
accs = torch.tensor(corrects) / (querysz * task_num)
return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
querysz = x_qry.size(0)
corrects = [0 for _ in range(self.update_step_test + 1)]
        # in order not to ruin the state of running_mean/variance and bn_weight/bias,
        # we finetune on a copy of the model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
            # loss_q is overwritten each step, keeping only the loss from the last update
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry).sum().item()  # convert to a Python scalar
corrects[k + 1] = corrects[k + 1] + correct
del net
accs = torch.tensor(corrects) / querysz
return accs
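# Hedged usage sketch (illustrative, mirroring the benchmark's synthetic-data setup):
# build a tiny 5-way 1-shot Meta learner and run one meta-training step on random data.
# The small config below is an assumption chosen so the example runs quickly on CPU.
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace(n_way=5, k_spt=1, k_qry=3, task_num=2, meta_lr=1e-3, update_lr=0.4,
                     update_step=2, update_step_test=2, imgc=1, imgsz=28)
    config = [
        ('conv2d', [8, args.imgc, 3, 3, 2, 0]),   # 28x28 -> 13x13
        ('relu', [True]),
        ('bn', [8]),
        ('flatten', []),
        ('linear', [args.n_way, 8 * 13 * 13]),
    ]
    meta = Meta(args, config)
    x_spt = torch.randn(args.task_num, args.n_way * args.k_spt, args.imgc, 28, 28)
    y_spt = torch.randint(0, args.n_way, (args.task_num, args.n_way * args.k_spt))
    x_qry = torch.randn(args.task_num, args.n_way * args.k_qry, args.imgc, 28, 28)
    y_qry = torch.randint(0, args.n_way, (args.task_num, args.n_way * args.k_qry))
    print(meta(x_spt, y_spt, x_qry, y_qry))       # per-step query accuracies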
def main():
pass
if __name__ == '__main__':
main()
|
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from typing import List
class Learner(nn.Module):
"""
"""
def __init__(self, config, imgc, imgsz):
"""
:param config: network config file, type:list of (string, list)
:param imgc: 1 or 3
:param imgsz: 28 or 84
"""
super(Learner, self).__init__()
self.config = config
# this dict contains all tensors needed to be optimized
self.vars = nn.ParameterList()
# running_mean and running_var
self.vars_bn = nn.ParameterList()
for i, (name, param) in enumerate(self.config):
if name == 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name == 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
elif name == 'linear':
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name == 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
def extra_repr(self):
info = ''
for name, param in self.config:
if name == 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name == 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name == 'linear':
tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
info += tmp + '\n'
elif name == 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)'%(param[0])
info += tmp + '\n'
elif name == 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name == 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=True):
"""
        This function can be called during finetuning; however, during finetuning we don't
        wish to update running_mean/running_var. Although the BN weights/biases are updated,
        they are kept separate from the initial theta parameters via fast_weights. To leave
        running_mean/running_var untouched as well, bn_training must be set to False; the
        weights/biases are still updated through fast_weights without dirtying theta.
:param x: [b, 1, 28, 28]
:param vars:
:param bn_training: set False to not update
:return: x, loss, likelihood, kld
"""
        if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
for name, param in self.config:
if name == 'conv2d':
w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name == 'convt2d':
w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name == 'linear':
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
# print('forward:', idx, x.norm().item())
elif name == 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
elif name == 'flatten':
# print(x.shape)
x = x.view(x.size(0), -1)
elif name == 'reshape':
# [b, 8] => [b, 2, 2, 2]
x = x.view(x.size(0), *param)
elif name == 'relu':
x = F.relu(x, inplace=param[0])
elif name == 'leakyrelu':
x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
elif name == 'tanh':
                x = torch.tanh(x)
elif name == 'sigmoid':
x = torch.sigmoid(x)
elif name == 'upsample':
x = F.upsample_nearest(x, scale_factor=param[0])
elif name == 'max_pool2d':
x = F.max_pool2d(x, param[0], param[1], param[2])
elif name == 'avg_pool2d':
x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
raise NotImplementedError
        # make sure all variables are used properly
assert idx == len(vars)
assert bn_idx == len(self.vars_bn)
return x
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
            if vars is None:
                for p in self.vars:
                    if p.grad is not None:
                        p.grad.zero_()
            else:
                for p in vars:
                    if p.grad is not None:
                        p.grad.zero_()
def parameters(self):
"""
        Override this function: the learnable tensors live in self.vars (a ParameterList),
        which is returned directly instead of the default generator from nn.Module.
:return:
"""
        return self.vars
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from typing import Optional, Tuple
from .sam import Sam
from .transforms import ResizeLongestSide
class SamPredictor:
def __init__(
self,
sam_model: Sam,
) -> None:
"""
Uses SAM to calculate the image embedding for an image, and then
allow repeated, efficient mask prediction given prompts.
Arguments:
sam_model (Sam): The model to use for mask prediction.
"""
super().__init__()
self.model = sam_model
self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
self.reset_image()
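    # Typical usage (illustrative sketch; mirrors the benchmark's eval() entry point):
    #     predictor = SamPredictor(sam_model)
    #     predictor.set_image(image)                      # HWC uint8 array, RGB
    #     masks, scores, logits = predictor.predict(
    #         point_coords=np.array([[500, 375]]),
    #         point_labels=np.array([1]),
    #         multimask_output=True,
    #     )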
def set_image(
self,
image: np.ndarray,
image_format: str = "RGB",
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method.
Arguments:
image (np.ndarray): The image for calculating masks. Expects an
image in HWC uint8 format, with pixel values in [0, 255].
image_format (str): The color format of the image, in ['RGB', 'BGR'].
"""
assert image_format in [
"RGB",
"BGR",
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
if image_format != self.model.image_format:
image = image[..., ::-1]
# Transform the image to the form expected by the model
input_image = self.transform.apply_image(image)
input_image_torch = torch.as_tensor(input_image, device=self.device)
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
self.set_torch_image(input_image_torch, image.shape[:2])
@torch.no_grad()
def set_torch_image(
self,
transformed_image: torch.Tensor,
original_image_size: Tuple[int, ...],
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method. Expects the input
image to be already transformed to the format expected by the model.
Arguments:
transformed_image (torch.Tensor): The input image, with shape
1x3xHxW, which has been transformed with ResizeLongestSide.
original_image_size (tuple(int, int)): The size of the image
before transformation, in (H, W) format.
"""
assert (
len(transformed_image.shape) == 4
and transformed_image.shape[1] == 3
and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
self.reset_image()
self.original_size = original_image_size
self.input_size = tuple(transformed_image.shape[-2:])
input_image = self.model.preprocess(transformed_image)
self.features = self.model.image_encoder(input_image)
self.is_image_set = True
def predict(
self,
point_coords: Optional[np.ndarray] = None,
point_labels: Optional[np.ndarray] = None,
box: Optional[np.ndarray] = None,
mask_input: Optional[np.ndarray] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Predict masks for the given input prompts, using the currently set image.
Arguments:
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (np.ndarray or None): A length N array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A length 4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form 1xHxW, where
for SAM, H=W=256.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(np.ndarray): The output masks in CxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(np.ndarray): An array of length C containing the model's
predictions for the quality of each mask.
(np.ndarray): An array of shape CxHxW, where C is the number
of masks and H=W=256. These low resolution logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
# Transform input prompts
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
if point_coords is not None:
assert (
point_labels is not None
), "point_labels must be supplied if point_coords is supplied."
point_coords = self.transform.apply_coords(point_coords, self.original_size)
coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
if box is not None:
box = self.transform.apply_boxes(box, self.original_size)
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
box_torch = box_torch[None, :]
if mask_input is not None:
mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
mask_input_torch = mask_input_torch[None, :, :, :]
masks, iou_predictions, low_res_masks = self.predict_torch(
coords_torch,
labels_torch,
box_torch,
mask_input_torch,
multimask_output,
return_logits=return_logits,
)
masks_np = masks[0].detach().cpu().numpy()
iou_predictions_np = iou_predictions[0].to(torch.float32).detach().cpu().numpy()
low_res_masks_np = low_res_masks[0].to(torch.float32).detach().cpu().numpy()
return masks_np, iou_predictions_np, low_res_masks_np
@torch.no_grad()
def predict_torch(
self,
point_coords: Optional[torch.Tensor],
point_labels: Optional[torch.Tensor],
boxes: Optional[torch.Tensor] = None,
mask_input: Optional[torch.Tensor] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Input prompts are batched torch tensors and are expected to already be
transformed to the input frame using ResizeLongestSide.
Arguments:
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (torch.Tensor or None): A BxN array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
boxes (np.ndarray or None): A Bx4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form Bx1xHxW, where
for SAM, H=W=256. Masks returned by a previous iteration of the
predict method do not need further transformation.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(torch.Tensor): The output masks in BxCxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(torch.Tensor): An array of shape BxC containing the model's
predictions for the quality of each mask.
(torch.Tensor): An array of shape BxCxHxW, where C is the number
of masks and H=W=256. These low res logits can be passed to
a subsequent iteration as mask input.
"""
# if not self.is_image_set:
# raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
if point_coords is not None:
points = (point_coords, point_labels)
else:
points = None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
points=points,
boxes=boxes,
masks=mask_input,
)
# Predict masks
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# Upscale the masks to the original image resolution
masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
if not return_logits:
masks = masks > self.model.mask_threshold
return masks, iou_predictions, low_res_masks
def get_image_embedding(self) -> torch.Tensor:
"""
Returns the image embeddings for the currently set image, with
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
the embedding spatial dimension of SAM (typically C=256, H=W=64).
"""
# if not self.is_image_set:
# raise RuntimeError(
# "An image must be set with .set_image(...) to generate an embedding."
# )
assert self.features is not None, "Features must exist if an image has been set."
return self.features
@property
def device(self) -> torch.device:
return self.model.device
def reset_image(self) -> None:
"""Resets the currently set image."""
self.is_image_set = False
self.features = None
self.orig_h = None
self.orig_w = None
self.input_h = None
        self.input_w = None
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import partial
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
from .transformer import TwoWayTransformer
from .sam import Sam
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
sam.eval()
if checkpoint is not None:
with open(checkpoint, "rb") as f:
state_dict = torch.load(f)
sam.load_state_dict(state_dict)
    return sam
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
from copy import deepcopy
from typing import Tuple
class ResizeLongestSide:
"""
Resizes images to the longest side 'target_length', as well as provides
methods for resizing coordinates and boxes. Provides methods for
transforming both numpy array and batched torch tensors.
"""
def __init__(self, target_length: int) -> None:
self.target_length = target_length
def apply_image(self, image: np.ndarray) -> np.ndarray:
"""
Expects a numpy array with shape HxWxC in uint8 format.
"""
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return np.array(resize(to_pil_image(image), target_size))
def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array of length 2 in the final dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).astype(float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array shape Bx4. Requires the original image size
in (H, W) format.
"""
boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
"""
Expects batched images with shape BxCxHxW and float format. This
transformation may not exactly match apply_image. apply_image is
the transformation expected by the model.
"""
# Expects an image in BCHW format. May not exactly match apply_image.
target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
return F.interpolate(
image, target_size, mode="bilinear", align_corners=False, antialias=True
)
def apply_coords_torch(
self, coords: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with length 2 in the last dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).to(torch.float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes_torch(
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with shape Bx4. Requires the original image
size in (H, W) format.
"""
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
@staticmethod
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
"""
Compute the output size given input size and target long side length.
"""
scale = long_side_length * 1.0 / max(oldh, oldw)
newh, neww = oldh * scale, oldw * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
        return (newh, neww)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from ...util.model import BenchmarkModel
from .build_sam import sam_model_registry
from .predictor import SamPredictor
from PIL import Image
import numpy as np
import cv2
from torchbenchmark.tasks import COMPUTER_VISION
import torch
import os
class Model(BenchmarkModel):
task = COMPUTER_VISION.SEGMENTATION
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=1, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# Checkpoint options are here https://github.com/facebookresearch/segment-anything#model-checkpoints
data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
sam_checkpoint = os.path.join(data_folder, 'sam_vit_h_4b8939.pth')
model_type = "vit_h"
self.model = sam_model_registry[model_type](checkpoint=sam_checkpoint)
self.model.to(device=device)
image_path = os.path.join(data_folder, 'truck.jpg')
self.image = cv2.imread(image_path)
self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
self.sample_image = torch.randn((3, 256, 256)).to(device)
def get_module(self):
example_input = [
{
'image': self.sample_image,
'original_size': (256, 256),
}
]
multimask_output = False
return self.model, (example_input, multimask_output)
def train(self):
error_msg = """
As of May 17, 2023
Some base VIT checkpoints are available for SAM but getting the dataset
requires a research license. It's easy to make up a training loop on random
data and if that's interesting please let @msaroufim know
https://github.com/facebookresearch/segment-anything#dataset
"""
        raise NotImplementedError(error_msg)
def eval(self):
# To test for bfloat16 uncomment the below line
# predictor = SamPredictor(self.model.to(dtype=torch.bfloat16))
predictor = SamPredictor(self.model)
predictor.set_image(self.image)
input_point = np.array([[500, 375]])
input_label = np.array([1])
masks, scores, logits = predictor.predict(
point_coords=input_point,
point_labels=input_label,
multimask_output=True)
return (masks,) |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from typing import Type
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
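# Minimal usage sketch (not part of the original file): LayerNorm2d normalizes each
# spatial position across the channel dimension of an NCHW tensor, whereas
# nn.LayerNorm expects the normalized dimensions to come last.
#   norm = LayerNorm2d(8)
#   x = torch.randn(2, 8, 4, 4)
#   y = norm(x)                           # shape preserved: (2, 8, 4, 4)
#   assert torch.allclose(y.mean(dim=1), torch.zeros(2, 4, 4), atol=1e-5)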
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor, nn
import math
from typing import Tuple, Type
from .common import MLPBlock
class TwoWayTransformer(nn.Module):
def __init__(
self,
depth: int,
embedding_dim: int,
num_heads: int,
mlp_dim: int,
activation: Type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
"""
A transformer decoder that attends to an input image using
queries whose positional embedding is supplied.
Args:
depth (int): number of layers in the transformer
embedding_dim (int): the channel dimension for the input embeddings
num_heads (int): the number of heads for multihead attention. Must
divide embedding_dim
mlp_dim (int): the channel dimension internal to the MLP block
activation (nn.Module): the activation to use in the MLP block
"""
super().__init__()
self.depth = depth
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.mlp_dim = mlp_dim
self.layers = nn.ModuleList()
for i in range(depth):
self.layers.append(
TwoWayAttentionBlock(
embedding_dim=embedding_dim,
num_heads=num_heads,
mlp_dim=mlp_dim,
activation=activation,
attention_downsample_rate=attention_downsample_rate,
skip_first_layer_pe=(i == 0),
)
)
self.final_attn_token_to_image = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.norm_final_attn = nn.LayerNorm(embedding_dim)
def forward(
self,
image_embedding: Tensor,
image_pe: Tensor,
point_embedding: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Args:
image_embedding (torch.Tensor): image to attend to. Should be shape
B x embedding_dim x h x w for any h and w.
image_pe (torch.Tensor): the positional encoding to add to the image. Must
have the same shape as image_embedding.
point_embedding (torch.Tensor): the embedding to add to the query points.
Must have shape B x N_points x embedding_dim for any N_points.
Returns:
torch.Tensor: the processed point_embedding
torch.Tensor: the processed image_embedding
"""
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
bs, c, h, w = image_embedding.shape
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
image_pe = image_pe.flatten(2).permute(0, 2, 1)
# Prepare queries
queries = point_embedding
keys = image_embedding
# Apply transformer blocks and final layernorm
for layer in self.layers:
queries, keys = layer(
queries=queries,
keys=keys,
query_pe=point_embedding,
key_pe=image_pe,
)
# Apply the final attention layer from the points to the image
q = queries + point_embedding
k = keys + image_pe
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm_final_attn(queries)
return queries, keys
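# Usage sketch with assumed shapes (hypothetical values, not from this file):
#   transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
#   image_embedding = torch.randn(1, 256, 64, 64)   # B x C x H x W
#   image_pe = torch.randn(1, 256, 64, 64)          # same shape as image_embedding
#   point_embedding = torch.randn(1, 5, 256)        # B x N_points x C
#   queries, keys = transformer(image_embedding, image_pe, point_embedding)
#   # queries: (1, 5, 256); keys: (1, 4096, 256), i.e. one token per image position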
class TwoWayAttentionBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
num_heads: int,
mlp_dim: int = 2048,
activation: Type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
"""
A transformer block with four layers: (1) self-attention of sparse
inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
block on sparse inputs, and (4) cross attention of dense inputs to sparse
inputs.
Arguments:
embedding_dim (int): the channel dimension of the embeddings
num_heads (int): the number of heads in the attention layers
mlp_dim (int): the hidden dimension of the mlp block
activation (nn.Module): the activation of the mlp block
skip_first_layer_pe (bool): skip the PE on the first layer
"""
super().__init__()
self.self_attn = Attention(embedding_dim, num_heads)
self.norm1 = nn.LayerNorm(embedding_dim)
self.cross_attn_token_to_image = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.norm2 = nn.LayerNorm(embedding_dim)
self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
self.norm3 = nn.LayerNorm(embedding_dim)
self.norm4 = nn.LayerNorm(embedding_dim)
self.cross_attn_image_to_token = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
) -> Tuple[Tensor, Tensor]:
# Self attention block
if self.skip_first_layer_pe:
queries = self.self_attn(q=queries, k=queries, v=queries)
else:
q = queries + query_pe
attn_out = self.self_attn(q=q, k=q, v=queries)
queries = queries + attn_out
queries = self.norm1(queries)
# Cross attention block, tokens attending to image embedding
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.norm3(queries)
# Cross attention block, image embedding attending to tokens
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
keys = keys + attn_out
keys = self.norm4(keys)
return queries, keys
class Attention(nn.Module):
"""
An attention layer that allows for downscaling the size of the embedding
after projection to queries, keys, and values.
"""
def __init__(
self,
embedding_dim: int,
num_heads: int,
downsample_rate: int = 1,
) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.internal_dim = embedding_dim // downsample_rate
self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim // downsample_rate."
self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
b, n, c = x.shape
x = x.reshape(b, n, num_heads, c // num_heads)
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
def _recombine_heads(self, x: Tensor) -> Tensor:
b, n_heads, n_tokens, c_per_head = x.shape
x = x.transpose(1, 2)
return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
# Input projections
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
# Separate into heads
q = self._separate_heads(q, self.num_heads)
k = self._separate_heads(k, self.num_heads)
v = self._separate_heads(v, self.num_heads)
# Attention
_, _, _, c_per_head = q.shape
attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
attn = attn / math.sqrt(c_per_head)
attn = torch.softmax(attn, dim=-1)
# Get output
out = attn @ v
out = self._recombine_heads(out)
out = self.out_proj(out)
return out
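# Usage sketch (assumed sizes): with downsample_rate=2 the projections run at half
# the channel width, but the output is projected back to embedding_dim.
#   attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
#   q = k = v = torch.randn(2, 10, 256)
#   out = attn(q, k, v)                  # (2, 10, 256); internal width is 128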
|
import os
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
def download_checkpoint():
subprocess.check_call(['wget', '-P', '.data', 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth'])
def download_data():
subprocess.check_call(['wget', '-P', '.data', 'https://github.com/facebookresearch/segment-anything/raw/main/notebooks/images/truck.jpg'])
if __name__ == '__main__':
pip_install_requirements()
# Create .data folder in the script's directory
data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
os.makedirs(data_folder, exist_ok=True)
# Download checkpoint and data files to the .data folder
download_checkpoint()
download_data()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
x = x + self.pos_embed
for blk in self.blocks:
x = blk(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then
use global attention.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.norm2 = norm_layer(dim)
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
self.window_size = window_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + x
x = x + self.mlp(self.norm2(x))
return x
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
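# Worked shape example (a sketch, not part of the original file):
#   x = torch.randn(1, 14, 14, 96)                     # B x H x W x C
#   windows, (Hp, Wp) = window_partition(x, window_size=8)
#   # the 14x14 grid is padded to 16x16, giving 4 windows of shape (8, 8, 96)
#   x_back = window_unpartition(windows, 8, (Hp, Wp), (14, 14))
#   # x_back is (1, 14, 14, 96); the padded rows and columns are cropped away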
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def add_decomposed_rel_pos(
attn: torch.Tensor,
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
Args:
attn (Tensor): attention map.
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
attn (Tensor): attention map with added relative positional embeddings.
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn = (
attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
).view(B, q_h * q_w, k_h * k_w)
return attn
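# Shape sketch for the decomposed relative position terms (assumed sizes):
#   q = torch.randn(1, 64, 32)            # (B * nHeads, q_h * q_w, head_dim), q_h = q_w = 8
#   attn = torch.zeros(1, 64, 64)
#   rel_h = torch.zeros(2 * 8 - 1, 32)    # (2 * q_h - 1, head_dim)
#   rel_w = torch.zeros(2 * 8 - 1, 32)
#   out = add_decomposed_rel_pos(attn, q, rel_h, rel_w, (8, 8), (8, 8))   # (1, 64, 64)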
class PatchEmbed(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self,
kernel_size: Tuple[int, int] = (16, 16),
stride: Tuple[int, int] = (16, 16),
padding: Tuple[int, int] = (0, 0),
in_chans: int = 3,
embed_dim: int = 768,
) -> None:
"""
Args:
kernel_size (Tuple): kernel size of the projection layer.
stride (Tuple): stride of the projection layer.
padding (Tuple): padding size of the projection layer.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
"""
super().__init__()
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
# B C H W -> B H W C
x = x.permute(0, 2, 3, 1)
return x
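# Usage sketch (assumed sizes): with the defaults, a 1024x1024 image becomes a
# 64x64 grid of patch tokens laid out channels-last.
#   embed = PatchEmbed(kernel_size=(16, 16), stride=(16, 16), in_chans=3, embed_dim=768)
#   tokens = embed(torch.randn(1, 3, 1024, 1024))   # (1, 64, 64, 768)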
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn
from typing import Any, Optional, Tuple, Type
from .common import LayerNorm2d
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
input_image_size (int): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
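    # Shape sketch (hypothetical sizes, not part of the original file):
    #   enc = PromptEncoder(embed_dim=256, image_embedding_size=(64, 64),
    #                       input_image_size=(1024, 1024), mask_in_chans=16)
    #   coords = torch.rand(1, 1, 2) * 1024      # one point, in input-image pixels
    #   labels = torch.ones(1, 1)                # 1 = foreground
    #   sparse, dense = enc(points=(coords, labels), boxes=None, masks=None)
    #   # sparse: (1, 2, 256) -- the point plus one padding point (pad=True when boxes is None)
    #   # dense:  (1, 256, 64, 64) -- the learned no-mask embedding broadcast over the grid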
class PositionEmbeddingRandom(nn.Module):
"""
Positional encoding using random spatial frequencies.
"""
def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
super().__init__()
if scale is None or scale <= 0.0:
scale = 1.0
self.register_buffer(
"positional_encoding_gaussian_matrix",
scale * torch.randn((2, num_pos_feats)),
)
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
"""Positionally encode points that are normalized to [0,1]."""
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coords = 2 * coords - 1
coords = coords.to(self.positional_encoding_gaussian_matrix.dtype)
coords = coords @ self.positional_encoding_gaussian_matrix
coords = 2 * np.pi * coords
# outputs d_1 x ... x d_n x C shape
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
def forward(self, size: Tuple[int, int]) -> torch.Tensor:
"""Generate positional encoding for a grid of the specified size."""
h, w = size
device: Any = self.positional_encoding_gaussian_matrix.device
grid = torch.ones((h, w), device=device, dtype=torch.float32)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / h
x_embed = x_embed / w
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
return pe.permute(2, 0, 1) # C x H x W
def forward_with_coords(
self, coords_input: torch.Tensor, image_size: Tuple[int, int]
) -> torch.Tensor:
"""Positionally encode points that are not normalized to [0,1]."""
coords = coords_input.clone()
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
return self._pe_encoding(coords.to(torch.float)) # B x N x C
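    # Usage sketch (assumed sizes): with num_pos_feats=128 the encoding has
    # 2 * 128 = 256 channels.
    #   pe_layer = PositionEmbeddingRandom(128)
    #   grid_pe = pe_layer((64, 64))             # (256, 64, 64), i.e. C x H x W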
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: List[float] = [123.675, 116.28, 103.53],
pixel_std: List[float] = [58.395, 57.12, 57.375],
) -> None:
"""
SAM predicts object masks from an image and input prompts.
Arguments:
image_encoder (ImageEncoderViT): The backbone used to encode the
image into image embeddings that allow for efficient mask prediction.
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
and encoded prompts.
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
pixel_std (list(float)): Std values for normalizing pixels in the input image.
"""
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
@property
def device(self) -> Any:
return self.pixel_mean.device
@torch.no_grad()
def forward(
self,
batched_input: List[Dict[str, Any]],
multimask_output: bool,
) -> List[Dict[str, torch.Tensor]]:
"""
Predicts masks end-to-end from provided images and prompts.
If prompts are not known in advance, using SamPredictor is
recommended over calling the model directly.
Arguments:
batched_input (list(dict)): A list over input images, each a
dictionary with the following keys. A prompt key can be
excluded if it is not present.
'image': The image as a torch tensor in 3xHxW format,
already transformed for input to the model.
'original_size': (tuple(int, int)) The original size of
the image before transformation, as (H, W).
'point_coords': (torch.Tensor) Batched point prompts for
this image, with shape BxNx2. Already transformed to the
input frame of the model.
'point_labels': (torch.Tensor) Batched labels for point prompts,
with shape BxN.
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
Already transformed to the input frame of the model.
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
in the form Bx1xHxW.
multimask_output (bool): Whether the model should predict multiple
disambiguating masks, or return a single mask.
Returns:
(list(dict)): A list over input images, where each element is
as dictionary with the following keys.
'masks': (torch.Tensor) Batched binary mask predictions,
with shape BxCxHxW, where B is the number of input prompts,
C is determined by multimask_output, and (H, W) is the
original size of the image.
'iou_predictions': (torch.Tensor) The model's predictions
of mask quality, in shape BxC.
'low_res_logits': (torch.Tensor) Low resolution logits with
shape BxCxHxW, where H=W=256. Can be passed as mask input
to subsequent iterations of prediction.
"""
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
image_embeddings = self.image_encoder(input_images)
outputs = []
for image_record, curr_embedding in zip(batched_input, image_embeddings):
if "point_coords" in image_record:
points = (image_record["point_coords"], image_record["point_labels"])
else:
points = None
sparse_embeddings, dense_embeddings = self.prompt_encoder(
points=points,
boxes=image_record.get("boxes", None),
masks=image_record.get("mask_inputs", None),
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=curr_embedding.unsqueeze(0),
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
masks = self.postprocess_masks(
low_res_masks,
input_size=image_record["image"].shape[-2:],
original_size=image_record["original_size"],
)
masks = masks > self.mask_threshold
outputs.append(
{
"masks": masks,
"iou_predictions": iou_predictions,
"low_res_logits": low_res_masks,
}
)
return outputs
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
masks = F.interpolate(
masks,
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
return masks
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize pixel values and pad to a square input."""
# Normalize colors
x = (x - self.pixel_mean) / self.pixel_std
# Pad
h, w = x.shape[-2:]
padh = self.image_encoder.img_size - h
padw = self.image_encoder.img_size - w
x = F.pad(x, (0, padw, 0, padh))
return x
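    # Sketch (assumes `sam` is a constructed Sam whose image encoder uses img_size=1024):
    #   x = torch.randint(0, 255, (3, 768, 1024)).float()   # an already-resized RGB image
    #   x = sam.preprocess(x)                                # normalized, then zero-padded
    #   # x.shape == (3, 1024, 1024); padding is added on the bottom and right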
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Type
from .common import LayerNorm2d
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
multimask_output (bool): Whether to return multiple masks or a single
mask.
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
else:
mask_slice = slice(0, 1)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
tokens = tokens.to(src.dtype)
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
sigmoid_output: bool = False,
) -> None:
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
self.sigmoid_output = sigmoid_output
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
if self.sigmoid_output:
            x = torch.sigmoid(x)
return x
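# Usage sketch (hypothetical sizes): ReLU is applied between layers but not on the output.
#   mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
#   out = mlp(torch.randn(4, 256))   # (4, 32)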
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Bert", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import torch
from . import tke_pytorch
from typing import Tuple
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
def _generate_inputs(size):
import numpy as np
import math
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1 / 3)),
math.ceil(2 * size ** (1 / 3)),
math.ceil(0.25 * size ** (1 / 3)),
)
# masks
maskU, maskV, maskW = (
(np.random.rand(*shape) < 0.8).astype("float64") for _ in range(3)
)
# 1d arrays
dxt, dxu = (np.random.randn(shape[0]) for _ in range(2))
dyt, dyu = (np.random.randn(shape[1]) for _ in range(2))
dzt, dzw = (np.random.randn(shape[2]) for _ in range(2))
cost, cosu = (np.random.randn(shape[1]) for _ in range(2))
# 2d arrays
kbot = np.random.randint(0, shape[2], size=shape[:2])
forc_tke_surface = np.random.randn(*shape[:2])
# 3d arrays
kappaM, mxl, forc = (np.random.randn(*shape) for _ in range(3))
# 4d arrays
u, v, w, tke, dtke = (np.random.randn(*shape, 3) for _ in range(5))
return (
u,
v,
w,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
kbot,
kappaM,
mxl,
forc,
forc_tke_surface,
tke,
dtke,
)
class TurbulentKineticEnergy(torch.nn.Module):
def __init__(self, device):
super(TurbulentKineticEnergy, self).__init__()
self.device = device
def forward(
self,
u,
v,
w,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
kbot,
kappaM,
mxl,
forc,
forc_tke_surface,
tke,
dtke,
):
        # tke and dtke are modified in place inside integrate_tke, which would make
        # repeated runs inconsistent, so clone them before passing them in
return tke_pytorch.integrate_tke(
u,
v,
w,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
kbot,
kappaM,
mxl,
forc,
forc_tke_surface,
torch.clone(tke),
torch.clone(dtke),
)
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# Original input size: [2 ** i for i in range(12, 23, 2)]
# Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25
# Pick data-point when i = 20, size = 1048576
DEFAULT_EVAL_BSIZE = 1048576
ALLOW_CUSTOMIZE_BSIZE = False
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = TurbulentKineticEnergy(self.device).to(device=self.device)
input_size = self.batch_size
self.example_inputs = tuple(
torch.from_numpy(x).to(self.device) for x in _generate_inputs(input_size)
)
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Training not supported")
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
with torch.no_grad():
out = model(*example_inputs)
return out
|
import torch
def solve_tridiag(a, b, c, d):
"""
Solves a tridiagonal matrix system with diagonals a, b, c and RHS vector d.
"""
assert a.shape == b.shape and a.shape == c.shape and a.shape == d.shape
n = a.shape[-1]
for i in range(1, n):
w = a[..., i] / b[..., i - 1]
b[..., i] += -w * c[..., i - 1]
d[..., i] += -w * d[..., i - 1]
out = torch.empty_like(a)
out[..., -1] = d[..., -1] / b[..., -1]
for i in range(n - 2, -1, -1):
out[..., i] = (d[..., i] - c[..., i] * out[..., i + 1]) / b[..., i]
return out
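# Small worked example (a sketch, not part of the original benchmark). Since the
# forward sweep modifies b and d in place, pass clones if those tensors are reused.
#   a = torch.tensor([0.0, 1.0, 1.0])   # sub-diagonal (a[..., 0] is unused)
#   b = torch.tensor([2.0, 2.0, 2.0])   # main diagonal
#   c = torch.tensor([1.0, 1.0, 0.0])   # super-diagonal (c[..., -1] is unused)
#   d = torch.tensor([3.0, 4.0, 3.0])   # right-hand side
#   x = solve_tridiag(a, b.clone(), c, d.clone())
#   # x == tensor([1., 1., 1.]), solving [[2,1,0],[1,2,1],[0,1,2]] @ x == [3, 4, 3]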
def solve_implicit(ks, a, b, c, d, b_edge):
land_mask = (ks >= 0)[:, :, None]
edge_mask = land_mask & (
torch.arange(a.shape[2], device=ks.device)[None, None, :] == ks[:, :, None]
)
water_mask = land_mask & (
torch.arange(a.shape[2], device=ks.device)[None, None, :] >= ks[:, :, None]
)
a_tri = water_mask * a * torch.logical_not(edge_mask)
b_tri = torch.where(water_mask, b, 1.0)
b_tri = torch.where(edge_mask, b_edge, b_tri)
c_tri = water_mask * c
d_tri = water_mask * d
return solve_tridiag(a_tri, b_tri, c_tri, d_tri), water_mask
def _calc_cr(rjp, rj, rjm, vel):
"""
Calculates cr value used in superbee advection scheme
"""
eps = 1e-20 # prevent division by 0
return torch.where(vel > 0.0, rjm, rjp) / torch.where(torch.abs(rj) < eps, eps, rj)
def pad_z_edges(arr):
arr_shape = list(arr.shape)
arr_shape[2] += 2
out = torch.zeros(arr_shape, dtype=arr.dtype, device=arr.device)
out[:, :, 1:-1] = arr
return out
def limiter(cr):
return torch.maximum(
torch.tensor([0.0], device=cr.device),
torch.maximum(
torch.minimum(torch.tensor([1.0], device=cr.device), 2 * cr),
torch.minimum(torch.tensor([2.0], device=cr.device), cr),
),
)
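# Spot-check of the superbee limiter (illustrative values):
#   limiter(torch.tensor([-0.5, 0.25, 1.0, 3.0]))   # -> tensor([0.0, 0.5, 1.0, 2.0])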
def _adv_superbee(vel, var, mask, dx, axis: int, cost, cosu, dt_tracer: float):
if axis == 0:
dx = cost[None, 2:-2, None] * dx[1:-2, None, None]
uCFL = torch.abs(vel[1:-2, 2:-2, :] * dt_tracer / dx)
rjp = (var[3:, 2:-2, :] - var[2:-1, 2:-2, :]) * mask[2:-1, 2:-2, :]
rj = (var[2:-1, 2:-2, :] - var[1:-2, 2:-2, :]) * mask[1:-2, 2:-2, :]
rjm = (var[1:-2, 2:-2, :] - var[:-3, 2:-2, :]) * mask[:-3, 2:-2, :]
cr = limiter(_calc_cr(rjp, rj, rjm, vel[1:-2, 2:-2, :]))
return (
vel[1:-2, 2:-2, :] * (var[2:-1, 2:-2, :] + var[1:-2, 2:-2, :]) * 0.5
- torch.abs(vel[1:-2, 2:-2, :]) * ((1.0 - cr) + uCFL * cr) * rj * 0.5
)
elif axis == 1:
dx = (cost * dx)[None, 1:-2, None]
velfac = cosu[None, 1:-2, None]
uCFL = torch.abs(velfac * vel[2:-2, 1:-2, :] * dt_tracer / dx)
rjp = (var[2:-2, 3:, :] - var[2:-2, 2:-1, :]) * mask[2:-2, 2:-1, :]
rj = (var[2:-2, 2:-1, :] - var[2:-2, 1:-2, :]) * mask[2:-2, 1:-2, :]
rjm = (var[2:-2, 1:-2, :] - var[2:-2, :-3, :]) * mask[2:-2, :-3, :]
cr = limiter(_calc_cr(rjp, rj, rjm, vel[2:-2, 1:-2, :]))
return (
velfac
* vel[2:-2, 1:-2, :]
* (var[2:-2, 2:-1, :] + var[2:-2, 1:-2, :])
* 0.5
- torch.abs(velfac * vel[2:-2, 1:-2, :])
* ((1.0 - cr) + uCFL * cr)
* rj
* 0.5
)
elif axis == 2:
vel, var, mask = [pad_z_edges(a) for a in (vel, var, mask)]
dx = dx[None, None, :-1]
uCFL = torch.abs(vel[2:-2, 2:-2, 1:-2] * dt_tracer / dx)
rjp = (var[2:-2, 2:-2, 3:] - var[2:-2, 2:-2, 2:-1]) * mask[2:-2, 2:-2, 2:-1]
rj = (var[2:-2, 2:-2, 2:-1] - var[2:-2, 2:-2, 1:-2]) * mask[2:-2, 2:-2, 1:-2]
rjm = (var[2:-2, 2:-2, 1:-2] - var[2:-2, 2:-2, :-3]) * mask[2:-2, 2:-2, :-3]
cr = limiter(_calc_cr(rjp, rj, rjm, vel[2:-2, 2:-2, 1:-2]))
return (
vel[2:-2, 2:-2, 1:-2]
* (var[2:-2, 2:-2, 2:-1] + var[2:-2, 2:-2, 1:-2])
* 0.5
- torch.abs(vel[2:-2, 2:-2, 1:-2]) * ((1.0 - cr) + uCFL * cr) * rj * 0.5
)
else:
raise ValueError("axis must be 0, 1, or 2")
def adv_flux_superbee_wgrid(
adv_fe,
adv_fn,
adv_ft,
var,
u_wgrid,
v_wgrid,
w_wgrid,
maskW,
dxt,
dyt,
dzw,
cost,
cosu,
dt_tracer: float,
):
"""
Calculates advection of a tracer defined on Wgrid
"""
maskUtr = torch.zeros_like(maskW)
maskUtr[:-1, :, :] = maskW[1:, :, :] * maskW[:-1, :, :]
adv_fe[...] = 0.0
adv_fe[1:-2, 2:-2, :] = _adv_superbee(
u_wgrid, var, maskUtr, dxt, 0, cost, cosu, dt_tracer
)
maskVtr = torch.zeros_like(maskW)
maskVtr[:, :-1, :] = maskW[:, 1:, :] * maskW[:, :-1, :]
adv_fn[...] = 0.0
adv_fn[2:-2, 1:-2, :] = _adv_superbee(
v_wgrid, var, maskVtr, dyt, 1, cost, cosu, dt_tracer
)
maskWtr = torch.zeros_like(maskW)
maskWtr[:, :, :-1] = maskW[:, :, 1:] * maskW[:, :, :-1]
adv_ft[...] = 0.0
adv_ft[2:-2, 2:-2, :-1] = _adv_superbee(
w_wgrid, var, maskWtr, dzw, 2, cost, cosu, dt_tracer
)
def integrate_tke(
u,
v,
w,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
kbot,
kappaM,
mxl,
forc,
forc_tke_surface,
tke,
dtke,
):
tau = 0
taup1 = 1
taum1 = 2
dt_tracer = 1.0
dt_mom = 1
AB_eps = 0.1
alpha_tke = 1.0
c_eps = 0.7
K_h_tke = 2000.0
flux_east = torch.zeros_like(maskU)
flux_north = torch.zeros_like(maskU)
flux_top = torch.zeros_like(maskU)
sqrttke = torch.sqrt(
torch.maximum(torch.tensor([0.0], device=tke.device), tke[:, :, :, tau])
)
"""
integrate Tke equation on W grid with surface flux boundary condition
"""
dt_tke = dt_mom # use momentum time step to prevent spurious oscillations
"""
vertical mixing and dissipation of TKE
"""
ks = kbot[2:-2, 2:-2] - 1
a_tri = torch.zeros_like(maskU[2:-2, 2:-2])
b_tri = torch.zeros_like(maskU[2:-2, 2:-2])
c_tri = torch.zeros_like(maskU[2:-2, 2:-2])
d_tri = torch.zeros_like(maskU[2:-2, 2:-2])
delta = torch.zeros_like(maskU[2:-2, 2:-2])
delta[:, :, :-1] = (
dt_tke
/ dzt[None, None, 1:]
* alpha_tke
* 0.5
* (kappaM[2:-2, 2:-2, :-1] + kappaM[2:-2, 2:-2, 1:])
)
a_tri[:, :, 1:-1] = -delta[:, :, :-2] / dzw[None, None, 1:-1]
a_tri[:, :, -1] = -delta[:, :, -2] / (0.5 * dzw[-1])
b_tri[:, :, 1:-1] = (
1
+ (delta[:, :, 1:-1] + delta[:, :, :-2]) / dzw[None, None, 1:-1]
+ dt_tke * c_eps * sqrttke[2:-2, 2:-2, 1:-1] / mxl[2:-2, 2:-2, 1:-1]
)
b_tri[:, :, -1] = (
1
+ delta[:, :, -2] / (0.5 * dzw[-1])
+ dt_tke * c_eps / mxl[2:-2, 2:-2, -1] * sqrttke[2:-2, 2:-2, -1]
)
b_tri_edge = (
1
+ delta / dzw[None, None, :]
+ dt_tke * c_eps / mxl[2:-2, 2:-2, :] * sqrttke[2:-2, 2:-2, :]
)
c_tri[:, :, :-1] = -delta[:, :, :-1] / dzw[None, None, :-1]
d_tri[...] = tke[2:-2, 2:-2, :, tau] + dt_tke * forc[2:-2, 2:-2, :]
d_tri[:, :, -1] += dt_tke * forc_tke_surface[2:-2, 2:-2] / (0.5 * dzw[-1])
sol, water_mask = solve_implicit(ks, a_tri, b_tri, c_tri, d_tri, b_edge=b_tri_edge)
tke[2:-2, 2:-2, :, taup1] = torch.where(water_mask, sol, tke[2:-2, 2:-2, :, taup1])
"""
Add TKE if surface density flux drains TKE in uppermost box
"""
tke_surf_corr = torch.zeros(maskU.shape[:2], device=maskU.device)
mask = tke[2:-2, 2:-2, -1, taup1] < 0.0
tke_surf_corr[2:-2, 2:-2] = torch.where(
mask, -tke[2:-2, 2:-2, -1, taup1] * 0.5 * dzw[-1] / dt_tke, 0.0
)
tke[2:-2, 2:-2, -1, taup1] = torch.maximum(
torch.tensor([0.0], device=tke.device), tke[2:-2, 2:-2, -1, taup1]
)
"""
add tendency due to lateral diffusion
"""
flux_east[:-1, :, :] = (
K_h_tke
* (tke[1:, :, :, tau] - tke[:-1, :, :, tau])
/ (cost[None, :, None] * dxu[:-1, None, None])
* maskU[:-1, :, :]
)
flux_east[-1, :, :] = 0.0
flux_north[:, :-1, :] = (
K_h_tke
* (tke[:, 1:, :, tau] - tke[:, :-1, :, tau])
/ dyu[None, :-1, None]
* maskV[:, :-1, :]
* cosu[None, :-1, None]
)
flux_north[:, -1, :] = 0.0
tke[2:-2, 2:-2, :, taup1] += (
dt_tke
* maskW[2:-2, 2:-2, :]
* (
(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
/ (cost[None, 2:-2, None] * dxt[2:-2, None, None])
+ (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
/ (cost[None, 2:-2, None] * dyt[None, 2:-2, None])
)
)
"""
add tendency due to advection
"""
adv_flux_superbee_wgrid(
flux_east,
flux_north,
flux_top,
tke[:, :, :, tau],
u[..., tau],
v[..., tau],
w[..., tau],
maskW,
dxt,
dyt,
dzw,
cost,
cosu,
dt_tracer,
)
dtke[2:-2, 2:-2, :, tau] = maskW[2:-2, 2:-2, :] * (
-(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
/ (cost[None, 2:-2, None] * dxt[2:-2, None, None])
- (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
/ (cost[None, 2:-2, None] * dyt[None, 2:-2, None])
)
dtke[:, :, 0, tau] += -flux_top[:, :, 0] / dzw[0]
dtke[:, :, 1:-1, tau] += -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / dzw[1:-1]
dtke[:, :, -1, tau] += -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * dzw[-1])
"""
    Adams-Bashforth time stepping
"""
tke[:, :, :, taup1] += dt_tracer * (
(1.5 + AB_eps) * dtke[:, :, :, tau] - (0.5 + AB_eps) * dtke[:, :, :, taum1]
)
return tke, dtke, tke_surf_corr
def prepare_inputs(*inputs, device):
out = [
torch.as_tensor(a, device=device) for a in inputs
]
if device == "gpu":
torch.cuda.synchronize()
return out
def run(*inputs, device="cpu"):
with torch.no_grad():
outputs = integrate_tke(*inputs)
if device == "gpu":
torch.cuda.synchronize()
return outputs
|
if __name__ == "__main__":
pass
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# Original train batch size per device: 8
# Source: https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py#L83
DEFAULT_TRAIN_BSIZE = 8
# Original eval batch size per device: 8
# Downscale to 1 to fit in Nvidia T4 of the infra
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_T5", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
# Copyright (c) 2017 NVIDIA Corporation
import argparse
from math import sqrt
parser = argparse.ArgumentParser(description='RMSE_calculator')
parser.add_argument('--path_to_predictions', type=str, default="", metavar='N',
help='Path file with actual ratings and predictions')
parser.add_argument('--round', action='store_true',
help='round predictions to nearest')
args = parser.parse_args()
print(args)
def main():
with open(args.path_to_predictions, 'r') as inpt:
lines = inpt.readlines()
n = 0
denom = 0.0
for line in lines:
parts = line.split('\t')
prediction = float(parts[2]) if not args.round else round(float(parts[2]))
rating = float(parts[3])
denom += (prediction - rating)*(prediction - rating)
n += 1
print("####################")
print("RMSE: {}".format(sqrt(denom/n)))
print("####################")
if __name__ == '__main__':
main() |
# Benchmark created from NVidia DeepRecommender github project:
# https://github.com/NVIDIA/DeepRecommender
# a32a8a5c23092c551616acf6fac5b32e1155d18b
# Test supports eval and train modes for cpu and cuda targets.
#
# Both nvtrain.py and nvinfer.py support all original command
# line parameters but tensorflow dependency for logging has
# been removed.
import torch
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import RECOMMENDATION
from typing import Tuple
import gc
from .nvtrain import DeepRecommenderTrainBenchmark
from .nvinfer import DeepRecommenderInferenceBenchmark
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
DEFAULT_TRAIN_BSIZE = 256
DEFAULT_EVAL_BSIZE = 256
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.eval_mode = self.test == "eval"
if test == "train":
self.model = DeepRecommenderTrainBenchmark(device = self.device, jit = False, batch_size=self.batch_size)
elif test == "eval":
self.model = DeepRecommenderInferenceBenchmark(device = self.device, jit = False, batch_size=self.batch_size)
def jit_callback(self):
self.model.rencoder = torch.jit.trace(self.model.rencoder, (self.model.toyinputs, ))
    def get_module(self):
        return self.model.rencoder, (self.model.toyinputs,)
def set_module(self, new_model):
self.model.rencoder = new_model
def set_eval(self):
self.eval_mode = True
def set_train(self):
self.eval_mode = False
def get_optimizer(self):
return self.model.get_optimizer()
def set_optimizer(self, optimizer) -> None:
self.model.set_optimizer(optimizer)
def train(self):
self.model.train()
def eval(self) -> Tuple[torch.Tensor]:
out = self.model.eval()
return (out, )
def timed_infer(self):
self.model.TimedInferenceRun()
def timed_train(self):
self.model.TimedTrainingRun()
|
# Copyright (c) 2017 NVIDIA Corporation
# parameters to run benchmark on cpu
# --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_TEST --hidden_layers 512,512,1024 --non_linearity_type selu --save_path model_save/model.epoch_0 --drop_prob 0.8 --predictions_path preds.txt --nooutput --forcecpu
# parameters to run benchmark on cuda
# --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_TEST --hidden_layers 512,512,1024 --non_linearity_type selu --save_path model_save/model.epoch_0 --drop_prob 0.8 --predictions_path preds.txt --nooutput --forcecuda
import torch
import argparse
import copy
import time
import os
from .reco_encoder.data import input_layer
from .reco_encoder.model import model
from torch.autograd import Variable
from pathlib import Path
import torch.autograd.profiler as profiler
def getCommandLineArgs() :
parser = argparse.ArgumentParser(description='RecoEncoder')
parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',
help='dropout drop probability')
parser.add_argument('--constrained', action='store_true',
help='constrained autoencoder')
parser.add_argument('--skip_last_layer_nl', action='store_true',
help='if present, decoder\'s last layer will not apply non-linearity function')
parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N',
help='hidden layer sizes, comma-separated')
parser.add_argument('--path_to_train_data', type=str, default="", metavar='N',
help='Path to training data')
parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N',
help='Path to evaluation data')
parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N',
help='type of the non-linearity used in activations')
parser.add_argument('--save_path', type=str, default="autorec.pt", metavar='N',
help='where to save model')
parser.add_argument('--predictions_path', type=str, default="out.txt", metavar='N',
help='where to save predictions')
parser.add_argument('--batch_size', type=int, default=1, metavar='N',
help='inference batch size')
parser.add_argument('--jit', action='store_true',
help='jit-ify model before running')
parser.add_argument('--forcecuda', action='store_true',
help='force cuda use')
parser.add_argument('--forcecpu', action='store_true',
help='force cpu use')
parser.add_argument('--nooutput', action='store_true',
help='disable writing output to file')
parser.add_argument('--silent', action='store_true',
help='disable output messages')
parser.add_argument('--profile', action='store_true',
help='enable profiler and stat print')
args = parser.parse_args()
return args
def getBenchmarkArgs(forceCuda):
class Args:
pass
args = Args()
args.drop_prob = 0.8
args.constrained = False
args.skip_last_layer_nl = False
args.hidden_layers = '512,512,1024'
args.path_to_train_data = os.path.dirname(__file__) + '/Netflix/N1W_TRAIN'
args.path_to_eval_data = os.path.dirname(__file__) + '/Netflix/N1W_TEST'
args.non_linearity_type = 'selu'
args.save_path = 'model_save/model.epoch_0'
args.predictions_path = 'preds.txt'
args.batch_size = 1
args.jit = False
args.forcecuda = forceCuda
args.forcecpu = not forceCuda
args.nooutput = True
args.silent = True
args.profile = False
return args
def processArgState(args) :
if not args.silent:
print(args)
if args.forcecpu and args.forcecuda:
print("Error, force cpu and cuda cannot both be set")
quit()
args.use_cuda = torch.cuda.is_available() # global flag
if not args.silent:
if args.use_cuda:
print('GPU is available.')
else:
print('GPU is not available.')
if args.use_cuda and args.forcecpu:
args.use_cuda = False
if not args.silent:
if args.use_cuda:
print('Running On GPU')
else:
      print('Running On CPU')
if args.profile:
print('Profiler Enabled')
return args
class DeepRecommenderInferenceBenchmark:
def __init__(self, device = 'cpu', jit=False, batch_size=256, usecommandlineargs = False) :
self.toytest = True
self.batch_size = batch_size
# number of movies in netflix training set.
self.node_count = 197951
if self.toytest:
self.toyinputs = torch.randn(self.batch_size,self.node_count).to(device)
if usecommandlineargs:
self.args = getCommandLineArgs()
else:
if device == "cpu":
forcecuda = False
elif device == "cuda":
forcecuda = True
else:
# unknown device string, quit init
return
self.args = getBenchmarkArgs(forcecuda)
args = processArgState(self.args)
self.params = dict()
self.params['batch_size'] = self.args.batch_size
self.params['data_dir'] = self.args.path_to_train_data
self.params['major'] = 'users'
self.params['itemIdInd'] = 1
self.params['userIdInd'] = 0
if not self.args.silent:
print("Loading training data")
if self.toytest == False:
self.data_layer = input_layer.UserItemRecDataProvider(params=self.params)
if not self.args.silent:
print("Data loaded")
print("Total items found: {}".format(len(self.data_layer.data.keys())))
print("Vector dim: {}".format(self.data_layer.vector_dim))
print("Loading eval data")
self.eval_params = copy.deepcopy(self.params)
# must set eval batch size to 1 to make sure no examples are missed
self.eval_params['batch_size'] = 1
self.eval_params['data_dir'] = self.args.path_to_eval_data
if self.toytest:
self.rencoder = model.AutoEncoder(layer_sizes=[self.node_count] + [int(l) for l in self.args.hidden_layers.split(',')],
nl_type=self.args.non_linearity_type,
is_constrained=self.args.constrained,
dp_drop_prob=self.args.drop_prob,
last_layer_activations=not self.args.skip_last_layer_nl)
else:
self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params,
user_id_map=self.data_layer.userIdMap,
item_id_map=self.data_layer.itemIdMap)
self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')],
nl_type=self.args.non_linearity_type,
is_constrained=self.args.constrained,
dp_drop_prob=self.args.drop_prob,
last_layer_activations=not self.args.skip_last_layer_nl)
self.path_to_model = Path(self.args.save_path)
if self.path_to_model.is_file():
print("Loading model from: {}".format(self.path_to_model))
self.rencoder.load_state_dict(torch.load(self.args.save_path))
if not self.args.silent:
print('######################################################')
print('######################################################')
print('############# AutoEncoder Model: #####################')
print(self.rencoder)
print('######################################################')
print('######################################################')
self.rencoder.eval()
if self.args.use_cuda: self.rencoder = self.rencoder.cuda()
if self.toytest == False:
self.inv_userIdMap = {v: k for k, v in self.data_layer.userIdMap.items()}
self.inv_itemIdMap = {v: k for k, v in self.data_layer.itemIdMap.items()}
self.eval_data_layer.src_data = self.data_layer.data
def eval(self, niter=1):
for iteration in range(niter):
if self.toytest:
out = self.rencoder(self.toyinputs)
continue
for i, ((out, src), majorInd) in enumerate(self.eval_data_layer.iterate_one_epoch_eval(for_inf=True)):
inputs = Variable(src.cuda().to_dense() if self.args.use_cuda else src.to_dense())
targets_np = out.to_dense().numpy()[0, :]
out = self.rencoder(inputs)
if not self.args.nooutput:
self.outputs = out.cpu().data.numpy()[0, :]
non_zeros = targets_np.nonzero()[0].tolist()
          major_key = self.inv_userIdMap[majorInd]
with open(self.args.predictions_path, 'w') as outf:
for ind in non_zeros:
outf.write("{}\t{}\t{}\t{}\n".format(major_key, self.inv_itemIdMap[ind], self.outputs[ind], targets_np[ind]))
if i % 10000 == 0:
print("Done: {}".format(i))
return out
def TimedInferenceRun(self) :
print('Timed Inference Start')
e_start_time = time.time()
if self.args.profile:
with profiler.profile(record_shapes=True, use_cuda=True) as prof:
with profiler.record_function("Inference"):
self.eval()
else:
self.eval()
e_end_time = time.time()
print('Timed Inference Complete')
print('Inference finished in {} seconds'
.format(e_end_time - e_start_time))
if self.args.profile:
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
prof.export_chrome_trace("trace.json")
def main():
benchmarkCuda = DeepRecommenderInferenceBenchmark(device='cuda')
benchmarkCuda.TimedInferenceRun()
benchmarkCPU = DeepRecommenderInferenceBenchmark(device='cpu')
benchmarkCPU.TimedInferenceRun()
if __name__ == '__main__':
main()
|
# Copyright (c) 2017 NVIDIA Corporation
# to run against cuda:
# --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecuda
# to run on cpu:
# --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecpu
import torch
import argparse
from .reco_encoder.data import input_layer
from .reco_encoder.model import model
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import torch.nn as nn
from torch.autograd import Variable
import copy
import time
from pathlib import Path
#from .logger import Logger
from math import sqrt
import numpy as np
import os
import torch.autograd.profiler as profiler
def getTrainBenchmarkArgs() :
class Args:
pass
args = Args()
args.lr = 0.005
args.weight_decay = 0
args.drop_prob = 0.8
args.noise_prob = 0
args.batch_size = 128
args.summary_frequency = 1000
args.aug_step = 1
args.constrained = False
args.skip_last_layer_nl = False
args.num_epochs = 1
args.save_every = 3
args.optimizer = 'momentum'
args.hidden_layers = '512,512,1024'
args.gpu_ids = '0'
args.path_to_train_data = os.path.dirname(__file__) + '/Netflix/N1W_TRAIN'
args.path_to_eval_data = os.path.dirname(__file__) + '/Netflix/N1W_VALID'
args.non_linearity_type = 'selu'
args.logdir = 'model_save'
args.nooutput = True
args.silent = True
args.forcecuda = False
args.forcecpu = False
args.profile = False
return args
def getTrainCommandLineArgs() :
parser = argparse.ArgumentParser(description='RecoEncoder')
parser.add_argument('--lr', type=float, default=0.00001, metavar='N',
help='learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0, metavar='N',
help='L2 weight decay')
parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',
help='dropout drop probability')
parser.add_argument('--noise_prob', type=float, default=0.0, metavar='N',
help='noise probability')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='global batch size')
parser.add_argument('--summary_frequency', type=int, default=100, metavar='N',
help='how often to save summaries')
parser.add_argument('--aug_step', type=int, default=-1, metavar='N',
help='do data augmentation every X step')
parser.add_argument('--constrained', action='store_true',
help='constrained autoencoder')
parser.add_argument('--skip_last_layer_nl', action='store_true',
help='if present, decoder\'s last layer will not apply non-linearity function')
parser.add_argument('--num_epochs', type=int, default=50, metavar='N',
help='maximum number of epochs')
parser.add_argument('--save_every', type=int, default=3, metavar='N',
help='save every N number of epochs')
parser.add_argument('--optimizer', type=str, default="momentum", metavar='N',
help='optimizer kind: adam, momentum, adagrad or rmsprop')
parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N',
help='hidden layer sizes, comma-separated')
parser.add_argument('--gpu_ids', type=str, default="0", metavar='N',
help='comma-separated gpu ids to use for data parallel training')
parser.add_argument('--path_to_train_data', type=str, default="", metavar='N',
help='Path to training data')
parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N',
help='Path to evaluation data')
parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N',
help='type of the non-linearity used in activations')
parser.add_argument('--logdir', type=str, default="logs", metavar='N',
help='where to save model and write logs')
parser.add_argument('--nooutput', action='store_true',
help='disable writing output to file')
parser.add_argument('--silent', action='store_true',
help='disable all messages')
parser.add_argument('--forcecuda', action='store_true',
help='force cuda use')
parser.add_argument('--forcecpu', action='store_true',
help='force cpu use')
parser.add_argument('--profile', action='store_true',
help='enable profiler and stat print')
args = parser.parse_args()
return args
def processTrainArgState(args) :
if not args.silent:
print(args)
if args.forcecpu and args.forcecuda:
print("Error, force cpu and cuda cannot both be set")
quit()
args.use_cuda = torch.cuda.is_available() # global flag
if not args.silent:
if args.use_cuda:
print('GPU is available.')
else:
print('GPU is not available.')
if args.use_cuda and args.forcecpu:
args.use_cuda = False
if not args.silent:
if args.use_cuda:
print('Running On CUDA')
else:
print('Running On CPU')
return args
def log_var_and_grad_summaries(logger, layers, global_step, prefix, log_histograms=False):
"""
  Logs variable and gradient stats for layers. Transfers data from GPU to CPU automatically.
:param logger: TB logger
:param layers: param list
:param global_step: global step for TB
:param prefix: name prefix
  :param log_histograms: (default: False) whether or not to log histograms
:return:
"""
for ind, w in enumerate(layers):
# Variables
w_var = w.data.cpu().numpy()
logger.scalar_summary("Variables/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_var),
global_step)
if log_histograms:
logger.histo_summary(tag="Variables/{}_{}".format(prefix, ind), values=w.data.cpu().numpy(),
step=global_step)
# Gradients
w_grad = w.grad.data.cpu().numpy()
logger.scalar_summary("Gradients/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_grad),
global_step)
if log_histograms:
logger.histo_summary(tag="Gradients/{}_{}".format(prefix, ind), values=w.grad.data.cpu().numpy(),
step=global_step)
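# Computes evaluation RMSE: masked MSE (zero targets are treated as unrated and
# ignored) summed over one pass of the eval data, normalized by the total number
# of ratings.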
def DoTrainEval(encoder, evaluation_data_layer, use_cuda):
encoder.eval()
denom = 0.0
total_epoch_loss = 0.0
for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()):
inputs = Variable(src.cuda().to_dense() if use_cuda else src.to_dense())
targets = Variable(eval.cuda().to_dense() if use_cuda else eval.to_dense())
outputs = encoder(inputs)
loss, num_ratings = model.MSEloss(outputs, targets)
total_epoch_loss += loss.item()
denom += num_ratings.item()
return sqrt(total_epoch_loss / denom)
class DeepRecommenderTrainBenchmark:
def __init__(self, device="cpu", jit=False, batch_size=256, processCommandLine = False):
self.TrainInit(device, jit, batch_size, processCommandLine)
def TrainInit(self, device="cpu", jit=False, batch_size=256, processCommandLine = False):
# Force test to run in toy mode. Single call of fake data to model.
self.toytest = True
self.toybatch = batch_size
# number of movies in netflix training set.
self.toyvocab = 197951
self.toyinputs = torch.randn(self.toybatch, self.toyvocab)
if (processCommandLine) :
self.args = getTrainCommandLineArgs()
else:
self.args = getTrainBenchmarkArgs()
if device == "cpu":
forcecuda = False
elif device == "cuda":
forcecuda = True
else:
# unknown device string, quit init
return
self.args.forcecuda = forcecuda
self.args.forcecpu = not forcecuda
self.args = processTrainArgState(self.args)
if self.toytest == False:
self.logger = Logger(self.args.logdir)
self.params = dict()
self.params['batch_size'] = self.args.batch_size
self.params['data_dir'] = self.args.path_to_train_data
self.params['major'] = 'users'
self.params['itemIdInd'] = 1
self.params['userIdInd'] = 0
if self.toytest == False:
if not self.args.silent:
print("Loading training data")
self.data_layer = input_layer.UserItemRecDataProvider(params=self.params)
if not self.args.silent:
print("Data loaded")
print("Total items found: {}".format(len(self.data_layer.data.keys())))
print("Vector dim: {}".format(self.data_layer.vector_dim))
print("Loading eval data")
self.eval_params = copy.deepcopy(self.params)
# must set eval batch size to 1 to make sure no examples are missed
if self.toytest:
self.rencoder = model.AutoEncoder(layer_sizes=[self.toyvocab] + [int(l) for l in self.args.hidden_layers.split(',')],
nl_type=self.args.non_linearity_type,
is_constrained=self.args.constrained,
dp_drop_prob=self.args.drop_prob,
last_layer_activations=not self.args.skip_last_layer_nl)
else:
self.eval_params['data_dir'] = self.args.path_to_eval_data
self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params,
user_id_map=self.data_layer.userIdMap, # the mappings are provided
item_id_map=self.data_layer.itemIdMap)
self.eval_data_layer.src_data = self.data_layer.data
self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')],
nl_type=self.args.non_linearity_type,
is_constrained=self.args.constrained,
dp_drop_prob=self.args.drop_prob,
last_layer_activations=not self.args.skip_last_layer_nl)
os.makedirs(self.args.logdir, exist_ok=True)
self.model_checkpoint = self.args.logdir + "/model"
self.path_to_model = Path(self.model_checkpoint)
if self.path_to_model.is_file():
print("Loading model from: {}".format(self.model_checkpoint))
self.rencoder.load_state_dict(torch.load(self.model_checkpoint))
if not self.args.silent:
print('######################################################')
print('######################################################')
print('############# AutoEncoder Model: #####################')
print(self.rencoder)
print('######################################################')
print('######################################################')
if self.args.use_cuda:
gpu_ids = [int(g) for g in self.args.gpu_ids.split(',')]
if not self.args.silent:
print('Using GPUs: {}'.format(gpu_ids))
if len(gpu_ids)>1:
self.rencoder = nn.DataParallel(self.rencoder,
device_ids=gpu_ids)
self.rencoder = self.rencoder.cuda()
self.toyinputs = self.toyinputs.to(device)
if self.args.optimizer == "adam":
self.optimizer = optim.Adam(self.rencoder.parameters(),
lr=self.args.lr,
weight_decay=self.args.weight_decay)
elif self.args.optimizer == "adagrad":
self.optimizer = optim.Adagrad(self.rencoder.parameters(),
lr=self.args.lr,
weight_decay=self.args.weight_decay)
elif self.args.optimizer == "momentum":
self.optimizer = optim.SGD(self.rencoder.parameters(),
lr=self.args.lr, momentum=0.9,
weight_decay=self.args.weight_decay)
self.scheduler = MultiStepLR(self.optimizer, milestones=[24, 36, 48, 66, 72], gamma=0.5)
    elif self.args.optimizer == "rmsprop":
self.optimizer = optim.RMSprop(self.rencoder.parameters(),
lr=self.args.lr, momentum=0.9,
weight_decay=self.args.weight_decay)
else:
raise ValueError('Unknown optimizer kind')
self.t_loss = 0.0
self.t_loss_denom = 0.0
self.denom = 0.0
self.total_epoch_loss = 0.0
self.global_step = 0
if self.args.noise_prob > 0.0:
self.dp = nn.Dropout(p=self.args.noise_prob)
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def DoTrain(self):
self.rencoder.train()
#if self.args.optimizer == "momentum":
# self.scheduler.step()
for i, mb in enumerate(self.data_layer.iterate_one_epoch()):
inputs = Variable(mb.cuda().to_dense() if self.args.use_cuda else mb.to_dense())
self.optimizer.zero_grad()
outputs = self.rencoder(inputs)
loss, num_ratings = model.MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
self.optimizer.step()
self.global_step += 1
self.t_loss += loss.item()
self.t_loss_denom += 1
if not self.args.nooutput:
if i % self.args.summary_frequency == 0:
print('[%d, %5d] RMSE: %.7f' % (self.epoch, i, sqrt(self.t_loss / self.t_loss_denom)))
self.logger.scalar_summary("Training_RMSE", sqrt(self.t_loss/self.t_loss_denom), self.global_step)
self.t_loss = 0
self.t_loss_denom = 0.0
log_var_and_grad_summaries(self.logger, self.rencoder.encode_w, self.global_step, "Encode_W")
log_var_and_grad_summaries(self.logger, self.rencoder.encode_b, self.global_step, "Encode_b")
if not self.rencoder.is_constrained:
log_var_and_grad_summaries(self.logger, self.rencoder.decode_w, self.global_step, "Decode_W")
log_var_and_grad_summaries(self.logger, self.rencoder.decode_b, self.global_step, "Decode_b")
self.total_epoch_loss += loss.item()
self.denom += 1
#if args.aug_step > 0 and i % args.aug_step == 0 and i > 0:
if self.args.aug_step > 0:
        # dense re-feeding data augmentation: feed the model's dense output back in as a new input
for t in range(self.args.aug_step):
inputs = Variable(outputs.data)
if self.args.noise_prob > 0.0:
            inputs = self.dp(inputs)
self.optimizer.zero_grad()
outputs = self.rencoder(inputs)
loss, num_ratings = model.MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
self.optimizer.step()
def train(self, niter=1) :
for self.epoch in range(niter):
if self.toytest:
self.rencoder.train()
self.optimizer.zero_grad()
outputs = self.rencoder(self.toyinputs)
loss, num_ratings = model.MSEloss(outputs, self.toyinputs)
loss = loss / num_ratings
loss.backward()
self.optimizer.step()
continue
if not self.args.silent:
print('Doing epoch {} of {}'.format(self.epoch, niter))
print('Timing Start')
e_start_time = time.time()
self.DoTrain()
if not self.args.silent:
e_end_time = time.time()
print('Timing End')
        # profiler results, when enabled, are reported by TimedTrainingRun below
print('Total epoch {} finished in {} seconds with TRAINING RMSE loss: {}'
.format(self.epoch, e_end_time - e_start_time, sqrt(self.total_epoch_loss/self.denom)))
if not self.args.silent:
self.logger.scalar_summary("Training_RMSE_per_epoch", sqrt(self.total_epoch_loss/self.denom), self.epoch)
self.logger.scalar_summary("Epoch_time", e_end_time - e_start_time, self.epoch)
if self.epoch % self.args.save_every == 0 or self.epoch == self.args.num_epochs - 1:
eval_loss = DoTrainEval(self.rencoder, self.eval_data_layer, self.args.use_cuda)
print('Epoch {} EVALUATION LOSS: {}'.format(self.epoch, eval_loss))
self.logger.scalar_summary("EVALUATION_RMSE", eval_loss, self.epoch)
print("Saving model to {}".format(self.model_checkpoint + ".epoch_"+str(self.epoch)))
torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".epoch_"+str(self.epoch))
if not self.args.nooutput:
print("Saving model to {}".format(self.model_checkpoint + ".last"))
torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".last")
# save to onnx
dummy_input = Variable(torch.randn(self.params['batch_size'], self.data_layer.vector_dim).type(torch.float))
torch.onnx.export(self.rencoder.float(), dummy_input.cuda() if self.args.use_cuda else dummy_input,
self.model_checkpoint + ".onnx", verbose=True)
print("ONNX model saved to {}!".format(self.model_checkpoint + ".onnx"))
  def TimedTrainingRun(self):
    if self.args.profile:
      with profiler.profile(record_shapes=True, use_cuda=self.args.use_cuda) as prof:
        with profiler.record_function("training_epoch"):
          self.train(self.args.num_epochs)
      print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
      prof.export_chrome_trace("trace.json")
    else:
      self.train(self.args.num_epochs)
def main() :
gpuTrain = DeepRecommenderTrainBenchmark(device = 'cuda')
gpuTrain.TimedTrainingRun()
  cpuTrain = DeepRecommenderTrainBenchmark(device = 'cpu')
  cpuTrain.TimedTrainingRun()
if __name__ == '__main__':
main()
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
# Copyright (c) 2017 NVIDIA Corporation
from os import listdir, path, makedirs
import random
import sys
import time
import datetime
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.keys())))
def save_data_to_file(data, filename):
with open(filename, 'w') as out:
for userId in data:
for record in data[userId]:
out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))
def create_NETFLIX_data_timesplit(all_data,
train_min,
train_max,
test_min,
test_max):
"""
Creates time-based split of NETFLIX data into train, and (validation, test)
:param all_data:
:param train_min:
:param train_max:
:param test_min:
:param test_max:
:return:
"""
train_min_ts = time.mktime(datetime.datetime.strptime(train_min,"%Y-%m-%d").timetuple())
train_max_ts = time.mktime(datetime.datetime.strptime(train_max, "%Y-%m-%d").timetuple())
test_min_ts = time.mktime(datetime.datetime.strptime(test_min, "%Y-%m-%d").timetuple())
test_max_ts = time.mktime(datetime.datetime.strptime(test_max, "%Y-%m-%d").timetuple())
training_data = dict()
validation_data = dict()
test_data = dict()
train_set_items = set()
for userId, userRatings in all_data.items():
time_sorted_ratings = sorted(userRatings, key=lambda x: x[2]) # sort by timestamp
for rating_item in time_sorted_ratings:
if rating_item[2] >= train_min_ts and rating_item[2] <= train_max_ts:
if not userId in training_data:
training_data[userId] = []
training_data[userId].append(rating_item)
train_set_items.add(rating_item[0]) # keep track of items from training set
elif rating_item[2] >= test_min_ts and rating_item[2] <= test_max_ts:
if not userId in training_data: # only include users seen in the training set
continue
p = random.random()
if p <=0.5:
if not userId in validation_data:
validation_data[userId] = []
validation_data[userId].append(rating_item)
else:
if not userId in test_data:
test_data[userId] = []
test_data[userId].append(rating_item)
  # remove items not seen in the training set
for userId, userRatings in test_data.items():
test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
for userId, userRatings in validation_data.items():
validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
return training_data, validation_data, test_data
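
# Hedged toy example of the expected input format (illustrative only; the real
# pipeline builds all_data from the Netflix ratings files in main() below):
def _example_timesplit():
  day = lambda s: time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d").timetuple())
  toy = {0: [(0, 5.0, day("2005-09-02")), (1, 3.0, day("2005-09-10"))]}  # userId -> [(itemId, rating, ts)]
  return create_NETFLIX_data_timesplit(toy, "2005-09-01", "2005-09-07",
                                        "2005-09-10", "2005-09-11")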
def main(args):
user2id_map = dict()
item2id_map = dict()
userId = 0
itemId = 0
all_data = dict()
folder = args[1]
out_folder = args[2]
# create necessary folders:
for output_dir in [(out_folder + f) for f in [
"/N1W_TRAIN", "/N1W_VALID", "/N1W_TEST",
"/N3M_TRAIN", "/N3M_VALID", "/N3M_TEST",
"/N6M_TRAIN", "/N6M_VALID", "/N6M_TEST",
"/N1Y_TRAIN", "/N1Y_VALID", "/N1Y_TEST",
"/NF_TRAIN", "/NF_VALID", "/NF_TEST"]]:
makedirs(output_dir, exist_ok=True)
text_files = [path.join(folder, f)
for f in listdir(folder)
if path.isfile(path.join(folder, f)) and ('.txt' in f)]
for text_file in text_files:
with open(text_file, 'r') as f:
print("Processing: {}".format(text_file))
lines = f.readlines()
item = int(lines[0][:-2]) # remove newline and :
if not item in item2id_map:
item2id_map[item] = itemId
itemId += 1
for rating in lines[1:]:
parts = rating.strip().split(",")
user = int(parts[0])
if not user in user2id_map:
user2id_map[user] = userId
userId += 1
rating = float(parts[1])
ts = int(time.mktime(datetime.datetime.strptime(parts[2],"%Y-%m-%d").timetuple()))
if user2id_map[user] not in all_data:
all_data[user2id_map[user]] = []
all_data[user2id_map[user]].append((item2id_map[item], rating, ts))
print("STATS FOR ALL INPUT DATA")
print_stats(all_data)
# Netflix 1 week, for benchmark
(n1w_train, n1w_valid, n1w_test) = create_NETFLIX_data_timesplit(all_data,
"2005-09-01",
"2005-09-07",
"2005-09-10",
"2005-09-11")
print("Netflix 1w train")
print_stats(n1w_train)
save_data_to_file(n1w_train, out_folder+"/N1W_TRAIN/n1w.train.txt")
print("Netflix 1w valid")
print_stats(n1w_valid)
save_data_to_file(n1w_valid, out_folder + "/N1W_VALID/n1w.valid.txt")
print("Netflix 1w test")
print_stats(n1w_test)
save_data_to_file(n1w_test, out_folder + "/N1W_TEST/n1w.test.txt")
print("finished 1 week!")
  # only the one-week split is needed for the benchmark; skip the larger splits below
  quit()
# Netflix full
(nf_train, nf_valid, nf_test) = create_NETFLIX_data_timesplit(all_data,
"1999-12-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix full train")
print_stats(nf_train)
save_data_to_file(nf_train, out_folder + "/NF_TRAIN/nf.train.txt")
print("Netflix full valid")
print_stats(nf_valid)
save_data_to_file(nf_valid, out_folder + "/NF_VALID/nf.valid.txt")
print("Netflix full test")
print_stats(nf_test)
save_data_to_file(nf_test, out_folder + "/NF_TEST/nf.test.txt")
(n3m_train, n3m_valid, n3m_test) = create_NETFLIX_data_timesplit(all_data,
"2005-09-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix 3m train")
print_stats(n3m_train)
save_data_to_file(n3m_train, out_folder+"/N3M_TRAIN/n3m.train.txt")
print("Netflix 3m valid")
print_stats(n3m_valid)
save_data_to_file(n3m_valid, out_folder + "/N3M_VALID/n3m.valid.txt")
print("Netflix 3m test")
print_stats(n3m_test)
save_data_to_file(n3m_test, out_folder + "/N3M_TEST/n3m.test.txt")
(n6m_train, n6m_valid, n6m_test) = create_NETFLIX_data_timesplit(all_data,
"2005-06-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix 6m train")
print_stats(n6m_train)
save_data_to_file(n6m_train, out_folder+"/N6M_TRAIN/n6m.train.txt")
print("Netflix 6m valid")
print_stats(n6m_valid)
save_data_to_file(n6m_valid, out_folder + "/N6M_VALID/n6m.valid.txt")
print("Netflix 6m test")
print_stats(n6m_test)
save_data_to_file(n6m_test, out_folder + "/N6M_TEST/n6m.test.txt")
# Netflix 1 year
(n1y_train, n1y_valid, n1y_test) = create_NETFLIX_data_timesplit(all_data,
"2004-06-01",
"2005-05-31",
"2005-06-01",
"2005-06-30")
print("Netflix 1y train")
print_stats(n1y_train)
save_data_to_file(n1y_train, out_folder + "/N1Y_TRAIN/n1y.train.txt")
print("Netflix 1y valid")
print_stats(n1y_valid)
save_data_to_file(n1y_valid, out_folder + "/N1Y_VALID/n1y.valid.txt")
print("Netflix 1y test")
print_stats(n1y_test)
save_data_to_file(n1y_test, out_folder + "/N1Y_TEST/n1y.test.txt")
if __name__ == "__main__":
main(sys.argv)
|
# Copyright (c) 2017 NVIDIA Corporation
import sys
import datetime
import random
from math import floor
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.keys())))
def save_data_to_file(data, filename):
with open(filename, 'w') as out:
for userId in data:
for record in data[userId]:
out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))
def main(args):
inpt = args[1]
out_prefix = args[2]
percent = 0.7
user2id_map = dict()
item2id_map = dict()
userId = 0
itemId = 0
data = dict()
min_ts = 100000000000
max_ts = 0
total_rating_count = 0
with open(inpt, 'r') as inpt_f: #ratings.csv headers: userId,movieId,rating,timestamp
for line in inpt_f:
if 'userId' in line:
continue
parts = line.split(',')
user = int(parts[0])
item = int(parts[1])
rating = float(parts[2])
ts = int(parts[3])
if min_ts > ts:
min_ts = ts
if max_ts < ts:
max_ts = ts
if not user in user2id_map:
user2id_map[user] = userId
userId += 1
if not item in item2id_map:
item2id_map[item] = itemId
itemId += 1
total_rating_count += 1
if user2id_map[user] not in data:
data[user2id_map[user]] = []
data[user2id_map[user]].append((item2id_map[item], rating, ts))
print("STATS")
print("Total Ratings: {}".format(total_rating_count))
print("Total User count: {}".format(len(user2id_map)))
print("Total Item count: {}".format(len(item2id_map)))
print("Minimum ts: {}, which is {}".format(min_ts, datetime.datetime.fromtimestamp(min_ts).strftime('%Y-%m-%d')))
print("Maximum ts: {}, which is {}".format(max_ts, datetime.datetime.fromtimestamp(max_ts).strftime('%Y-%m-%d')))
training_data = dict()
validation_data = dict()
test_data = dict()
train_set_items = set()
for userId in data.keys():
if len(data[userId]) < 2:
#print("WARNING, userId {} has less than 2 ratings, skipping user...".format(userId))
continue
time_sorted_ratings = sorted(data[userId], key=lambda x: x[2]) # sort by timestamp
last_train_ind = floor(percent * len(time_sorted_ratings))
training_data[userId] = time_sorted_ratings[:last_train_ind]
for rating_item in time_sorted_ratings[:last_train_ind]:
train_set_items.add(rating_item[0]) # keep track of items from training set
p = random.random()
if p <= 0.5:
validation_data[userId] = time_sorted_ratings[last_train_ind:]
else:
test_data[userId] = time_sorted_ratings[last_train_ind:]
  # remove items not seen in the training set
for userId, userRatings in test_data.items():
test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
for userId, userRatings in validation_data.items():
validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
print("Training Data")
print_stats(training_data)
save_data_to_file(training_data, out_prefix+".train")
print("Validation Data")
print_stats(validation_data)
save_data_to_file(validation_data, out_prefix + ".valid")
print("Test Data")
print_stats(test_data)
save_data_to_file(test_data, out_prefix + ".test")
if __name__ == "__main__":
main(sys.argv)
|
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as weight_init
from torch.autograd import Variable
def activation(input, kind):
#print("Activation: {}".format(kind))
if kind == 'selu':
return F.selu(input)
elif kind == 'relu':
return F.relu(input)
elif kind == 'relu6':
return F.relu6(input)
  elif kind == 'sigmoid':
    return torch.sigmoid(input)
  elif kind == 'tanh':
    return torch.tanh(input)
  elif kind == 'elu':
    return F.elu(input)
  elif kind == 'lrelu':
    return F.leaky_relu(input)
  elif kind == 'swish':
    return input * torch.sigmoid(input)
elif kind == 'none':
return input
else:
raise ValueError('Unknown non-linearity type')
def MSEloss(inputs, targets, size_average=False):
mask = targets != 0
num_ratings = torch.sum(mask.float())
criterion = nn.MSELoss(reduction='sum' if not size_average else 'mean')
return criterion(inputs * mask.float(), targets), Variable(torch.Tensor([1.0])) if size_average else num_ratings
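
# Hedged sketch of the masked loss above (illustrative, not called anywhere):
# zero entries in `targets` count as "unrated" and are excluded from the loss.
def _example_mseloss():
  preds = torch.tensor([[4.0, 2.0, 1.0]])
  ratings = torch.tensor([[5.0, 0.0, 1.0]])    # middle item is unrated
  loss, num_ratings = MSEloss(preds, ratings)  # sum of squared errors over the 2 rated items
  return loss / num_ratings                    # mean squared error per rating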
class AutoEncoder(nn.Module):
def __init__(self, layer_sizes, nl_type='selu', is_constrained=True, dp_drop_prob=0.0, last_layer_activations=True):
"""
Describes an AutoEncoder model
:param layer_sizes: Encoder network description. Should start with feature size (e.g. dimensionality of x).
For example: [10000, 1024, 512] will result in:
- encoder 2 layers: 10000x1024 and 1024x512. Representation layer (z) will be 512
- decoder 2 layers: 512x1024 and 1024x10000.
    :param nl_type: (default 'selu') Type of non-linearity
:param is_constrained: (default: True) Should constrain decoder weights
:param dp_drop_prob: (default: 0.0) Dropout drop probability
:param last_layer_activations: (default: True) Whether to apply activations on last decoder layer
"""
super(AutoEncoder, self).__init__()
self._dp_drop_prob = dp_drop_prob
self._last_layer_activations = last_layer_activations
if dp_drop_prob > 0:
self.drop = nn.Dropout(dp_drop_prob)
self._last = len(layer_sizes) - 2
self._nl_type = nl_type
self.encode_w = nn.ParameterList(
[nn.Parameter(torch.rand(layer_sizes[i + 1], layer_sizes[i])) for i in range(len(layer_sizes) - 1)])
for ind, w in enumerate(self.encode_w):
weight_init.xavier_uniform_(w)
self.encode_b = nn.ParameterList(
[nn.Parameter(torch.zeros(layer_sizes[i + 1])) for i in range(len(layer_sizes) - 1)])
reversed_enc_layers = list(reversed(layer_sizes))
self.is_constrained = is_constrained
if not is_constrained:
self.decode_w = nn.ParameterList(
[nn.Parameter(torch.rand(reversed_enc_layers[i + 1], reversed_enc_layers[i])) for i in range(len(reversed_enc_layers) - 1)])
for ind, w in enumerate(self.decode_w):
nn.init.xavier_uniform_(w)
self.decode_b = nn.ParameterList(
[nn.Parameter(torch.zeros(reversed_enc_layers[i + 1])) for i in range(len(reversed_enc_layers) - 1)])
if False:
print("******************************")
print("******************************")
print(layer_sizes)
print("Dropout drop probability: {}".format(self._dp_drop_prob))
print("Encoder pass:")
for ind, w in enumerate(self.encode_w):
print(w.data.size())
print(self.encode_b[ind].size())
print("Decoder pass:")
if self.is_constrained:
print('Decoder is constrained')
for ind, w in enumerate(list(reversed(self.encode_w))):
print(w.transpose(0, 1).size())
print(self.decode_b[ind].size())
else:
for ind, w in enumerate(self.decode_w):
print(w.data.size())
print(self.decode_b[ind].size())
print("******************************")
print("******************************")
def encode(self, x):
for ind, w in enumerate(self.encode_w):
x = activation(input=F.linear(input=x, weight=w, bias=self.encode_b[ind]), kind=self._nl_type)
if self._dp_drop_prob > 0: # apply dropout only on code layer
x = self.drop(x)
return x
def decode(self, z):
if False: #self.is_constrained:
for ind, w in enumerate(list(reversed(self.encode_w))): # constrained autoencode re-uses weights from encoder
z = activation(input=F.linear(input=z, weight=w.transpose(0, 1), bias=self.decode_b[ind]),
# last layer or decoder should not apply non linearities
kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none')
#if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer
# z = self.drop(z)
else:
for ind, w in enumerate(self.decode_w):
z = activation(input=F.linear(input=z, weight=w, bias=self.decode_b[ind]),
# last layer or decoder should not apply non linearities
kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none')
#if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer
# z = self.drop(z)
return z
def forward(self, x):
return self.decode(self.encode(x))
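
# Hedged usage sketch with toy sizes (the benchmark instead uses the Netflix
# vector_dim and the hidden layer sizes from its args):
def _example_autoencoder():
  enc = AutoEncoder(layer_sizes=[100, 32, 16], nl_type='selu',
                    is_constrained=False, dp_drop_prob=0.0)
  x = torch.rand(4, 100)   # batch of 4 users, 100 items
  return enc(x).shape      # reconstruction has the input shape: torch.Size([4, 100])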
|
# Copyright (c) 2017 NVIDIA Corporation
|
# Copyright (c) 2017 NVIDIA Corporation
"""Data Layer Classes"""
from os import listdir, path
from random import shuffle
import torch
class UserItemRecDataProvider:
def __init__(self, params, user_id_map=None, item_id_map=None):
self._params = params
self._data_dir = self.params['data_dir']
self._extension = ".txt" if 'extension' not in self.params else self.params['extension']
self._i_id = 0 if 'itemIdInd' not in self.params else self.params['itemIdInd']
self._u_id = 1 if 'userIdInd' not in self.params else self.params['userIdInd']
self._r_id = 2 if 'ratingInd' not in self.params else self.params['ratingInd']
self._major = 'items' if 'major' not in self.params else self.params['major']
if not (self._major == 'items' or self._major == 'users'):
raise ValueError("Major must be 'users' or 'items', but got {}".format(self._major))
self._major_ind = self._i_id if self._major == 'items' else self._u_id
self._minor_ind = self._u_id if self._major == 'items' else self._i_id
self._delimiter = '\t' if 'delimiter' not in self.params else self.params['delimiter']
if user_id_map is None or item_id_map is None:
self._build_maps()
else:
self._user_id_map = user_id_map
self._item_id_map = item_id_map
major_map = self._item_id_map if self._major == 'items' else self._user_id_map
minor_map = self._user_id_map if self._major == 'items' else self._item_id_map
self._vector_dim = len(minor_map)
src_files = [path.join(self._data_dir, f)
for f in listdir(self._data_dir)
if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)]
self._batch_size = self.params['batch_size']
self.data = dict()
for source_file in src_files:
with open(source_file, 'r') as src:
for line in src.readlines():
parts = line.strip().split(self._delimiter)
if len(parts)<3:
raise ValueError('Encountered badly formatted line in {}'.format(source_file))
key = major_map[int(parts[self._major_ind])]
value = minor_map[int(parts[self._minor_ind])]
rating = float(parts[self._r_id])
#print("Key: {}, Value: {}, Rating: {}".format(key, value, rating))
if key not in self.data:
self.data[key] = []
self.data[key].append((value, rating))
def _build_maps(self):
self._user_id_map = dict()
self._item_id_map = dict()
src_files = [path.join(self._data_dir, f)
for f in listdir(self._data_dir)
if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)]
u_id = 0
i_id = 0
for source_file in src_files:
with open(source_file, 'r') as src:
for line in src.readlines():
parts = line.strip().split(self._delimiter)
if len(parts)<3:
raise ValueError('Encountered badly formatted line in {}'.format(source_file))
u_id_orig = int(parts[self._u_id])
if u_id_orig not in self._user_id_map:
self._user_id_map[u_id_orig] = u_id
u_id += 1
i_id_orig = int(parts[self._i_id])
if i_id_orig not in self._item_id_map:
self._item_id_map[i_id_orig] = i_id
i_id += 1
def iterate_one_epoch(self):
data = self.data
keys = list(data.keys())
shuffle(keys)
s_ind = 0
e_ind = self._batch_size
while e_ind < len(keys):
local_ind = 0
inds1 = []
inds2 = []
vals = []
for ind in range(s_ind, e_ind):
inds2 += [v[0] for v in data[keys[ind]]]
inds1 += [local_ind]*len([v[0] for v in data[keys[ind]]])
vals += [v[1] for v in data[keys[ind]]]
local_ind += 1
i_torch = torch.LongTensor([inds1, inds2])
v_torch = torch.FloatTensor(vals)
mini_batch = torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([self._batch_size, self._vector_dim]))
s_ind += self._batch_size
e_ind += self._batch_size
yield mini_batch
def iterate_one_epoch_eval(self, for_inf=False):
keys = list(self.data.keys())
s_ind = 0
while s_ind < len(keys):
inds1 = [0] * len([v[0] for v in self.data[keys[s_ind]]])
inds2 = [v[0] for v in self.data[keys[s_ind]]]
vals = [v[1] for v in self.data[keys[s_ind]]]
src_inds1 = [0] * len([v[0] for v in self.src_data[keys[s_ind]]])
src_inds2 = [v[0] for v in self.src_data[keys[s_ind]]]
src_vals = [v[1] for v in self.src_data[keys[s_ind]]]
i_torch = torch.LongTensor([inds1, inds2])
v_torch = torch.FloatTensor(vals)
src_i_torch = torch.LongTensor([src_inds1, src_inds2])
src_v_torch = torch.FloatTensor(src_vals)
mini_batch = (torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([1, self._vector_dim])),
torch.sparse.FloatTensor(src_i_torch, src_v_torch, torch.Size([1, self._vector_dim])))
s_ind += 1
if not for_inf:
yield mini_batch
else:
yield mini_batch, keys[s_ind - 1]
@property
def vector_dim(self):
return self._vector_dim
@property
def userIdMap(self):
return self._user_id_map
@property
def itemIdMap(self):
return self._item_id_map
@property
def params(self):
return self._params
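
# Hedged usage sketch (the directory layout is an assumption: tab-separated
# "userId\titemId\trating" .txt files, as produced by the preprocessing scripts):
def _example_provider(data_dir):
  params = {'batch_size': 32, 'data_dir': data_dir,
            'major': 'users', 'itemIdInd': 1, 'userIdInd': 0}
  layer = UserItemRecDataProvider(params=params)
  for mb in layer.iterate_one_epoch():
    return mb.size()  # sparse (batch_size, vector_dim) rating matrix slice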
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.SEGMENTATION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data as data
import torchvision.models as models
from opacus import PrivacyEngine
from opacus.validators.module_validator import ModuleValidator
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
# disable torchdynamo-fx2trt because it never terminates
if "--torchdynamo" in extra_args and "fx2trt" in extra_args:
raise NotImplementedError("TorchDynamo Fx2trt is not supported because of hanging issue. "
"See: https://github.com/facebookresearch/torchdynamo/issues/109")
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = models.resnet18(num_classes=10)
self.model = ModuleValidator.fix(self.model)
self.model = self.model.to(device)
# Cifar10 images are 32x32 and have 10 classes
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
dataset = data.TensorDataset(self.example_inputs[0], self.example_target)
self.dummy_loader = data.DataLoader(dataset, batch_size=self.batch_size)
self.noise_multiplier: float=1.0
self.max_grad_norm: float=1.0
self.poisson_sampling: bool=False
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
self.criterion = nn.CrossEntropyLoss()
self.privacy_engine = PrivacyEngine()
self.model, self.optimizer, _ = self.privacy_engine.make_private(
module=self.model,
optimizer=self.optimizer,
data_loader=self.dummy_loader,
noise_multiplier=self.noise_multiplier,
max_grad_norm=self.max_grad_norm,
poisson_sampling=self.poisson_sampling,
)
def get_module(self):
return self.model, self.example_inputs
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.model, self.optimizer, _ = self.privacy_engine.make_private(
module=self.model,
optimizer=self.optimizer,
data_loader=self.dummy_loader,
noise_multiplier=1.0,
max_grad_norm=1.0,
poisson_sampling=False,
)
def train(self):
model = self.model
(images, ) = self.example_inputs
model.train()
targets = self.example_target
output = model(images)
loss = self.criterion(output, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
(images, ) = self.example_inputs
model.eval()
targets = self.example_target
with torch.no_grad():
out = model(images)
return (out, )
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import GPT, SequenceGeneratorNanoGPT, GPTConfig, GPTGenerationConfig
class Model(BenchmarkModel):
task = NLP.GENERATION
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# Use the default configs
self.gpt_config = GPTConfig()
self.generator_config = GPTGenerationConfig(32, 0.8, 200)
self.model = SequenceGeneratorNanoGPT(GPT(self.gpt_config), self.generator_config).eval().to(self.device)
self.prompt_size = 64
self.example_inputs = (
torch.randint(1, self.gpt_config.vocab_size, (self.batch_size, self.prompt_size)).to(self.device),
)
def get_module(self):
return self.model, self.example_inputs
def train(self):
        raise NotImplementedError("Training not supported for this model")
def eval(self):
with torch.no_grad():
out = self.model(*self.example_inputs)
return (out,)
|
"""
Full definition of a GPT Language Model, all of it in this single file.
References:
1) the official GPT-2 TensorFlow implementation released by OpenAI:
https://github.com/openai/gpt-2/blob/master/src/model.py
2) huggingface/transformers PyTorch implementation:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py
"""
import math
import inspect
from typing import Optional
from dataclasses import dataclass
import torch
import torch.nn as nn
from torch.nn import functional as F
# @torch.jit.script # good to enable when not using torch.compile, disable when using (our default)
def new_gelu(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
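
# Note: this is the tanh approximation of GELU; on recent PyTorch versions it
# should match F.gelu(x, approximate='tanh') up to floating-point precision.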
class LayerNorm(nn.Module):
""" LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
def __init__(self, ndim, bias):
super().__init__()
self.weight = nn.Parameter(torch.ones(ndim))
self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
def forward(self, input):
return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
class CausalSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads, but in a batch
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
# regularization
self.attn_dropout = nn.Dropout(config.dropout)
self.resid_dropout = nn.Dropout(config.dropout)
self.n_head = config.n_head
self.n_embd = config.n_embd
self.dropout = config.dropout
# flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
if not self.flash:
print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
def forward(self, x):
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
if self.flash:
# efficient attention using Flash Attention CUDA kernels
y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
else:
# manual implementation of attention
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_dropout(self.c_proj(y))
return y
class MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x):
x = self.c_fc(x)
x = new_gelu(x)
x = self.c_proj(x)
x = self.dropout(x)
return x
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
self.attn = CausalSelfAttention(config)
self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
self.mlp = MLP(config)
def forward(self, x):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
@dataclass
class GPTConfig:
block_size: int = 1024
vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
n_layer: int = 12
n_head: int = 12
n_embd: int = 768
dropout: float = 0.0
bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
@dataclass
class GPTGenerationConfig:
max_new_tokens: int = 512 # max number of new tokens to generate
temperature: float = 1.0 # temperature for sampling. > 1.0: more exploring, < 1.0: more conservative.
top_k: Optional[int] = None # top_k > 0: keep only top k tokens with highest probability (top-k filtering).
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
assert config.vocab_size is not None
assert config.block_size is not None
self.config = config
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
wpe = nn.Embedding(config.block_size, config.n_embd),
drop = nn.Dropout(config.dropout),
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
ln_f = LayerNorm(config.n_embd, bias=config.bias),
))
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# with weight tying when using torch.compile() some warnings get generated:
# "UserWarning: functional_call was passed multiple values for tied weights.
# This behavior is deprecated and will be an error in future versions"
# not 100% sure what this is, so far seems to be harmless. TODO investigate
self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
# init all weights
self.apply(self._init_weights)
# apply special scaled init to the residual projections, per GPT-2 paper
for pn, p in self.named_parameters():
if pn.endswith('c_proj.weight'):
torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
# report number of parameters
print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
def get_num_params(self, non_embedding=True):
"""
Return the number of parameters in the model.
For non-embedding count (default), the position embeddings get subtracted.
The token embeddings would too, except due to the parameter sharing these
params are actually used as weights in the final layer, so we include them.
"""
n_params = sum(p.numel() for p in self.parameters())
if non_embedding:
n_params -= self.transformer.wpe.weight.numel()
return n_params
def _init_weights(self, module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(self, idx, targets=None):
device = idx.device
b, t = idx.size()
assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
# forward the GPT model itself
tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
x = self.transformer.drop(tok_emb + pos_emb)
for block in self.transformer.h:
x = block(x)
x = self.transformer.ln_f(x)
if targets is not None:
# if we are given some desired targets also calculate the loss
logits = self.lm_head(x)
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
else:
# inference-time mini-optimization: only forward the lm_head on the very last position
logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
loss = None
return logits, loss
def crop_block_size(self, block_size):
# model surgery to decrease the block size if necessary
# e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
# but want to use a smaller block size for some smaller, simpler model
assert block_size <= self.config.block_size
self.config.block_size = block_size
self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
for block in self.transformer.h:
if hasattr(block.attn, 'bias'):
block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
@classmethod
def from_pretrained(cls, model_type, override_args=None):
assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
override_args = override_args or {} # default to empty dict
# only dropout can be overridden see more notes below
assert all(k == 'dropout' for k in override_args)
from transformers import GPT2LMHeadModel
print("loading weights from pretrained gpt: %s" % model_type)
# n_layer, n_head and n_embd are determined from model_type
config_args = {
'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
}[model_type]
print("forcing vocab_size=50257, block_size=1024, bias=True")
config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
config_args['bias'] = True # always True for GPT model checkpoints
# we can override the dropout rate, if desired
if 'dropout' in override_args:
print(f"overriding dropout rate to {override_args['dropout']}")
config_args['dropout'] = override_args['dropout']
# create a from-scratch initialized minGPT model
config = GPTConfig(**config_args)
model = GPT(config)
sd = model.state_dict()
sd_keys = sd.keys()
sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
# init a huggingface/transformers model
model_hf = GPT2LMHeadModel.from_pretrained(model_type)
sd_hf = model_hf.state_dict()
# copy while ensuring all of the parameters are aligned and match in names and shapes
sd_keys_hf = sd_hf.keys()
sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
# basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
# this means that we have to transpose these weights when we import them
assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
for k in sd_keys_hf:
if any(k.endswith(w) for w in transposed):
# special treatment for the Conv1D weights we need to transpose
assert sd_hf[k].shape[::-1] == sd[k].shape
with torch.no_grad():
sd[k].copy_(sd_hf[k].t())
else:
# vanilla copy over the other parameters
assert sd_hf[k].shape == sd[k].shape
with torch.no_grad():
sd[k].copy_(sd_hf[k])
return model
def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
# start with all of the candidate parameters
param_dict = {pn: p for pn, p in self.named_parameters()}
# filter out those that do not require grad
param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
# create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.
# i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
optim_groups = [
{'params': decay_params, 'weight_decay': weight_decay},
{'params': nodecay_params, 'weight_decay': 0.0}
]
num_decay_params = sum(p.numel() for p in decay_params)
num_nodecay_params = sum(p.numel() for p in nodecay_params)
print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
# Create AdamW optimizer and use the fused version if it is available
fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
use_fused = fused_available and device_type == 'cuda'
extra_args = dict(fused=True) if use_fused else dict()
optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
print(f"using fused AdamW: {use_fused}")
return optimizer
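    # Illustrative call (hypothetical hyperparameters, not part of the benchmark):
    #
    #   optimizer = model.configure_optimizers(
    #       weight_decay=0.1, learning_rate=6e-4, betas=(0.9, 0.95), device_type='cuda')
    #
    # Only >=2D tensors (matmul weights, embeddings) end up in the decayed group;
    # biases and LayerNorm gains land in the no-decay group.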
def estimate_mfu(self, fwdbwd_per_iter, dt):
""" estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
# first estimate the number of flops we do per iteration.
# see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
N = self.get_num_params()
cfg = self.config
L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
flops_per_token = 6*N + 12*L*H*Q*T
flops_per_fwdbwd = flops_per_token * T
flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
# express our flops throughput as ratio of A100 bfloat16 peak flops
flops_achieved = flops_per_iter * (1.0/dt) # per second
flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
mfu = flops_achieved / flops_promised
return mfu
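    # Worked example (approximate, for the 124M-param 'gpt2' config):
    #   N ~= 124e6, L=12, H=12, Q=64, T=1024
    #   flops_per_token  = 6*124e6 + 12*12*12*64*1024 ~= 8.6e8
    #   flops_per_fwdbwd = 8.6e8 * 1024              ~= 8.8e11
    #   with fwdbwd_per_iter=1 and dt=0.01s: flops_achieved ~= 8.8e13,
    #   so mfu ~= 8.8e13 / 312e12 ~= 0.28 (about 28% of A100 bf16 peak).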
@torch.no_grad()
def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
"""
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Most likely you'll want to put the model in model.eval() mode before calling this.
"""
for _ in range(max_new_tokens):
# if the sequence context is growing too long we must crop it at block_size
idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
# forward the model to get the logits for the index in the sequence
logits, _ = self(idx_cond)
# pluck the logits at the final step and scale by desired temperature
logits = logits[:, -1, :] / temperature
# optionally crop the logits to only the top k options
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float('Inf')
# apply softmax to convert logits to (normalized) probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution
idx_next = torch.multinomial(probs, num_samples=1)
# append sampled index to the running sequence and continue
idx = torch.cat((idx, idx_next), dim=1)
return idx
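    # Illustrative sampling sketch (hypothetical tensors, not part of the benchmark):
    #
    #   idx = torch.zeros((1, 1), dtype=torch.long)  # a single start token on the model's device
    #   out = model.generate(idx, max_new_tokens=20, temperature=0.8, top_k=50)
    #   # out has shape (1, 21): the prompt plus 20 sampled tokens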
class SequenceGeneratorNanoGPT(nn.Module):
def __init__(self, model, generate_config) -> None:
super().__init__()
self.base_model: GPT = model
self.generate_config: GPTGenerationConfig = generate_config
def forward(self, idx):
return self.base_model.generate(idx, self.generate_config.max_new_tokens, self.generate_config.temperature, self.generate_config.top_k)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torch.optim as optim
import torch
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
    # Original train batch size: 512, which runs out of memory on a V100 GPU
# Use hierarchical batching to scale down: 512 = batch_size (32) * epoch_size (16)
# Source: https://github.com/forresti/SqueezeNet
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 16
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="squeezenet1_1", test=test, device=device,
batch_size=batch_size, weights=models.SqueezeNet1_1_Weights.IMAGENET1K_V1,
extra_args=extra_args)
self.epoch_size = 16
def train(self):
optimizer = optim.Adam(self.model.parameters())
loss = torch.nn.CrossEntropyLoss()
optimizer.zero_grad()
for _ in range(self.epoch_size):
pred = self.model(*self.example_inputs)
y = torch.empty(pred.shape[0], dtype=torch.long, device=self.device).random_(pred.shape[1])
loss(pred, y).backward()
optimizer.step()
|
import argparse
import random
from collections import deque
import math
import gym
import numpy as np
class ActionRepeatWrapper(gym.Wrapper):
def __init__(self, env, repeat_multiplier=8):
super().__init__(env)
self.action_space = gym.spaces.Box(
-1.0, 1.0, shape=(1 + self.env.action_space.shape[0],)
)
self.repeat_multiplier = repeat_multiplier / 2.0
def step(self, action):
repeat_action = max(math.floor((action[0] + 1.0) * self.repeat_multiplier), 1)
main_action = action[1:]
total_reward = 0
        for _ in range(repeat_action):
            next_state, reward, done, _ = self.env.step(main_action)
            total_reward += reward
            # stop repeating once the episode terminates, so we never step a finished env
            if done:
                break
return next_state, total_reward, done, {}
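    # Worked example: with repeat_multiplier=8 the scaler is 8 / 2.0 = 4.0, so an
    # action whose first element is 0.5 gives floor((0.5 + 1.0) * 4.0) = 6 repeats,
    # while an element of -1.0 gives max(floor(0.0), 1) = 1 repeat.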
class ChannelsFirstWrapper(gym.ObservationWrapper):
"""
Some pixel-based gym environments use a (Height, Width, Channel) image format.
This wrapper rolls those axes to (Channel, Height, Width) to work with pytorch
Conv2D layers.
"""
def __init__(self, env):
super().__init__(env)
self.observation_space.shape = (
env.observation_space.shape[-1],
) + env.observation_space.shape[:-1]
def observation(self, frame):
frame = np.transpose(frame, (2, 0, 1))
return np.ascontiguousarray(frame)
class NormalizeObservationSpace(gym.ObservationWrapper):
def __init__(self, env, obs_mean, obs_std):
super().__init__(env)
self.mean = obs_mean
self.std = obs_std + 1e-5
def observation(self, x):
return (x - self.mean) / self.std
class NormalizeContinuousActionSpace(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self._true_action_space = env.action_space
self.action_space = gym.spaces.Box(
low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32,
)
def action(self, action):
true_delta = self._true_action_space.high - self._true_action_space.low
norm_delta = self.action_space.high - self.action_space.low
action = (action - self.action_space.low) / norm_delta
action = action * true_delta + self._true_action_space.low
return action
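    # Worked example: if the true action space is [0.0, 2.0] and the incoming
    # normalized action is 0.5, then (0.5 - (-1.0)) / 2.0 = 0.75 of the range,
    # which maps to 0.75 * 2.0 + 0.0 = 1.5 in the original space.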
def robosuite_action_adjustment(robosuite_env, verbose=False):
if verbose:
action_space = robosuite_env.action_space
high = action_space.high
same_high = np.all(high == high[0])
low = action_space.low
same_low = np.all(low == low[0])
shape = action_space.shape[0]
print("RoboSuite Action Space Report:")
if same_high and same_low:
print(f"Uniformly Bounded Action Space in [{low[0]}, {high[0]}]^{shape}")
else:
print(f"Non-uniform Bounded Action Space with elements = {zip(low, high)}")
print("\nAttempting to normalize action space using dc.envs.Normalize...\n")
env = NormalizeContinuousActionSpace(robosuite_env)
if verbose:
action_space = env.action_space
high = action_space.high
same_high = np.all(high == high[0])
low = action_space.low
same_low = np.all(low == low[0])
shape = action_space.shape[0]
print("Normalized RoboSuite Action Space Report:")
if same_high and same_low:
print(f"Uniformly Bounded Action Space in [{low[0]}, {high[0]}]^{shape}")
else:
print(f"Non-uniform Bounded Action Space with elements = {zip(low, high)}")
return env
class FlattenObsWrapper(gym.ObservationWrapper):
"""
Simple wrapper that flattens an image observation
into a state vector when CNNs are overkill.
"""
def __init__(self, env):
super().__init__(env)
self.observation_space.shape = (np.prod(env.observation_space.shape),)
def observation(self, obs):
return obs.flatten()
class ConcatObsWrapper(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
obs_space_shape = sum(x.shape[0] for x in self.observation_space)
self.observation_space.shape = (obs_space_shape,)
def observation(self, obs):
return np.concatenate(obs, axis=0)
def highway_env(env_id):
"""
Convenience function to turn all the highway_env
environments into continuous control tasks.
highway_env: https://highway-env.readthedocs.io/en/latest/index.html
"""
import gym
import highway_env
env = gym.make(env_id)
env.configure({"action": {"type": "ContinuousAction"}})
env.reset()
env = NormalizeContinuousActionSpace(env)
env = FlattenObsWrapper(env)
return env
class DiscreteActionWrapper(gym.ActionWrapper):
"""
This is intended to let the action be any scalar
(float or int) or np array (float or int) of size 1.
    floats are cast to ints by truncation (via python's int()).
"""
def __init__(self, env):
super().__init__(env)
self.action_space.shape = (env.action_space.n,)
def action(self, action):
if isinstance(action, np.ndarray):
if len(action.shape) > 0:
action = action[0]
return int(action)
class FrameStack(gym.Wrapper):
def __init__(self, env, num_stack):
gym.Wrapper.__init__(self, env)
self._k = num_stack
self._frames = deque([], maxlen=num_stack)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * num_stack,) + shp[1:]),
dtype=env.observation_space.dtype,
)
def reset(self):
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
class GoalBasedWrapper(gym.ObservationWrapper):
"""
Some goal-based envs (like the Gym Robotics suite) use dictionary observations
with one entry for the current state and another to describe the goal. This
wrapper concatenates those into a single vector so it can be used just like
any other env.
"""
def __init__(self, env):
super().__init__(env)
self.observation_space.shape = (
env.observation_space["observation"].shape[0]
+ env.observation_space["desired_goal"].shape[0],
)
def observation(self, obs_dict):
return self._flatten_obs(obs_dict)
def _flatten_obs(self, obs_dict):
return np.concatenate((obs_dict["observation"], obs_dict["desired_goal"]))
def add_gym_args(parser):
"""
    Add --env_id and --seed command-line flags to an argparser
"""
parser.add_argument("--env_id", type=str, default="Pendulum-v1")
parser.add_argument("--seed", type=int, default=123)
def load_gym(env_id="CartPole-v1", seed=None, normalize_action_space=True, **_):
"""
Load an environment from OpenAI gym (or pybullet_gym, if installed)
"""
# optional pybullet import
try:
import pybullet
import pybulletgym
except ImportError:
pass
env = gym.make(env_id)
if normalize_action_space and isinstance(env.action_space, gym.spaces.Box):
env = NormalizeContinuousActionSpace(env)
if seed is None:
seed = random.randint(1, 100000)
env.reset(seed=seed)
return env
def add_dmc_args(parser):
"""
    Add command-line flags associated with the deepmind control suite to a parser
"""
parser.add_argument("--domain_name", type=str, default="fish")
parser.add_argument("--task_name", type=str, default="swim")
parser.add_argument(
"--from_pixels", action="store_true", help="Use image observations"
)
parser.add_argument("--height", type=int, default=84)
parser.add_argument("--width", type=int, default=84)
parser.add_argument("--camera_id", type=int, default=0)
parser.add_argument("--frame_skip", type=int, default=1)
parser.add_argument("--frame_stack", type=int, default=3)
parser.add_argument("--channels_last", action="store_true")
parser.add_argument("--rgb", action="store_true")
parser.add_argument("--seed", type=int, default=231)
def add_atari_args(parser):
parser.add_argument("--game_id", type=str, default="Boxing-v0")
parser.add_argument("--noop_max", type=int, default=30)
parser.add_argument("--frame_skip", type=int, default=1)
parser.add_argument("--screen_size", type=int, default=84)
parser.add_argument("--terminal_on_life_loss", action="store_true")
parser.add_argument("--rgb", action="store_true")
parser.add_argument("--normalize", action="store_true")
parser.add_argument("--frame_stack", type=int, default=4)
parser.add_argument("--seed", type=int, default=231)
def load_atari(
game_id,
seed=None,
noop_max=30,
frame_skip=1,
screen_size=84,
terminal_on_life_loss=False,
rgb=False,
normalize=False,
frame_stack=4,
clip_reward=True,
**_,
):
"""
Load a game from the Atari benchmark, with the usual settings
Note that the simplest game ids (e.g. Boxing-v0) come with frame
    skipping by default, and you'll get an error if the frame_skip arg > 1.
Use `BoxingNoFrameskip-v0` with frame_skip > 1.
"""
env = gym.make(game_id)
if seed is None:
seed = random.randint(1, 100000)
env.reset(seed=seed)
env = gym.wrappers.AtariPreprocessing(
env,
noop_max=noop_max,
frame_skip=frame_skip,
screen_size=screen_size,
terminal_on_life_loss=terminal_on_life_loss,
grayscale_obs=False, # use GrayScale wrapper instead...
scale_obs=normalize,
)
if not rgb:
env = gym.wrappers.GrayScaleObservation(env, keep_dim=True)
if clip_reward:
env = ClipReward(env)
env = ChannelsFirstWrapper(env)
env = FrameStack(env, num_stack=frame_stack)
env = DiscreteActionWrapper(env)
return env
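# Illustrative usage sketch (hypothetical, not part of the benchmark; assumes the
# Atari ROM extras for gym are installed, and behavior may vary with the gym version):
#
#   env = load_atari("BoxingNoFrameskip-v0", frame_skip=4, frame_stack=4)
#   obs = env.reset()   # channels-first, stacked grayscale frames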
class ClipReward(gym.RewardWrapper):
def __init__(self, env, low=-1.0, high=1.0):
super().__init__(env)
self._clip_low = low
self._clip_high = high
def reward(self, rew):
return max(min(rew, self._clip_high), self._clip_low)
def load_dmc(
domain_name,
task_name,
seed=None,
from_pixels=False,
frame_stack=1,
height=84,
width=84,
camera_id=0,
frame_skip=1,
channels_last=False,
rgb=False,
**_,
):
"""
Load a task from the deepmind control suite.
Uses dmc2gym (https://github.com/denisyarats/dmc2gym)
Note that setting seed=None (the default) picks a random seed
"""
import dmc2gym
if seed is None:
seed = random.randint(1, 100000)
env = dmc2gym.make(
domain_name=domain_name,
task_name=task_name,
from_pixels=from_pixels,
height=height,
width=width,
camera_id=camera_id,
visualize_reward=False,
frame_skip=frame_skip,
channels_first=not channels_last
if rgb
else False, # if we're using RGB, set the channel order here
)
if not rgb and from_pixels:
env = gym.wrappers.GrayScaleObservation(env, keep_dim=True)
env = ChannelsFirstWrapper(env)
if from_pixels:
env = FrameStack(env, num_stack=frame_stack)
return env
|
import dataclasses
@dataclasses.dataclass
class SACConfig:
env_id = "Pendulum-v1"
seed = 123
num_steps = 1
transitions_per_step = 1
max_episode_steps = 10
batch_size = 512
tau = 0.005
actor_lr = 1e-4
critic_lr = 1e-4
gamma = 0.99
init_alpha = 0.1
alpha_lr = 1e-4
buffer_size = 1_000_000
eval_interval = 5000
eval_episodes = 10
warmup_steps = 1
render = False
actor_clip = 0.0
critic_clip = 0.0
name = "sac_run"
actor_l2 = 0.0
critic_l2 = 0.0
target_delay = 2
actor_delay = 1
save_interval = 100_000
    verbosity = 0
gradient_updates_per_step = 1
prioritized_replay = False
skip_save_to_disk = True
skip_log_to_disk = True
discrete_actions = False
log_std_low = -10.0
log_std_high = 2.0
self_regularized = False
sr_max_critic_updates_per_step = 10
sr_critic_target_improvement_init = 0.7
sr_critic_target_improvement_final = 0.9
train_env_path = "input_data/train_env.pkl"
test_env_path = "input_data/test_env.pkl"
|
import torch
import os
import copy
import pickle
import math
from itertools import chain
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from typing import Tuple
from .config import SACConfig
from .envs import load_gym
from .sac import SACAgent
from .replay import PrioritizedReplayBuffer, ReplayBuffer
from .utils import hard_update, soft_update
def learn_standard(
buffer,
target_agent,
agent,
actor_optimizer,
critic_optimizer,
log_alpha_optimizer,
target_entropy,
batch_size,
log_alpha,
gamma,
critic_clip,
actor_clip,
update_policy=True,
device=None,
):
per = isinstance(buffer, PrioritizedReplayBuffer)
if per:
batch, imp_weights, priority_idxs = buffer.sample(batch_size)
imp_weights = imp_weights.to(device)
else:
batch = buffer.sample(batch_size)
# prepare transitions for models
state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch
state_batch = state_batch.to(device)
next_state_batch = next_state_batch.to(device)
action_batch = action_batch.to(device)
reward_batch = reward_batch.to(device)
done_batch = done_batch.to(device)
agent.train()
###################
## CRITIC UPDATE ##
###################
alpha = torch.exp(log_alpha)
with torch.no_grad():
action_dist_s1 = agent.actor(next_state_batch)
action_s1 = action_dist_s1.rsample()
logp_a1 = action_dist_s1.log_prob(action_s1).sum(-1, keepdim=True)
target_action_value_s1 = torch.min(
target_agent.critic1(next_state_batch, action_s1),
target_agent.critic2(next_state_batch, action_s1),
)
td_target = reward_batch + gamma * (1.0 - done_batch) * (
target_action_value_s1 - (alpha * logp_a1)
)
# update critics
agent_critic1_pred = agent.critic1(state_batch, action_batch)
agent_critic2_pred = agent.critic2(state_batch, action_batch)
td_error1 = td_target - agent_critic1_pred
td_error2 = td_target - agent_critic2_pred
critic_loss = 0.5 * (td_error1 ** 2 + td_error2 ** 2)
if per:
critic_loss *= imp_weights
critic_loss = critic_loss.mean()
critic_optimizer.zero_grad()
critic_loss.backward()
if critic_clip:
torch.nn.utils.clip_grad_norm_(
chain(agent.critic1.parameters(), agent.critic2.parameters()), critic_clip
)
critic_optimizer.step()
if update_policy:
##################
## ACTOR UPDATE ##
##################
dist = agent.actor(state_batch)
agent_actions = dist.rsample()
logp_a = dist.log_prob(agent_actions).sum(-1, keepdim=True)
actor_loss = -(
torch.min(
agent.critic1(state_batch, agent_actions),
agent.critic2(state_batch, agent_actions),
)
- (alpha.detach() * logp_a)
).mean()
actor_optimizer.zero_grad()
actor_loss.backward()
if actor_clip:
torch.nn.utils.clip_grad_norm_(agent.actor.parameters(), actor_clip)
actor_optimizer.step()
##################
## ALPHA UPDATE ##
##################
alpha_loss = (-alpha * (logp_a + target_entropy).detach()).mean()
log_alpha_optimizer.zero_grad()
alpha_loss.backward()
log_alpha_optimizer.step()
if per:
new_priorities = (abs(td_error1) + 1e-5).cpu().detach().squeeze(1).numpy()
buffer.update_priorities(priority_idxs, new_priorities)
class Model(BenchmarkModel):
task = REINFORCEMENT_LEARNING.OTHER_RL
# Original train batch size: 256
# Source: https://github.com/pranz24/pytorch-soft-actor-critic/blob/398595e0d9dca98b7db78c7f2f939c969431871a/main.py#L31
    # This model doesn't support customizing batch size or data prefetching
DEFAULT_TRAIN_BSIZE = 256
DEFAULT_EVAL_BSIZE = 256
ALLOW_CUSTOMIZE_BSIZE = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.args = SACConfig()
self.args.batch_size = self.batch_size
# Construct agent
current_dir = os.path.dirname(os.path.abspath(__file__))
self.train_env = load_gym(self.args.env_id, self.args.seed)
self.test_env = load_gym(self.args.env_id, self.args.seed)
self.obs_shape = self.train_env.observation_space.shape
self.actions_shape = self.train_env.action_space.shape
self.agent = SACAgent(self.obs_shape[0], self.actions_shape[0],
self.args.log_std_low, self.args.log_std_high, self.device)
if self.args.prioritized_replay:
buffer_t = PrioritizedReplayBuffer
else:
buffer_t = ReplayBuffer
self.buffer = buffer_t(
self.args.buffer_size,
device=self.device,
state_shape=self.train_env.observation_space.shape,
state_dtype=float,
action_shape=(1,),
)
self.learning_method = "Standard"
self.agent.to(device)
if not self.args.self_regularized:
# initialize target networks
self.target_agent = copy.deepcopy(self.agent)
self.target_agent.to(device)
hard_update(self.target_agent.critic1, self.agent.critic1)
hard_update(self.target_agent.critic2, self.agent.critic2)
self.target_agent.train()
self.critic_optimizer = torch.optim.Adam(
chain(self.agent.critic1.parameters(), self.agent.critic2.parameters(),),
lr=self.args.critic_lr,
weight_decay=self.args.critic_l2,
betas=(0.9, 0.999),
)
self.actor_optimizer = torch.optim.Adam(
self.agent.actor.parameters(),
lr=self.args.actor_lr,
weight_decay=self.args.actor_l2,
betas=(0.9, 0.999),
)
self.log_alpha = torch.Tensor([math.log(self.args.init_alpha)]).to(device)
self.log_alpha.requires_grad = True
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.args.alpha_lr, betas=(0.5, 0.999))
if not self.args.discrete_actions:
self.target_entropy = -self.train_env.action_space.shape[0]
else:
self.target_entropy = -math.log(1.0 / self.train_env.action_space.n) * 0.98
if self.args.self_regularized:
# the critic target improvement ratio is annealed during training
self.critic_target_imp_slope = (
self.args.sr_critic_target_improvement_final - self.args.sr_critic_target_improvement_init
) / self.args.num_steps
self.current_target_imp = lambda step: min(
self.args.sr_critic_target_improvement_init + self.critic_target_imp_slope * step,
self.args.sr_critic_target_improvement_final,
)
def get_module(self):
model = self.agent.actor
state = self.train_env.reset()
action = self.agent.sample_action(state)
next_state, reward, done, info = self.train_env.step(action)
self.buffer.push(state, action, reward, next_state, done)
batch = self.buffer.sample(self.args.batch_size)
state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch
state_batch = state_batch.to(self.device)
return model, (state_batch, )
def set_module(self, new_model):
self.agent.actor = new_model
def train(self):
# Setup
self.target_agent.train()
done = True
niter = 1
for step in range(niter):
if done:
state = self.train_env.reset()
steps_this_ep = 0
done = False
action = self.agent.sample_action(state)
next_state, reward, done, info = self.train_env.step(action)
self.buffer.push(state, action, reward, next_state, done)
state = next_state
steps_this_ep += 1
if steps_this_ep >= self.args.max_episode_steps:
done = True
for _ in range(self.args.gradient_updates_per_step):
learn_standard(
buffer=self.buffer,
target_agent=self.target_agent,
agent=self.agent,
actor_optimizer=self.actor_optimizer,
critic_optimizer=self.critic_optimizer,
log_alpha=self.log_alpha,
log_alpha_optimizer=self.log_alpha_optimizer,
target_entropy=self.target_entropy,
batch_size=self.args.batch_size,
gamma=self.args.gamma,
critic_clip=self.args.critic_clip,
actor_clip=self.args.actor_clip,
update_policy=step % self.args.actor_delay == 0,
device=self.device
)
# move target model towards training model
if not self.args.self_regularized and (step % self.args.target_delay == 0):
soft_update(self.target_agent.critic1, self.agent.critic1, self.args.tau)
soft_update(self.target_agent.critic2, self.agent.critic2, self.args.tau)
def eval(self) -> Tuple[torch.Tensor]:
niter = 1
with torch.no_grad():
            discount = 1.0
episode_return_history = []
for episode in range(niter):
episode_return = 0.0
state = self.test_env.reset()
done, info = False, {}
for step_num in range(self.args.max_episode_steps):
if done:
break
action = self.agent.forward(state)
state, reward, done, info = self.test_env.step(action)
episode_return += reward * (discount ** step_num)
episode_return_history.append(episode_return)
retval = torch.tensor(episode_return_history)
return (torch.tensor(action), )
def get_optimizer(self):
return (self.actor_optimizer, self.critic_optimizer, self.log_alpha_optimizer)
def set_optimizer(self, optimizer) -> None:
self.actor_optimizer, self.critic_optimizer, self.log_alpha_optimizer = optimizer
|
import argparse
import copy
import math
import os
from itertools import chain
import numpy as np
import tensorboardX
import torch
import torch.nn.functional as F
import tqdm
from . import envs, nets, replay, utils
class SACAgent:
def __init__(
self,
obs_space_size,
act_space_size,
log_std_low,
log_std_high,
device,
actor_net_cls=nets.StochasticActor,
critic_net_cls=nets.BigCritic,
hidden_size=1024,
):
self.actor = actor_net_cls(
obs_space_size,
act_space_size,
log_std_low,
log_std_high,
dist_impl="pyd",
hidden_size=hidden_size,
)
self.critic1 = critic_net_cls(obs_space_size, act_space_size, hidden_size)
self.critic2 = critic_net_cls(obs_space_size, act_space_size, hidden_size)
self.device = device
def to(self, device):
self.actor = self.actor.to(device)
self.critic1 = self.critic1.to(device)
self.critic2 = self.critic2.to(device)
def eval(self):
self.actor.eval()
self.critic1.eval()
self.critic2.eval()
def train(self):
self.actor.train()
self.critic1.train()
self.critic2.train()
def save(self, path):
actor_path = os.path.join(path, "actor.pt")
critic1_path = os.path.join(path, "critic1.pt")
critic2_path = os.path.join(path, "critic2.pt")
torch.save(self.actor.state_dict(), actor_path)
torch.save(self.critic1.state_dict(), critic1_path)
torch.save(self.critic2.state_dict(), critic2_path)
def load(self, path):
actor_path = os.path.join(path, "actor.pt")
critic1_path = os.path.join(path, "critic1.pt")
critic2_path = os.path.join(path, "critic2.pt")
self.actor.load_state_dict(torch.load(actor_path))
self.critic1.load_state_dict(torch.load(critic1_path))
self.critic2.load_state_dict(torch.load(critic2_path))
def forward(self, state, from_cpu=True):
if from_cpu:
state = self.process_state(state)
self.actor.eval()
with torch.no_grad():
act_dist = self.actor.forward(state)
act = act_dist.mean
self.actor.train()
if from_cpu:
act = self.process_act(act)
return act
def sample_action(self, state, from_cpu=True):
if from_cpu:
state = self.process_state(state)
self.actor.eval()
with torch.no_grad():
act_dist = self.actor.forward(state)
act = act_dist.sample()
self.actor.train()
if from_cpu:
act = self.process_act(act)
return act
def process_state(self, state):
return torch.from_numpy(np.expand_dims(state, 0).astype(np.float32)).to(
self.device
)
def process_act(self, act):
return np.squeeze(act.clamp(-1.0, 1.0).cpu().numpy(), 0)
class SACDAgent(SACAgent):
def __init__(self, obs_space_size, act_space_size):
self.actor = nets.BaselineDiscreteActor(obs_space_size, act_space_size)
self.critic1 = nets.BaselineDiscreteCritic(obs_space_size, act_space_size)
self.critic2 = nets.BaselineDiscreteCritic(obs_space_size, act_space_size)
def forward(self, state):
state = self.process_state(state)
self.actor.eval()
with torch.no_grad():
act_dist = self.actor.forward(state)
act = torch.argmax(act_dist.probs, dim=1)
self.actor.train()
return self.process_act(act)
|
import numpy as np
import torch
def unique(sorted_array):
"""
More efficient implementation of np.unique for sorted arrays
:param sorted_array: (np.ndarray)
:return:(np.ndarray) sorted_array without duplicate elements
"""
if len(sorted_array) == 1:
return sorted_array
left = sorted_array[:-1]
right = sorted_array[1:]
uniques = np.append(right != left, True)
return sorted_array[uniques]
class SegmentTree:
def __init__(self, capacity, operation, neutral_element):
"""
Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
        Can be used as a regular array that supports index arrays, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
:param capacity: (int) Total size of the array - must be a power of two.
        :param operation: (lambda (Any, Any): Any) operation for combining elements (eg. sum, max); must form a
            monoid together with the set of possible values for array elements (i.e. be associative, with `neutral_element` as identity)
:param neutral_element: (Any) neutral element for the operation above. eg. float('-inf') for max and 0 for sum.
"""
assert (
capacity > 0 and capacity & (capacity - 1) == 0
), "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
self.neutral_element = neutral_element
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end),
)
def reduce(self, start=0, end=None):
"""
Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
:param start: (int) beginning of the subsequence
        :param end: (int) end of the subsequence
:return: (Any) result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# indexes of the leaf
idxs = idx + self._capacity
self._value[idxs] = val
if isinstance(idxs, int):
idxs = np.array([idxs])
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
while len(idxs) > 1 or idxs[0] > 0:
# as long as there are non-zero indexes, update the corresponding values
self._value[idxs] = self._operation(
self._value[2 * idxs], self._value[2 * idxs + 1]
)
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
def __getitem__(self, idx):
assert np.max(idx) < self._capacity
assert 0 <= np.min(idx)
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=np.add, neutral_element=0.0
)
self._value = np.array(self._value)
def sum(self, start=0, end=None):
"""
Returns arr[start] + ... + arr[end]
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of SumSegmentTree
"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""
Find the highest index `i` in the array such that
        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum for each entry in prefixsum
        If array values are probabilities, this function
        allows sampling indexes according to the discrete
        probability distribution efficiently.
:param prefixsum: (np.ndarray) float upper bounds on the sum of array prefix
:return: (np.ndarray) highest indexes satisfying the prefixsum constraint
"""
if isinstance(prefixsum, float):
prefixsum = np.array([prefixsum])
assert 0 <= np.min(prefixsum)
assert np.max(prefixsum) <= self.sum() + 1e-5
assert isinstance(prefixsum[0], float)
idx = np.ones(len(prefixsum), dtype=int)
cont = np.ones(len(prefixsum), dtype=bool)
while np.any(cont): # while not all nodes are leafs
idx[cont] = 2 * idx[cont]
prefixsum_new = np.where(
self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum
)
# prepare update of prefixsum for all right children
idx = np.where(
np.logical_or(self._value[idx] > prefixsum, np.logical_not(cont)),
idx,
idx + 1,
)
# Select child node for non-leaf nodes
prefixsum = prefixsum_new
# update prefixsum
cont = idx < self._capacity
# collect leafs
return idx - self._capacity
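    # Illustrative sketch (hypothetical values, not part of the benchmark):
    #
    #   tree = SumSegmentTree(4)
    #   for i, p in enumerate([0.1, 0.2, 0.3, 0.4]):
    #       tree[i] = p
    #   tree.sum()                                  # -> ~1.0 (up to float rounding)
    #   tree.find_prefixsum_idx(np.array([0.25]))   # -> array([1])
    #
    # Index 1 is returned because the prefix sums are 0.1, 0.3, 0.6, 1.0 and
    # 0.25 falls between 0.1 and 0.3.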
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity, operation=np.minimum, neutral_element=float("inf")
)
self._value = np.array(self._value)
def min(self, start=0, end=None):
"""
Returns min(arr[start], ..., arr[end])
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of MinSegmentTree
"""
return super(MinSegmentTree, self).reduce(start, end)
class ReplayBufferStorage:
def __init__(self, size, obs_shape, act_shape, device, obs_dtype=torch.float32):
self.s_dtype = obs_dtype
self.device = device
# buffer arrays
self.s_stack = torch.zeros((size,) + obs_shape, dtype=self.s_dtype, device=device)
self.action_stack = torch.zeros((size,) + act_shape, dtype=torch.float32, device=device)
self.reward_stack = torch.zeros((size, 1), dtype=torch.float32, device=device)
self.s1_stack = torch.zeros((size,) + obs_shape, dtype=self.s_dtype, device=device)
self.done_stack = torch.zeros((size, 1), dtype=torch.int, device=device)
self.obs_shape = obs_shape
self.size = size
self._next_idx = 0
self._max_filled = 0
def __len__(self):
return self._max_filled
def add(self, s, a, r, s_1, d):
# this buffer supports batched experience
if len(s.shape) > len(self.obs_shape):
# there must be a batch dimension
num_samples = len(s)
else:
num_samples = 1
r, d = [r], [d]
if not isinstance(s, torch.Tensor):
# convert states to numpy (checking for LazyFrames)
if not isinstance(s, np.ndarray):
s = np.asarray(s)
if not isinstance(s_1, np.ndarray):
s_1 = np.asarray(s_1)
# convert to torch tensors
s = torch.from_numpy(s)
a = torch.from_numpy(a).float()
r = torch.Tensor(r).float()
s_1 = torch.from_numpy(s_1)
d = torch.Tensor(d).int()
# make sure tensors are floats not doubles
if self.s_dtype is torch.float32:
s = s.float()
s_1 = s_1.float()
s = s.to(self.device)
a = a.to(self.device)
r = r.to(self.device)
s_1 = s_1.to(self.device)
d = d.int().to(self.device)
# Store at end of buffer. Wrap around if past end.
R = np.arange(self._next_idx, self._next_idx + num_samples) % self.size
self.s_stack[R] = s
self.action_stack[R] = a
self.reward_stack[R] = r
self.s1_stack[R] = s_1
self.done_stack[R] = d
# Advance index.
self._max_filled = min(
max(self._next_idx + num_samples, self._max_filled), self.size
)
self._next_idx = (self._next_idx + num_samples) % self.size
return R
def __getitem__(self, indices):
try:
iter(indices)
        except TypeError:
raise IndexError(
"ReplayBufferStorage getitem called with indices object that is not iterable"
)
# converting states and actions to float here instead of inside the learning loop
# of each agent seems fine for now.
state = self.s_stack[indices].float()
action = self.action_stack[indices].float()
reward = self.reward_stack[indices]
next_state = self.s1_stack[indices].float()
done = self.done_stack[indices]
return (state, action, reward, next_state, done)
def __setitem__(self, indices, experience):
s, a, r, s1, d = experience
self.s_stack[indices] = s.float()
self.action_stack[indices] = a.float()
self.reward_stack[indices] = r
self.s1_stack[indices] = s1.float()
self.done_stack[indices] = d
def get_all_transitions(self):
return (
self.s_stack[: self._max_filled],
self.action_stack[: self._max_filled],
self.reward_stack[: self._max_filled],
self.s1_stack[: self._max_filled],
self.done_stack[: self._max_filled],
)
class ReplayBuffer:
def __init__(self, size, device, state_shape=None, action_shape=None, state_dtype=float):
self.device=device
self._maxsize = size
self.state_shape = state_shape
self.state_dtype = self._convert_dtype(state_dtype)
self.action_shape = action_shape
self._storage = None
assert self.state_shape, "Must provide shape of state space to ReplayBuffer"
assert self.action_shape, "Must provide shape of action space to ReplayBuffer"
def _convert_dtype(self, dtype):
if dtype in [int, np.uint8, torch.uint8]:
return torch.uint8
elif dtype in [float, np.float32, np.float64, torch.float32, torch.float64]:
return torch.float32
elif dtype in ["int32", np.int32]:
return torch.int32
else:
raise ValueError(f"Uncreocgnized replay buffer dtype: {dtype}")
def __len__(self):
return len(self._storage) if self._storage is not None else 0
def push(self, state, action, reward, next_state, done):
if self._storage is None:
self._storage = ReplayBufferStorage(
self._maxsize,
device=self.device,
obs_shape=self.state_shape,
act_shape=self.action_shape,
obs_dtype=self.state_dtype,
)
return self._storage.add(state, action, reward, next_state, done)
def sample(self, batch_size, get_idxs=False):
random_idxs = torch.randint(len(self._storage), (batch_size,)).to(self.device)
if get_idxs:
return self._storage[random_idxs], random_idxs.cpu().numpy()
else:
return self._storage[random_idxs]
def get_all_transitions(self):
return self._storage.get_all_transitions()
def load_experience(self, s, a, r, s1, d):
assert (
s.shape[0] <= self._maxsize
), "Experience dataset is larger than the buffer."
if len(r.shape) < 2:
r = np.expand_dims(r, 1)
if len(d.shape) < 2:
d = np.expand_dims(d, 1)
self.push(s, a, r, s1, d)
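    # Illustrative usage sketch (hypothetical shapes, not part of the benchmark):
    #
    #   buf = ReplayBuffer(1000, device="cpu", state_shape=(3,), action_shape=(1,))
    #   buf.push(np.zeros(3), np.zeros(1), 0.0, np.ones(3), False)
    #   states, actions, rewards, next_states, dones = buf.sample(batch_size=1)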
class PrioritizedReplayBuffer(ReplayBuffer):
    def __init__(
        self, size, device, state_shape=None, action_shape=None, state_dtype=float, alpha=0.6, beta=1.0
    ):
        # match ReplayBuffer's signature so the device kwarg used by the benchmark is forwarded
        super(PrioritizedReplayBuffer, self).__init__(
            size, device, state_shape=state_shape, action_shape=action_shape, state_dtype=state_dtype
        )
assert alpha >= 0
self.alpha = alpha
self.beta = beta
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def push(self, s, a, r, s_1, d, priorities=None):
R = super().push(s, a, r, s_1, d)
if priorities is None:
priorities = self._max_priority
self._it_sum[R] = priorities ** self.alpha
self._it_min[R] = priorities ** self.alpha
def _sample_proportional(self, batch_size):
mass = []
total = self._it_sum.sum(0, len(self._storage) - 1)
mass = np.random.random(size=batch_size) * total
idx = self._it_sum.find_prefixsum_idx(mass)
return idx
def sample(self, batch_size):
idxes = self._sample_proportional(batch_size)
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-self.beta)
p_sample = self._it_sum[idxes] / self._it_sum.sum()
weights = (p_sample * len(self._storage)) ** (-self.beta) / max_weight
return self._storage[idxes], torch.from_numpy(weights), idxes
def sample_uniform(self, batch_size):
return super().sample(batch_size, get_idxs=True)
def update_priorities(self, idxes, priorities):
assert len(idxes) == len(priorities)
assert np.min(priorities) > 0
assert np.min(idxes) >= 0
assert np.max(idxes) < len(self._storage)
self._it_sum[idxes] = priorities ** self.alpha
self._it_min[idxes] = priorities ** self.alpha
self._max_priority = max(self._max_priority, np.max(priorities))
class MultiPriorityBuffer(ReplayBuffer):
    def __init__(
        self,
        size,
        trees,
        device,
        state_shape=None,
        action_shape=None,
        state_dtype=float,
        alpha=0.6,
        beta=1.0,
    ):
        # match ReplayBuffer's signature so the device argument is forwarded
        super(MultiPriorityBuffer, self).__init__(
            size, device, state_shape=state_shape, action_shape=action_shape, state_dtype=state_dtype
        )
assert alpha >= 0
self.alpha = alpha
self.beta = beta
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self.sum_trees = [SumSegmentTree(it_capacity) for _ in range(trees)]
self.min_trees = [MinSegmentTree(it_capacity) for _ in range(trees)]
self._max_priority = 1.0
def push(self, s, a, r, s_1, d, priorities=None):
R = super().push(s, a, r, s_1, d)
if priorities is None:
priorities = self._max_priority
for sum_tree in self.sum_trees:
sum_tree[R] = priorities ** self.alpha
for min_tree in self.min_trees:
min_tree[R] = priorities ** self.alpha
def _sample_proportional(self, batch_size, tree_num):
mass = []
total = self.sum_trees[tree_num].sum(0, len(self._storage) - 1)
mass = np.random.random(size=batch_size) * total
idx = self.sum_trees[tree_num].find_prefixsum_idx(mass)
return idx
def sample(self, batch_size, tree_num):
idxes = self._sample_proportional(batch_size, tree_num)
p_min = self.min_trees[tree_num].min() / self.sum_trees[tree_num].sum()
max_weight = (p_min * len(self._storage)) ** (-self.beta)
p_sample = self.sum_trees[tree_num][idxes] / self.sum_trees[tree_num].sum()
weights = (p_sample * len(self._storage)) ** (-self.beta) / max_weight
return self._storage[idxes], torch.from_numpy(weights), idxes
def sample_uniform(self, batch_size):
return super().sample(batch_size, get_idxs=True)
def update_priorities(self, idxes, priorities, tree_num):
assert len(idxes) == len(priorities)
assert np.min(priorities) > 0
assert np.min(idxes) >= 0
assert np.max(idxes) < len(self._storage)
self.sum_trees[tree_num][idxes] = priorities ** self.alpha
self.min_trees[tree_num][idxes] = priorities ** self.alpha
self._max_priority = max(self._max_priority, np.max(priorities))
|
import math
import os
import random
from collections import namedtuple
import gym
import numpy as np
import torch
def clean_hparams_dict(hparams_dict):
return {key: val for key, val in hparams_dict.items() if val}
def get_grad_norm(model):
total_norm = 0.0
for p in model.parameters():
try:
param = p.grad.data
except AttributeError:
continue
else:
param_norm = param.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1.0 / 2)
return total_norm
def torch_and_pad(x):
if not isinstance(x, np.ndarray):
x = np.array(x)
return torch.from_numpy(x.astype(np.float32)).unsqueeze(0)
def mean(lst):
return float(sum(lst)) / len(lst)
def make_process_dirs(run_name, base_path="dc_saves"):
base_dir = os.path.join(base_path, run_name)
i = 0
while os.path.exists(base_dir + f"_{i}"):
i += 1
base_dir += f"_{i}"
os.makedirs(base_dir)
return base_dir
def compute_conv_output(
inp_shape, kernel_size, padding=(0, 0), dilation=(1, 1), stride=(1, 1)
):
"""
Compute the shape of the output of a torch Conv2d layer using
the formula from the docs.
every argument is a tuple corresponding to (height, width), e.g. kernel_size=(3, 4)
"""
height_out = math.floor(
(
(inp_shape[0] + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1)
/ stride[0]
)
+ 1
)
width_out = math.floor(
(
(inp_shape[1] + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1)
/ stride[1]
)
+ 1
)
return height_out, width_out
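# Worked example: an 84x84 input through a conv with kernel_size=(8, 8) and
# stride=(4, 4), no padding or dilation, gives floor((84 - 8) / 4) + 1 = 20,
# i.e. a 20x20 output -- matching the first conv layer used by SmallPixelEncoder
# in nets.py.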
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
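# Worked example of the soft update: with tau=0.005 each call moves a target
# parameter to 0.995 * target + 0.005 * source, i.e. a slow exponential moving
# average of the online network (Polyak averaging).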
""" This is all from: https://github.com/matthiasplappert/keras-rl/blob/master/rl/random.py """
class AnnealedGaussianProcess:
def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
self.mu = mu
self.sigma = sigma
self.n_steps = 0
if sigma_min is not None:
self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
self.c = sigma
self.sigma_min = sigma_min
else:
self.m = 0.0
self.c = sigma
self.sigma_min = sigma
@property
def current_sigma(self):
sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c)
return sigma
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
def __init__(
self,
theta,
mu=0.0,
sigma=1.0,
dt=1e-2,
x0=None,
size=1,
sigma_min=None,
n_steps_annealing=1000,
):
super(OrnsteinUhlenbeckProcess, self).__init__(
mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing
)
self.theta = theta
self.mu = mu
self.dt = dt
self.x0 = x0
self.size = size
self.reset_states()
def sample(self):
x = (
self.x_prev
+ self.theta * (self.mu - self.x_prev) * self.dt
+ self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
)
self.x_prev = x
self.n_steps += 1
return x
def reset_states(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.size)
class GaussianExplorationNoise:
def __init__(self, size, start_scale=1.0, final_scale=0.1, steps_annealed=1000):
assert start_scale >= final_scale
self.size = size
self.start_scale = start_scale
self.final_scale = final_scale
self.steps_annealed = steps_annealed
self._current_scale = start_scale
self._scale_slope = (start_scale - final_scale) / steps_annealed
def sample(self):
noise = self._current_scale * torch.randn(*self.size)
self._current_scale = max(
self._current_scale - self._scale_slope, self.final_scale
)
return noise.numpy()
def reset_states(self):
pass
|
import os
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import distributions as pyd
from torch import nn
from . import utils
def weight_init(m):
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
# delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf
assert m.weight.size(2) == m.weight.size(3)
m.weight.data.fill_(0.0)
m.bias.data.fill_(0.0)
mid = m.weight.size(2) // 2
gain = nn.init.calculate_gain("relu")
nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain)
class BigPixelEncoder(nn.Module):
def __init__(self, obs_shape, out_dim=50):
super().__init__()
channels = obs_shape[0]
self.conv1 = nn.Conv2d(channels, 32, kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
output_height, output_width = utils.compute_conv_output(
obs_shape[1:], kernel_size=(3, 3), stride=(2, 2)
)
for _ in range(3):
output_height, output_width = utils.compute_conv_output(
(output_height, output_width), kernel_size=(3, 3), stride=(1, 1)
)
self.fc = nn.Linear(output_height * output_width * 32, out_dim)
self.ln = nn.LayerNorm(out_dim)
self.apply(weight_init)
def forward(self, obs):
obs /= 255.0
x = F.relu(self.conv1(obs))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.ln(x)
state = torch.tanh(x)
return state
class SmallPixelEncoder(nn.Module):
def __init__(self, obs_shape, out_dim=50):
super().__init__()
channels = obs_shape[0]
self.conv1 = nn.Conv2d(channels, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
output_height, output_width = utils.compute_conv_output(
obs_shape[1:], kernel_size=(8, 8), stride=(4, 4)
)
output_height, output_width = utils.compute_conv_output(
(output_height, output_width), kernel_size=(4, 4), stride=(2, 2)
)
output_height, output_width = utils.compute_conv_output(
(output_height, output_width), kernel_size=(3, 3), stride=(1, 1)
)
self.fc = nn.Linear(output_height * output_width * 64, out_dim)
self.apply(weight_init)
def forward(self, obs):
obs /= 255.0
x = F.relu(self.conv1(obs))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(x.size(0), -1)
state = self.fc(x)
return state
class StochasticActor(nn.Module):
def __init__(
self,
state_space_size,
act_space_size,
log_std_low=-10,
log_std_high=2,
hidden_size=1024,
dist_impl="pyd",
):
super().__init__()
assert dist_impl in ["pyd", "beta"]
self.fc1 = nn.Linear(state_space_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 2 * act_space_size)
self.log_std_low = log_std_low
self.log_std_high = log_std_high
self.apply(weight_init)
self.dist_impl = dist_impl
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
out = self.fc3(x)
mu, log_std = out.chunk(2, dim=1)
if self.dist_impl == "pyd":
log_std = torch.tanh(log_std)
log_std = self.log_std_low + 0.5 * (
self.log_std_high - self.log_std_low
) * (log_std + 1)
std = log_std.exp()
dist = SquashedNormal(mu, std)
elif self.dist_impl == "beta":
out = 1.0 + F.softplus(out)
alpha, beta = out.chunk(2, dim=1)
dist = BetaDist(alpha, beta)
return dist
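    # Worked example of the log-std squashing above (dist_impl="pyd"): with
    # log_std_low=-10 and log_std_high=2, a raw network output of 0.0 passes
    # through tanh to 0.0 and is mapped to -10 + 0.5 * (2 - (-10)) * (0 + 1) = -4,
    # so std = exp(-4) ~= 0.018.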
class BigCritic(nn.Module):
def __init__(self, state_space_size, act_space_size, hidden_size=1024):
super().__init__()
self.fc1 = nn.Linear(state_space_size + act_space_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 1)
self.apply(weight_init)
def forward(self, state, action):
x = F.relu(self.fc1(torch.cat((state, action), dim=1)))
x = F.relu(self.fc2(x))
out = self.fc3(x)
return out
class BaselineActor(nn.Module):
def __init__(self, state_size, action_size, hidden_size=400):
super().__init__()
self.fc1 = nn.Linear(state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, action_size)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
act = torch.tanh(self.out(x))
return act
class BaselineCritic(nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.fc1 = nn.Linear(state_size + action_size, 400)
self.fc2 = nn.Linear(400, 300)
self.out = nn.Linear(300, 1)
def forward(self, state, action):
x = torch.cat((state, action), dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
val = self.out(x)
return val
class BetaDist(pyd.transformed_distribution.TransformedDistribution):
class _BetaDistTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
def __eq__(self, other):
            # the nested class name is not in scope inside the method body, so compare via type(self)
            return isinstance(other, type(self))
def _inverse(self, y):
return (y.clamp(-0.99, 0.99) + 1.0) / 2.0
def _call(self, x):
return (2.0 * x) - 1.0
def log_abs_det_jacobian(self, x, y):
# return log det jacobian |dy/dx| given input and output
return torch.Tensor([math.log(2.0)]).to(x.device)
def __init__(self, alpha, beta):
self.base_dist = pyd.beta.Beta(alpha, beta)
transforms = [self._BetaDistTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.base_dist.mean
for tr in self.transforms:
mu = tr(mu)
return mu
"""
Credit for actor distribution code: https://github.com/denisyarats/pytorch_sac/blob/master/agent/actor.py
"""
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
return self.atanh(y.clamp(-0.99, 0.99))
def log_abs_det_jacobian(self, x, y):
return 2.0 * (math.log(2.0) - x - F.softplus(-2.0 * x))
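        # Equivalent to log(1 - tanh(x)^2): since 1 - tanh(x)^2 = 4 / (e^x + e^-x)^2,
        # taking logs gives 2 * (log 2 - x - softplus(-2x)), which is the
        # numerically stable form used here.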
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
class GracBaselineActor(nn.Module):
def __init__(self, obs_size, action_size):
super().__init__()
self.fc1 = nn.Linear(obs_size, 400)
self.fc2 = nn.Linear(400, 300)
self.fc_mean = nn.Linear(300, action_size)
self.fc_std = nn.Linear(300, action_size)
def forward(self, state, stochastic=False):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
mean = torch.tanh(self.fc_mean(x))
std = F.softplus(self.fc_std(x)) + 1e-3
dist = pyd.Normal(mean, std)
return dist
class BaselineDiscreteActor(nn.Module):
def __init__(self, obs_shape, action_size, hidden_size=300):
super().__init__()
self.fc1 = nn.Linear(obs_shape, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.act_p = nn.Linear(hidden_size, action_size)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
act_p = F.softmax(self.act_p(x), dim=1)
dist = pyd.categorical.Categorical(act_p)
return dist
class BaselineDiscreteCritic(nn.Module):
def __init__(self, obs_shape, action_shape, hidden_size=300):
super().__init__()
self.fc1 = nn.Linear(obs_shape, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, action_shape)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
vals = self.out(x)
return vals
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
    # Train batch size: use the training batch size from the paper.
# Source: https://arxiv.org/pdf/1608.06993.pdf
DEFAULT_TRAIN_BSIZE = 256
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="densenet121", test=test, device=device,
batch_size=batch_size, weights=models.DenseNet121_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
# Ported from pytorch example:
# https://github.com/pytorch/examples/blob/master/dcgan/main.py
from __future__ import print_function
import argparse
import os
import random
from typing import Any, Tuple
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
class DCGAN:
def __init__(self, bench):
# Spatial size of training images. All images will be resized to this
# size using a transformer.
self.image_size = 64
# Number of channels in the training images. For color images this is 3
self.nc = 3
# Size of z latent vector (i.e. size of generator input)
self.nz = 100
# Size of feature maps in generator
self.ngf = 64
# Size of feature maps in discriminator
self.ndf = 64
# Number of training epochs
self.num_epochs = 5
# Learning rate for optimizers
self.lr = 0.0002
# Beta1 hyperparam for Adam optimizers
self.beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
self.ngpu = 1
self.device = bench.device
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class Generator(nn.Module):
def __init__(self, dcgan):
super(Generator, self).__init__()
self.ngpu = dcgan.ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( dcgan.nz, dcgan.ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(dcgan.ngf * 8),
nn.ReLU(True),
# state size. (dcgan.ngf*8) x 4 x 4
nn.ConvTranspose2d(dcgan.ngf * 8, dcgan.ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(dcgan.ngf * 4),
nn.ReLU(True),
# state size. (dcgan.ngf*4) x 8 x 8
nn.ConvTranspose2d( dcgan.ngf * 4, dcgan.ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(dcgan.ngf * 2),
nn.ReLU(True),
# state size. (dcgan.ngf*2) x 16 x 16
nn.ConvTranspose2d( dcgan.ngf * 2, dcgan.ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(dcgan.ngf),
nn.ReLU(True),
# state size. (dcgan.ngf) x 32 x 32
nn.ConvTranspose2d( dcgan.ngf, dcgan.nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (dcgan.nc) x 64 x 64
)
self.debug_print = False
def forward(self, input):
if self.debug_print:
print(input.shape)
return self.main(input)
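    # Spatial-size check (assuming the DCGAN defaults above, nz=100, ngf=64, nc=3):
    # the first ConvTranspose2d (stride 1, no padding) maps the 1x1 latent to 4x4,
    # and each following ConvTranspose2d with kernel=4, stride=2, padding=1 doubles
    # H and W ((in - 1) * 2 - 2 + 4 = 2 * in), giving 4 -> 8 -> 16 -> 32 -> 64,
    # so the output is a (nc) x 64 x 64 image.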
class Discriminator(nn.Module):
def __init__(self, ncgan):
ngpu = ncgan.ngpu
nc = ncgan.nc
ndf = ncgan.ndf
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 256
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.debug_print = False
self.root = str(Path(__file__).parent)
self.dcgan = DCGAN(self)
dcgan = self.dcgan
device = dcgan.device
ngpu = dcgan.ngpu
nz = dcgan.nz
lr = dcgan.lr
beta1 = dcgan.beta1
num_epochs = dcgan.num_epochs
# Create the generator
self.netG = Generator(dcgan).to(device)
# Handle multi-gpu if desired
if (dcgan.device == 'cuda') and (ngpu > 1):
self.netG = nn.DataParallel(self.netG, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
        # to mean=0, stdev=0.02.
self.netG.apply(weights_init)
if self.debug_print:
# Print the model
print(self.netG)
# Create the Discriminator
netD = Discriminator(dcgan).to(device)
# Handle multi-gpu if desired
if (dcgan.device == 'cuda') and (ngpu > 1):
            netD = nn.DataParallel(netD, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
        # to mean=0, stdev=0.02.
netD.apply(weights_init)
if self.debug_print:
# Print the model
print(netD)
# Initialize BCELoss function
self.criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
self.fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
self.real_label = 1.
self.fake_label = 0.
# Random values as surrogate for batch of photos
        self.example_inputs = torch.randn(self.batch_size, 3, 64, 64, device=self.device)
self.model = netD
if test == "train":
# Setup Adam optimizers for both G and D
self.optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
self.optimizerG = optim.Adam(self.netG.parameters(), lr=lr, betas=(beta1, 0.999))
elif test == "eval":
# inference would just run descriminator so thats what we'll do too.
self.inference_just_descriminator = True
if False == self.inference_just_descriminator:
self.eval_noise = torch.randn(self.batch_size, nz, 1, 1, device=self.device)
def jit_callback(self):
self.model = torch.jit.trace(self.model, (self.example_inputs,))
if self.test == "eval" and not self.inference_just_discriminator:
self.netG = torch.jit.trace(self.netG, (self.eval_noise,))
def get_module(self):
return self.model, (self.example_inputs,)
def eval(self):
if not self.inference_just_discriminator:
# Generate fake image batch with G
self.eval_fake = self.netG(self.eval_noise)
# Run the discriminator on the benchmark batch
output = self.model(self.example_inputs).view(-1)
return (output, )
def train(self):
# Training Loop
# Lists to keep track of progress
img_list = []
iters = 0
dcgan = self.dcgan
device = dcgan.device
num_epochs = dcgan.num_epochs
num_train_batch = 1
lr = dcgan.lr
nz = dcgan.nz
beta1 = dcgan.beta1
netD = self.model
netG = self.netG
criterion = self.criterion
optimizerD = self.optimizerD
optimizerG = self.optimizerG
real_label = self.real_label
fake_label = self.fake_label
benchmark_pic = self.example_inputs
# For each epoch
for epoch in range(num_epochs):
for i in range(num_train_batch):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = benchmark_pic
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch, accumulated (summed) with previous gradients
errD_fake.backward()
D_G_z1 = output.mean().item()
# Compute error of D as sum over the fake and the real batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# This model has TWO optimizers! Try returning both.
def get_optimizer(self):
return (self.optimizerD, self.optimizerG)
# `optimizer` has type Tuple but we want this function to override the parent's
# so keep the name and schema the same.
def set_optimizer(self, optimizer) -> None:
self.optimizerD, self.optimizerG = optimizer
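# Hedged usage sketch (illustrative only; `m` is a hypothetical already-constructed Model, not part of
# this file): callers that swap optimizers are expected to keep the (discriminator, generator) ordering
# that get_optimizer returns, e.g.
#   opt_d, opt_g = m.get_optimizer()
#   m.set_optimizer((optim.Adam(m.model.parameters(), lr=2e-4, betas=(0.5, 0.999)),
#                    optim.Adam(m.netG.parameters(), lr=2e-4, betas=(0.5, 0.999))))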
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
from torchbenchmark.tasks import SPEECH
import torch
class Model(HuggingFaceModel):
task = SPEECH.RECOGNITION
DEFAULT_EVAL_BSIZE = 8
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Whisper", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.feature_size = 80
self.sequence_length = 3000
self.input_features = torch.randn(size=(self.batch_size, self.feature_size, self.sequence_length),device=self.device)
self.example_inputs = {"input_features": self.input_features.to(self.device), "input_ids" : self.input_features.to(self.device)}
self.model.to(self.device)
def train(self):
raise NotImplementedError("Training is not implemented.")
def eval(self):
self.model.eval()
with torch.no_grad():
self.model(self.example_inputs["input_ids"])
def enable_fp16_half(self):
self.model.half()
self.example_inputs = {"input_features": self.input_features.half().to(self.device), "input_ids" : self.input_features.half().to(self.device)}
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name) |
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(__file__))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.SEGMENTATION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="shufflenet_v2_x1_0", test=test, device=device,
batch_size=batch_size, weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
from torchbenchmark.util.framework.gnn.model_factory import BasicGNNModel
from torchbenchmark.tasks import GNN
class Model(BasicGNNModel):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="gcn", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.gnn import install_pytorch_geometric
if __name__ == '__main__':
install_pytorch_geometric()
|
from torchbenchmark.util.framework.gnn.model_factory import BasicGNNModel
from torchbenchmark.tasks import GNN
class Model(BasicGNNModel):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="gin", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.gnn import install_pytorch_geometric
if __name__ == '__main__':
install_pytorch_geometric()
|
import torch
def get_drhodT(salt, temp, p):
rho0 = 1024.0
z0 = 0.0
theta0 = 283.0 - 273.15
grav = 9.81
betaT = 1.67e-4
betaTs = 1e-5
gammas = 1.1e-8
zz = -p - z0
thetas = temp - theta0
return -(betaTs * thetas + betaT * (1 - gammas * grav * zz * rho0)) * rho0
def get_drhodS(salt, temp, p):
betaS = 0.78e-3
rho0 = 1024.0
return betaS * rho0 * torch.ones_like(temp)
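# A hedged reading of the two helpers above (an interpretation for readability, not taken verbatim
# from the upstream pyhpc-benchmarks source): they behave like the T- and S-derivatives of a
# linearized equation of state of the form
#   rho ~= rho0 * (1 - betaT * (1 - gammas * grav * zz * rho0) * thetas
#                    - betaTs * thetas**2 / 2
#                    + betaS * salt)
# with zz = -p - z0 and thetas = temp - theta0, so get_drhodT returns d(rho)/dT and get_drhodS
# returns the constant d(rho)/dS = betaS * rho0.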
def dm_taper(sx):
"""
tapering function for isopycnal slopes
"""
iso_slopec = 1e-3
iso_dslope = 1e-3
return 0.5 * (1.0 + torch.tanh((-torch.abs(sx) + iso_slopec) / iso_dslope))
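# Behaviour of the taper: the argument of tanh is (iso_slopec - |sx|) / iso_dslope, so the factor is
# close to 1 for slopes well below iso_slopec, exactly 0.5 at |sx| = iso_slopec, and decays smoothly
# towards 0 once the slope exceeds iso_slopec by a few multiples of iso_dslope.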
def isoneutral_diffusion_pre(
maskT,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
salt,
temp,
zt,
K_iso,
K_11,
K_22,
K_33,
Ai_ez,
Ai_nz,
Ai_bx,
Ai_by,
):
"""
Isopycnal diffusion for tracer
following functional formulation by Griffies et al
Code adopted from MOM2.1
"""
epsln = 1e-20
K_iso_steep = 50.0
tau = 0
device = K_11.device
dTdx = torch.zeros_like(K_11)
dSdx = torch.zeros_like(K_11)
dTdy = torch.zeros_like(K_11)
dSdy = torch.zeros_like(K_11)
dTdz = torch.zeros_like(K_11)
dSdz = torch.zeros_like(K_11)
"""
drho_dt and drho_ds at centers of T cells
"""
drdT = maskT * get_drhodT(salt[:, :, :, tau], temp[:, :, :, tau], torch.abs(zt))
drdS = maskT * get_drhodS(salt[:, :, :, tau], temp[:, :, :, tau], torch.abs(zt))
"""
gradients at top face of T cells
"""
dTdz[:, :, :-1] = (
maskW[:, :, :-1]
* (temp[:, :, 1:, tau] - temp[:, :, :-1, tau])
/ dzw[None, None, :-1]
)
dSdz[:, :, :-1] = (
maskW[:, :, :-1]
* (salt[:, :, 1:, tau] - salt[:, :, :-1, tau])
/ dzw[None, None, :-1]
)
"""
gradients at eastern face of T cells
"""
dTdx[:-1, :, :] = (
maskU[:-1, :, :]
* (temp[1:, :, :, tau] - temp[:-1, :, :, tau])
/ (dxu[:-1, None, None] * cost[None, :, None])
)
dSdx[:-1, :, :] = (
maskU[:-1, :, :]
* (salt[1:, :, :, tau] - salt[:-1, :, :, tau])
/ (dxu[:-1, None, None] * cost[None, :, None])
)
"""
gradients at northern face of T cells
"""
dTdy[:, :-1, :] = (
maskV[:, :-1, :]
* (temp[:, 1:, :, tau] - temp[:, :-1, :, tau])
/ dyu[None, :-1, None]
)
dSdy[:, :-1, :] = (
maskV[:, :-1, :]
* (salt[:, 1:, :, tau] - salt[:, :-1, :, tau])
/ dyu[None, :-1, None]
)
"""
Compute Ai_ez and K11 on center of east face of T cell.
"""
diffloc = torch.zeros_like(K_11)
diffloc[1:-2, 2:-2, 1:] = 0.25 * (
K_iso[1:-2, 2:-2, 1:]
+ K_iso[1:-2, 2:-2, :-1]
+ K_iso[2:-1, 2:-2, 1:]
+ K_iso[2:-1, 2:-2, :-1]
)
diffloc[1:-2, 2:-2, 0] = 0.5 * (K_iso[1:-2, 2:-2, 0] + K_iso[2:-1, 2:-2, 0])
sumz = torch.zeros_like(K_11)[1:-2, 2:-2]
for kr in range(2):
ki = 0 if kr == 1 else 1
if kr == 1:
su = K_11.shape[2]
else:
su = K_11.shape[2] - 1
for ip in range(2):
drodxe = (
drdT[1 + ip : -2 + ip, 2:-2, ki:] * dTdx[1:-2, 2:-2, ki:]
+ drdS[1 + ip : -2 + ip, 2:-2, ki:] * dSdx[1:-2, 2:-2, ki:]
)
drodze = (
drdT[1 + ip : -2 + ip, 2:-2, ki:] * dTdz[1 + ip : -2 + ip, 2:-2, :su]
+ drdS[1 + ip : -2 + ip, 2:-2, ki:] * dSdz[1 + ip : -2 + ip, 2:-2, :su]
)
sxe = -drodxe / (
torch.min(drodze, torch.tensor([0.0], device=device)) - epsln
)
taper = dm_taper(sxe)
sumz[:, :, ki:] += (
dzw[None, None, :su]
* maskU[1:-2, 2:-2, ki:]
* torch.max(
torch.tensor([K_iso_steep], device=device),
diffloc[1:-2, 2:-2, ki:] * taper,
)
)
Ai_ez[1:-2, 2:-2, ki:, ip, kr] = taper * sxe * maskU[1:-2, 2:-2, ki:]
K_11[1:-2, 2:-2, :] = sumz / (4.0 * dzt[None, None, :])
"""
Compute Ai_nz and K_22 on center of north face of T cell.
"""
diffloc[...] = 0
diffloc[2:-2, 1:-2, 1:] = 0.25 * (
K_iso[2:-2, 1:-2, 1:]
+ K_iso[2:-2, 1:-2, :-1]
+ K_iso[2:-2, 2:-1, 1:]
+ K_iso[2:-2, 2:-1, :-1]
)
diffloc[2:-2, 1:-2, 0] = 0.5 * (K_iso[2:-2, 1:-2, 0] + K_iso[2:-2, 2:-1, 0])
sumz = torch.zeros_like(K_11)[2:-2, 1:-2]
for kr in range(2):
ki = 0 if kr == 1 else 1
if kr == 1:
su = K_11.shape[2]
else:
su = K_11.shape[2] - 1
for jp in range(2):
drodyn = (
drdT[2:-2, 1 + jp : -2 + jp, ki:] * dTdy[2:-2, 1:-2, ki:]
+ drdS[2:-2, 1 + jp : -2 + jp, ki:] * dSdy[2:-2, 1:-2, ki:]
)
drodzn = (
drdT[2:-2, 1 + jp : -2 + jp, ki:] * dTdz[2:-2, 1 + jp : -2 + jp, :su]
+ drdS[2:-2, 1 + jp : -2 + jp, ki:] * dSdz[2:-2, 1 + jp : -2 + jp, :su]
)
syn = -drodyn / (
torch.min(torch.tensor([0.0], device=device), drodzn) - epsln
)
taper = dm_taper(syn)
sumz[:, :, ki:] += (
dzw[None, None, :su]
* maskV[2:-2, 1:-2, ki:]
* torch.max(
torch.tensor([K_iso_steep], device=device),
diffloc[2:-2, 1:-2, ki:] * taper,
)
)
Ai_nz[2:-2, 1:-2, ki:, jp, kr] = taper * syn * maskV[2:-2, 1:-2, ki:]
K_22[2:-2, 1:-2, :] = sumz / (4.0 * dzt[None, None, :])
"""
compute Ai_bx, Ai_by and K33 on top face of T cell.
"""
sumx = torch.zeros_like(K_11)[2:-2, 2:-2, :-1]
sumy = torch.zeros_like(K_11)[2:-2, 2:-2, :-1]
for kr in range(2):
if kr == 1:
sl = 1
su = K_11.shape[2]
else:
sl = 0
su = K_11.shape[2] - 1
drodzb = (
drdT[2:-2, 2:-2, sl:su] * dTdz[2:-2, 2:-2, :-1]
+ drdS[2:-2, 2:-2, sl:su] * dSdz[2:-2, 2:-2, :-1]
)
# eastward slopes at the top of T cells
for ip in range(2):
drodxb = (
drdT[2:-2, 2:-2, sl:su] * dTdx[1 + ip : -3 + ip, 2:-2, sl:su]
+ drdS[2:-2, 2:-2, sl:su] * dSdx[1 + ip : -3 + ip, 2:-2, sl:su]
)
sxb = -drodxb / (
torch.min(torch.tensor([0.0], device=device), drodzb) - epsln
)
taper = dm_taper(sxb)
sumx += (
dxu[1 + ip : -3 + ip, None, None]
* K_iso[2:-2, 2:-2, :-1]
* taper
* sxb ** 2
* maskW[2:-2, 2:-2, :-1]
)
Ai_bx[2:-2, 2:-2, :-1, ip, kr] = taper * sxb * maskW[2:-2, 2:-2, :-1]
# northward slopes at the top of T cells
for jp in range(2):
facty = cosu[1 + jp : -3 + jp] * dyu[1 + jp : -3 + jp]
drodyb = (
drdT[2:-2, 2:-2, sl:su] * dTdy[2:-2, 1 + jp : -3 + jp, sl:su]
+ drdS[2:-2, 2:-2, sl:su] * dSdy[2:-2, 1 + jp : -3 + jp, sl:su]
)
syb = -drodyb / (
torch.min(torch.tensor([0.0], device=device), drodzb) - epsln
)
taper = dm_taper(syb)
sumy += (
facty[None, :, None]
* K_iso[2:-2, 2:-2, :-1]
* taper
* syb ** 2
* maskW[2:-2, 2:-2, :-1]
)
Ai_by[2:-2, 2:-2, :-1, jp, kr] = taper * syb * maskW[2:-2, 2:-2, :-1]
K_33[2:-2, 2:-2, :-1] = sumx / (4 * dxt[2:-2, None, None]) + sumy / (
4 * dyt[None, 2:-2, None] * cost[None, 2:-2, None]
)
K_33[2:-2, 2:-2, -1] = 0.0
return K_11, K_22, K_33, Ai_ez, Ai_nz, Ai_bx, Ai_by
def prepare_inputs(*inputs, device):
out = [
torch.as_tensor(a, device=device) for a in inputs
]
if device == "gpu":
torch.cuda.synchronize()
return out
def run(*inputs, device="cpu"):
with torch.no_grad():
outputs = isoneutral_diffusion_pre(*inputs)
if device == "gpu":
torch.cuda.synchronize()
return outputs
|
import torch
from . import isoneutral_pytorch
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
from typing import Tuple
def _generate_inputs(size):
import math
import numpy as np
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1 / 3)),
math.ceil(2 * size ** (1 / 3)),
math.ceil(0.25 * size ** (1 / 3)),
)
# masks
maskT, maskU, maskV, maskW = (
(np.random.rand(*shape) < 0.8).astype("float64") for _ in range(4)
)
# 1d arrays
dxt, dxu = (np.random.randn(shape[0]) for _ in range(2))
dyt, dyu = (np.random.randn(shape[1]) for _ in range(2))
dzt, dzw, zt = (np.random.randn(shape[2]) for _ in range(3))
cost, cosu = (np.random.randn(shape[1]) for _ in range(2))
# 3d arrays
K_iso, K_iso_steep, K_11, K_22, K_33 = (np.random.randn(*shape) for _ in range(5))
# 4d arrays
salt, temp = (np.random.randn(*shape, 3) for _ in range(2))
# 5d arrays
Ai_ez, Ai_nz, Ai_bx, Ai_by = (np.zeros((*shape, 2, 2)) for _ in range(4))
return (
maskT,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
salt,
temp,
zt,
K_iso,
K_11,
K_22,
K_33,
Ai_ez,
Ai_nz,
Ai_bx,
Ai_by,
)
class IsoneutralMixing(torch.nn.Module):
def __init__(self):
super(IsoneutralMixing, self).__init__()
def forward(
self,
maskT,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
salt,
temp,
zt,
K_iso,
K_11,
K_22,
K_33,
Ai_ez,
Ai_nz,
Ai_bx,
Ai_by,
):
return isoneutral_pytorch.isoneutral_diffusion_pre(
maskT,
maskU,
maskV,
maskW,
dxt,
dxu,
dyt,
dyu,
dzt,
dzw,
cost,
cosu,
salt,
temp,
zt,
K_iso,
K_11,
K_22,
K_33,
Ai_ez,
Ai_nz,
Ai_bx,
Ai_by,
)
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# Original input size: [2 ** i for i in range(12, 23, 2)]
# Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25
# Pick data-point when i = 20, size = 1048576
DEFAULT_EVAL_BSIZE = 1048576
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = IsoneutralMixing().to(device=device)
input_size = self.batch_size
raw_inputs = _generate_inputs(input_size)
if hasattr(isoneutral_pytorch, "prepare_inputs"):
inputs = isoneutral_pytorch.prepare_inputs(*raw_inputs, device=device)
self.example_inputs = inputs
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Training not supported")
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
with torch.no_grad():
out = model(*example_inputs)
return out
|
if __name__ == "__main__":
pass
|
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, jacrev
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
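# Sanity-check derivation for the expression above: with V(r) = epsilon * ((sigma / r)**12 - (sigma / r)**6),
#   dV/dr = epsilon * (-12 * sigma**12 / r**13 + 6 * sigma**6 / r**7)
# and the signed magnitude returned here is F(r) = -dV/dr, i.e. repulsive (positive) at short range and
# attractive (negative) beyond the potential minimum.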
def make_prediction(model, drs):
norms = torch.linalg.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
return energies, forces
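# In formula form: the network maps r = ||dr|| to an energy E(r); vmap(jacrev(model)) evaluates dE/dr
# for every sample in the batch, and the force vector is taken as F = -(dE/dr) * dr / ||dr||, i.e. minus
# the radial derivative times the unit vector along dr -- the usual rule for a conservative central force.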
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return F.mse_loss(energies, predicted_energies) + 0.01 * F.mse_loss(forces, predicted_forces) / 3
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 1000
DEFAULT_EVAL_BSIZE = 1000
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
)
self.model = self.model.to(device)
r = torch.linspace(0.5, 2 * sigma, steps=self.batch_size)
# Create a bunch of vectors that point along positive-x.
# These are the dummy inputs to the model.
self.drs = torch.outer(r, torch.tensor([1.0, 0, 0])).to(device=device)
# Generate some dummy targets loosely based on the lennard_jones force.
norms = torch.linalg.norm(self.drs, dim=1).reshape(-1, 1)
self.norms = norms
# Create training energies
self.training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
# Create forces with random direction vectors
self.training_forces = torch.stack([
force * dr for force, dr in zip(map(lennard_jones_force, norms), self.drs)
])
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
def get_module(self):
return self.model, (self.norms, )
def train(self):
model = self.model
optimizer = self.optimizer
model.train()
optimizer.zero_grad()
energies, forces = make_prediction(model, self.drs)
loss = loss_fn(self.training_energies, self.training_forces, energies, forces)
loss.backward()
optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
model.eval()
with torch.no_grad():
out = make_prediction(model, self.drs)
return out
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Bert_large", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
"""
fastNLP model (TorchBenchmark Version)
This model resembles the "BertEmbedding Q&A" task in [fastNLP Tutorial](https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html).
Input data simulates [CMRC2018 dataset](https://ymcui.com/cmrc2018/).
The program runs only for benchmark purposes and doesn't provide correctness results.
"""
import logging
from typing import Tuple
import torch
import random
import inspect
import numpy as np
from fastNLP.embeddings import BertEmbedding
from fastNLP.models import BertForQuestionAnswering
from fastNLP.core.callback import CallbackManager
from fastNLP.core.batch import DataSetIter
from fastNLP.core.losses import CMRC2018Loss
from fastNLP.core.metrics import CMRC2018Metric
from fastNLP.io.pipe.qa import CMRC2018BertPipe
from fastNLP import WarmupCallback, GradientClipCallback
from fastNLP.core.optimizer import AdamW
from fastNLP.core import logger
# Import CMRC2018 data generator
from .cmrc2018_simulator import generate_inputs
from .cmrc2018_simulator import CMRC2018_DIR, CMRC2018_CONFIG_DIR
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
logger.setLevel(logging.WARNING)
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
# Use the train batch size from the original CMRC2018 Q&A task
# Source: https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html
DEFAULT_TRAIN_BSIZE = 6
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.input_dir = CMRC2018_DIR
# Generate input data files
# FastNLP loader requires both train and eval files, so we need to generate both of them
if test == "train":
generate_inputs(train_batch_size=self.batch_size, eval_batch_size=self.DEFAULT_EVAL_BSIZE)
elif test == "eval":
generate_inputs(train_batch_size=self.DEFAULT_TRAIN_BSIZE, eval_batch_size=self.batch_size)
data_bundle = CMRC2018BertPipe().process_from_file(paths=self.input_dir)
data_bundle.rename_field('chars', 'words')
self.embed = BertEmbedding(data_bundle.get_vocab('words'),
model_dir_or_name=CMRC2018_CONFIG_DIR,
requires_grad=True,
include_cls_sep=False, auto_truncate=True,
dropout=0.5, word_dropout=0.01)
self.model = self._move_model_to_device(BertForQuestionAnswering(self.embed), device=device)
if self._model_contains_inner_module(self.model):
self._forward_func = self.model.module.forward
else:
self._forward_func = self.model.forward
# Do not spawn new processes on small scale of data
self.num_workers = 0
if self.test == "train":
self.model.train()
self.trainer = self.model
self.train_data = data_bundle.get_dataset('train')
self.data = self.train_data
self.losser = CMRC2018Loss()
self.metrics = CMRC2018Metric()
self.update_every = 10
wm_callback = WarmupCallback(schedule='linear')
gc_callback = GradientClipCallback(clip_value=1, clip_type='norm')
callbacks = [wm_callback, gc_callback]
self.optimizer = AdamW(self.model.parameters(), lr=5e-5)
self.callback_manager = CallbackManager(env={"trainer":self}, callbacks=callbacks)
elif self.test == "eval":
self.model.eval()
self.data = data_bundle.get_dataset('dev')
example_inputs = DataSetIter(dataset=self.data,
batch_size=self.batch_size,
sampler=None,
num_workers=self.num_workers, drop_last=False)
self.example_inputs = self._prefetch(example_inputs)
def get_module(self):
batch_x, _batch_y = list(self.example_inputs)[0]
return self.model, (batch_x["words"], )
# Sliced version of fastNLP.Tester._test()
def eval(self) -> Tuple[torch.Tensor]:
self._mode(self.model, is_test=True)
self._predict_func = self.model.forward
with torch.no_grad():
for batch_x, _batch_y in self.example_inputs:
pred_dict = self._data_forward(self._predict_func, batch_x)
# return a tuple of Tensors
return (pred_dict['pred_start'], pred_dict['pred_end'] )
# Sliced version of fastNLP.Trainer._train()
def train(self):
self.step = 0
self.n_epochs = 1
self._mode(self.model, is_test=False)
self.callback_manager.on_train_begin()
for _epoch in range(self.n_epochs):
self.callback_manager.on_epoch_begin()
for batch_x, batch_y in self.example_inputs:
self.step += 1
prediction = self._data_forward(self.model, batch_x)
self.callback_manager.on_loss_begin(batch_y, prediction)
loss = self._compute_loss(prediction, batch_y).mean()
self.callback_manager.on_backward_begin(loss)
self._grad_backward(loss)
self.callback_manager.on_backward_end()
self._update()
self.callback_manager.on_step_end()
self.callback_manager.on_batch_end()
self.callback_manager.on_epoch_end()
self.callback_manager.on_train_end()
def _prefetch(self, example_inputs):
prefetched_data = []
for batch_x, batch_y in example_inputs:
self._move_dict_value_to_device(batch_x, batch_y, device=self.device)
prefetched_data.append((batch_x, batch_y))
return prefetched_data
# Helper functions
def _build_args(self, func, **kwargs):
spect = inspect.getfullargspec(func)
if spect.varkw is not None:
return kwargs
needed_args = set(spect.args)
defaults = []
if spect.defaults is not None:
defaults = [arg for arg in spect.defaults]
start_idx = len(spect.args) - len(defaults)
output = {name: default for name, default in zip(spect.args[start_idx:], defaults)}
output.update({name: val for name, val in kwargs.items() if name in needed_args})
return output
def _move_dict_value_to_device(self, *args, device, non_blocking=False):
for arg in args:
if isinstance(arg, dict):
for key, value in arg.items():
if isinstance(value, torch.Tensor):
arg[key] = value.to(device, non_blocking=non_blocking)
else:
raise TypeError("Only support `dict` type right now.")
def _model_contains_inner_module(self, model):
if isinstance(model, torch.nn.Module):
if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
return True
return False
def _move_model_to_device(self, model, device):
model = model.to(device)
return model
def _mode(self, model, is_test=False):
r"""Train mode or Test mode. This is for PyTorch currently.
:param model: a PyTorch model
:param bool is_test: whether in test mode or not.
"""
if is_test:
model.eval()
else:
model.train()
def _update(self):
r"""Perform weight update on a model.
"""
if self.step % self.update_every == 0:
self.optimizer.step()
def _data_forward(self, network, x):
x = self._build_args(self._forward_func, **x)
y = network(**x)
if not isinstance(y, dict):
raise TypeError(
f"The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.")
return y
def _grad_backward(self, loss):
r"""Compute gradient with link rules.
:param loss: a scalar where back-prop starts
For PyTorch, just do "loss.backward()"
"""
if (self.step-1) % self.update_every == 0:
self.model.zero_grad()
loss.backward()
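# Taken together with _update() above, this implements gradient accumulation: with update_every=10 and
# self.step equal to 1 on the first batch, gradients are zeroed at steps 1, 11, 21, ... and the optimizer
# only steps at steps 10, 20, ..., so each parameter update sees gradients summed over update_every batches.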
def _compute_loss(self, predict, truth):
r"""Compute loss given prediction and ground truth.
:param predict: prediction dict, produced by model.forward
:param truth: ground truth dict, produced by batch_y
:return: a scalar
"""
return self.losser(predict, truth)
def get_optimizer(self):
r"""Gets the optimizer if initiated"""
if hasattr(self, "optimizer"):
return self.optimizer
return None
def set_optimizer(self, optimizer) -> None:
r"""Sets the optimizer regardless of whether it's been initiated"""
self.optimizer = optimizer
|
import subprocess
import os
import sys
import patch
def patch_fastnlp():
import fastNLP
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "fastnlp.patch")
fastNLP_dir = os.path.dirname(fastNLP.__file__)
fastNLP_target_file = os.path.join(fastNLP_dir, "embeddings", "bert_embedding.py")
p = patch.fromfile(patch_file)
if not p.apply(strip=1, root=fastNLP_dir):
print("Failed to patch fastNLP. Exit.")
sys.exit(1)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_fastnlp()
|
"""
Generator of a simulated CMRC2018 dataset.
Use random Chinese characters with the same length as the original dataset.
"""
import os
import pathlib
import json
import random
TRAIN_NUM_BATCH = 1
EVAL_NUM_BATCH = 1
CMRC2018_TRAIN_SPEC = {
# Original
# "data_size": 2403,
# Benchmark
"data_size": 6, # placeholder, will be replaced by the true batch size
"title_length": 5,
"paragraph_size": 1,
"context_length": 456,
"qas_size": 5,
"query_length": 15,
"answers_size": 1,
"answers_length": 7
}
CMRC2018_DEV_SPEC = {
# Original
# "data_size": 848,
# Benchmark
"data_size": 1, # placeholder, will be replaced by the true batch size
"title_length": 4,
"paragraph_size": 1,
"context_length": 455,
"qas_size": 4,
"query_length": 15,
"answers_size": 3,
"answers_length": 7
}
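# Reading of the two spec dicts above: each one describes the geometry of a simulated split --
# data_size articles (a placeholder, overwritten at generation time with batch_size * TRAIN_NUM_BATCH or
# batch_size * EVAL_NUM_BATCH), one paragraph per article, qas_size questions per paragraph and
# answers_size answers per question, while the *_length fields set how many random Chinese characters
# to generate for titles, contexts, questions and answers.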
CMRC2018_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".data", "cmrc2018-sim")
CMRC2018_CONFIG_DIR = os.path.join(CMRC2018_DIR, "config")
CMRC2018_TRAIN_SIM = os.path.join(CMRC2018_DIR, "train.json")
CMRC2018_DEV_SIM = os.path.join(CMRC2018_DIR, "dev.json")
CMRC2018_VOCAB_SIM = os.path.join(CMRC2018_CONFIG_DIR, "vocab.txt")
CMRC2018_BERT_CONFIG = os.path.join(CMRC2018_CONFIG_DIR, "bert_config.json")
VOCAB_SET = set()
# Generate a random Chinese (GB2312) string; the recursion produces l + 1 characters
def _GBK2312(l):
head = 0xd7
while head == 0xd7:
head = random.randint(0xb0, 0xf7)
body = random.randint(0xa1, 0xfe)
val = f'{head:x} {body:x}'
s = bytes.fromhex(val).decode('gb2312')
VOCAB_SET.add(s)
if l == 0:
return s
else:
return s + _GBK2312(l-1)
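# Note on the magic numbers above (an assumption about the intent, based on the GB2312 layout): two-byte
# GB2312 codes with a first byte in 0xB0-0xF7 and a second byte in 0xA1-0xFE cover the common Chinese
# characters, and the 0xD7 row is skipped because its tail positions are unassigned and would fail to
# decode with the 'gb2312' codec.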
def _generate_cmrc2018(spec):
simdata = {}
simdata["version"] = "v1.0-sim"
simdata["data"] = []
for ind in range(spec["data_size"]):
item = {}
para = {}
item["id"] = f"DEV_{ind}"
item["title"] = _GBK2312(spec["title_length"])
item["paragraphs"] = []
para["id"] = item["id"]
para["context"] = _GBK2312(spec["context_length"])
para["qas"] = []
for qind in range(spec["qas_size"]):
q = {}
q["question"] = _GBK2312(spec["query_length"])
q["id"] = f"{item['id']}_QUERY_{qind}"
q["answers"] = []
for ans in range(spec["answers_size"]):
ans = {}
ans["text"] = _GBK2312(spec["answers_length"])
ans["answer_start"] = 0
q["answers"].append(ans)
para["qas"].append(q)
item["paragraphs"].append(para)
simdata["data"].append(item)
return simdata
def _create_dir_if_nonexist(dirpath):
pathlib.Path(dirpath).mkdir(parents=True, exist_ok=True)
def _dump_data(data, path):
with open(path, "w", encoding='utf8') as dp:
json.dump(data, dp, indent=4, ensure_ascii=False)
def _generate_dev(batch_size):
CMRC2018_DEV_SPEC["data_size"] = batch_size * EVAL_NUM_BATCH
dev_data = _generate_cmrc2018(CMRC2018_DEV_SPEC)
_dump_data(dev_data, CMRC2018_DEV_SIM)
def _generate_train(batch_size):
CMRC2018_TRAIN_SPEC["data_size"] = batch_size * TRAIN_NUM_BATCH
dev_data = _generate_cmrc2018(CMRC2018_TRAIN_SPEC)
_dump_data(dev_data, CMRC2018_TRAIN_SIM)
# MUST be called after generate_dev() AND generate_train()!
def _generate_vocab():
never_split = ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
VOCAB_SET.update(never_split)
with open(CMRC2018_VOCAB_SIM, "w", encoding="utf8") as vf:
vf.write("\n".join(list(VOCAB_SET)))
def _copy_bert_config():
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, "bert_config.json"), "r") as configf:
config = configf.read()
with open(CMRC2018_BERT_CONFIG, "w") as configf:
configf.write(config)
def _setup_os_env():
os.environ["TORCHBENCH_FASTNLP_CONFIG_PATH"] = CMRC2018_BERT_CONFIG
def _create_empty_bin():
CMRC2018_CONFIG_DIR = os.path.join(CMRC2018_DIR, "config")
bin_file = os.path.join(CMRC2018_CONFIG_DIR, "chinese_wwm_pytorch.bin")
with open(bin_file, "w") as bf:
bf.write("")
def generate_inputs(train_batch_size, eval_batch_size):
_create_dir_if_nonexist(CMRC2018_DIR)
_create_dir_if_nonexist(os.path.join(CMRC2018_DIR, "config"))
_generate_dev(eval_batch_size)
_generate_train(train_batch_size)
_generate_vocab()
_create_empty_bin()
_copy_bert_config()
_setup_os_env()
|
import torch
from . import eos_pytorch
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
from typing import Tuple
def _generate_inputs(size):
import math
import numpy as np
np.random.seed(17)
shape = (
math.ceil(2 * size ** (1/3)),
math.ceil(2 * size ** (1/3)),
math.ceil(0.25 * size ** (1/3)),
)
s = np.random.uniform(1e-2, 10, size=shape)
t = np.random.uniform(-12, 20, size=shape)
p = np.random.uniform(0, 1000, size=(1, 1, shape[-1]))
return s, t, p
class EquationOfState(torch.nn.Module):
def __init__(self):
super(EquationOfState, self).__init__()
def forward(self, s, t, p):
return eos_pytorch.gsw_dHdT(s, t, p)
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# Original size: [2 ** i for i in range(12, 23, 2)]
# Source: https://github.com/dionhaefner/pyhpc-benchmarks/blob/650ecc650e394df829944ffcf09e9d646ec69691/run.py#L25
# Pick data point: i = 20, size = 1048576
DEFAULT_EVAL_BSIZE = 1048576
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = EquationOfState().to(device=self.device)
input_size = self.batch_size
raw_inputs = _generate_inputs(input_size)
if hasattr(eos_pytorch, "prepare_inputs"):
inputs = eos_pytorch.prepare_inputs(*raw_inputs, device=device)
self.example_inputs = inputs
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Training not supported")
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
with torch.no_grad():
out = model(*example_inputs)
return (out, )
|
"""
==========================================================================
in-situ density, dynamic enthalpy and derivatives
from Absolute Salinity and Conservative
Temperature, using the computationally-efficient 48-term expression for
density in terms of SA, CT and p (IOC et al., 2010).
==========================================================================
"""
import torch
def gsw_dHdT(sa, ct, p):
"""
d/dT of dynamic enthalpy, analytical derivative
sa : Absolute Salinity [g/kg]
ct : Conservative Temperature [deg C]
p : sea pressure [dbar]
"""
v01 = 9.998420897506056e2
v02 = 2.839940833161907e0
v03 = -3.147759265588511e-2
v04 = 1.181805545074306e-3
v05 = -6.698001071123802e0
v06 = -2.986498947203215e-2
v07 = 2.327859407479162e-4
v08 = -3.988822378968490e-2
v09 = 5.095422573880500e-4
v10 = -1.426984671633621e-5
v11 = 1.645039373682922e-7
v12 = -2.233269627352527e-2
v13 = -3.436090079851880e-4
v14 = 3.726050720345733e-6
v15 = -1.806789763745328e-4
v16 = 6.876837219536232e-7
v17 = -3.087032500374211e-7
v18 = -1.988366587925593e-8
v19 = -1.061519070296458e-11
v20 = 1.550932729220080e-10
v21 = 1.0e0
v22 = 2.775927747785646e-3
v23 = -2.349607444135925e-5
v24 = 1.119513357486743e-6
v25 = 6.743689325042773e-10
v26 = -7.521448093615448e-3
v27 = -2.764306979894411e-5
v28 = 1.262937315098546e-7
v29 = 9.527875081696435e-10
v30 = -1.811147201949891e-11
v31 = -3.303308871386421e-5
v32 = 3.801564588876298e-7
v33 = -7.672876869259043e-9
v34 = -4.634182341116144e-11
v35 = 2.681097235569143e-12
v36 = 5.419326551148740e-6
v37 = -2.742185394906099e-5
v38 = -3.212746477974189e-7
v39 = 3.191413910561627e-9
v40 = -1.931012931541776e-12
v41 = -1.105097577149576e-7
v42 = 6.211426728363857e-10
v43 = -1.119011592875110e-10
v44 = -1.941660213148725e-11
v45 = -1.864826425365600e-14
v46 = 1.119522344879478e-14
v47 = -1.200507748551599e-15
v48 = 6.057902487546866e-17
t1 = v45 * ct
t2 = 0.2e1 * t1
t3 = v46 * sa
t4 = 0.5 * v12
t5 = v14 * ct
t7 = ct * (v13 + t5)
t8 = 0.5 * t7
t11 = sa * (v15 + v16 * ct)
t12 = 0.5 * t11
t13 = t4 + t8 + t12
t15 = v19 * ct
t19 = v17 + ct * (v18 + t15) + v20 * sa
t20 = 1.0 / t19
t24 = v47 + v48 * ct
t25 = 0.5 * v13
t26 = 1.0 * t5
t27 = sa * v16
t28 = 0.5 * t27
t29 = t25 + t26 + t28
t33 = t24 * t13
t34 = t19 ** 2
t35 = 1.0 / t34
t37 = v18 + 2.0 * t15
t38 = t35 * t37
t48 = ct * (v44 + t1 + t3)
t57 = v40 * ct
t59 = ct * (v39 + t57)
t64 = t13 ** 2
t68 = t20 * t29
t71 = t24 * t64
t74 = v04 * ct
t76 = ct * (v03 + t74)
t79 = v07 * ct
t82 = torch.sqrt(sa)
t83 = v11 * ct
t85 = ct * (v10 + t83)
t92 = (
v01
+ ct * (v02 + t76)
+ sa * (v05 + ct * (v06 + t79) + t82 * (v08 + ct * (v09 + t85)))
)
t93 = v48 * t92
t105 = (
v02
+ t76
+ ct * (v03 + 2.0 * t74)
+ sa * (v06 + 2.0 * t79 + t82 * (v09 + t85 + ct * (v10 + 2.0 * t83)))
)
t106 = t24 * t105
t107 = v44 + t2 + t3
t110 = v43 + t48
t117 = t24 * t92
t120 = 4.0 * t71 * t20 - t117 - 2.0 * t110 * t13
t123 = (
v38
+ t59
+ ct * (v39 + 2.0 * t57)
+ sa * v42
+ (
4.0 * v48 * t64 * t20
+ 8.0 * t33 * t68
- 4.0 * t71 * t38
- t93
- t106
- 2.0 * t107 * t13
- 2.0 * t110 * t29
)
* t20
- t120 * t35 * t37
)
t128 = t19 * p
t130 = p * (1.0 * v12 + 1.0 * t7 + 1.0 * t11 + t128)
t131 = 1.0 / t92
t133 = 1.0 + t130 * t131
t134 = torch.log(t133)
t143 = v37 + ct * (v38 + t59) + sa * (v41 + v42 * ct) + t120 * t20
t152 = t37 * p
t156 = t92 ** 2
t165 = v25 * ct
t167 = ct * (v24 + t165)
t169 = ct * (v23 + t167)
t175 = v30 * ct
t177 = ct * (v29 + t175)
t179 = ct * (v28 + t177)
t185 = v35 * ct
t187 = ct * (v34 + t185)
t189 = ct * (v33 + t187)
t199 = t13 * t20
t217 = 2.0 * t117 * t199 - t110 * t92
t234 = (
v21
+ ct * (v22 + t169)
+ sa * (v26 + ct * (v27 + t179) + v36 * sa + t82 * (v31 + ct * (v32 + t189)))
+ t217 * t20
)
t241 = t64 - t92 * t19
t242 = torch.sqrt(t241)
t243 = 1.0 / t242
t244 = t4 + t8 + t12 - t242
t245 = 1.0 / t244
t247 = t4 + t8 + t12 + t242 + t128
t248 = 1.0 / t247
t249 = t242 * t245 * t248
t252 = 1.0 + 2.0 * t128 * t249
t253 = torch.log(t252)
t254 = t243 * t253
t259 = t234 * t19 - t143 * t13
t264 = t259 * t20
t272 = 2.0 * t13 * t29 - t105 * t19 - t92 * t37
t282 = t128 * t242
t283 = t244 ** 2
t287 = t243 * t272 / 2.0
t292 = t247 ** 2
t305 = (
0.1e5
* p
* (
v44
+ t2
+ t3
- 2.0 * v48 * t13 * t20
- 2.0 * t24 * t29 * t20
+ 2.0 * t33 * t38
+ 0.5 * v48 * p
)
* t20
- 0.1e5 * p * (v43 + t48 - 2.0 * t33 * t20 + 0.5 * t24 * p) * t38
+ 0.5e4 * t123 * t20 * t134
- 0.5e4 * t143 * t35 * t134 * t37
+ 0.5e4
* t143
* t20
* (p * (1.0 * v13 + 2.0 * t5 + 1.0 * t27 + t152) * t131 - t130 / t156 * t105)
/ t133
+ 0.5e4
* (
(
v22
+ t169
+ ct * (v23 + t167 + ct * (v24 + 2.0 * t165))
+ sa
* (
v27
+ t179
+ ct * (v28 + t177 + ct * (v29 + 2.0 * t175))
+ t82 * (v32 + t189 + ct * (v33 + t187 + ct * (v34 + 2.0 * t185)))
)
+ (
2.0 * t93 * t199
+ 2.0 * t106 * t199
+ 2.0 * t117 * t68
- 2.0 * t117 * t13 * t35 * t37
- t107 * t92
- t110 * t105
)
* t20
- t217 * t35 * t37
)
* t19
+ t234 * t37
- t123 * t13
- t143 * t29
)
* t20
* t254
- 0.5e4 * t259 * t35 * t254 * t37
- 0.25e4 * t264 / t242 / t241 * t253 * t272
+ 0.5e4
* t264
* t243
* (
2.0 * t152 * t249
+ t128 * t243 * t245 * t248 * t272
- 2.0 * t282 / t283 * t248 * (t25 + t26 + t28 - t287)
- 2.0 * t282 * t245 / t292 * (t25 + t26 + t28 + t287 + t152)
)
/ t252
)
return t305
def prepare_inputs(sa, ct, p, device):
out = [
torch.as_tensor(a, device=device)
for a in (sa, ct, p)
]
if device == "gpu":
torch.cuda.synchronize()
return out
def run(sa, ct, p, device="cpu"):
with torch.no_grad():
out = gsw_dHdT(sa, ct, p)
if device == "gpu":
torch.cuda.synchronize()
return out
|
if __name__ == "__main__":
pass
|