python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149)
---|---|---|
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import logging
import os
import torch
import torch.utils.data as TD
import torchvision
import torchvision.transforms as transforms
from utils.comm import get_world_size
from . import dataset as D
from . import samplers
from .transforms import build_transforms
from torchvision import datasets as tv_datasets
from .dataset.utils.config_args import config_tsv_dataset_args
def build_dataset(cfg, is_train=True):
"""
Arguments:
cfg: config file.
is_train (bool): whether to setup the dataset for training or testing
"""
datasets = []
for dataset_name in cfg.DATA.TRAIN if is_train else cfg.DATA.TEST:
if dataset_name.endswith('.yaml'):
args, tsv_dataset_name = config_tsv_dataset_args(
cfg, dataset_name
)
img_transforms = build_transforms(cfg, is_train)
args["transforms"] = img_transforms
dataset = getattr(D, tsv_dataset_name)(**args)
elif dataset_name == "imagenet":
if is_train:
datapath = os.path.join(cfg.DATA.PATH, 'train.zip')
data_map = os.path.join(cfg.DATA.PATH, 'train_map.txt')
else:
datapath = os.path.join(cfg.DATA.PATH, 'val.zip')
data_map = os.path.join(cfg.DATA.PATH, 'val_map.txt')
dataset = D.ZipData(
datapath, data_map,
build_transforms(cfg, is_train)
)
elif dataset_name == "imagenet-draco":
if is_train:
datapath = os.path.join(cfg.DATA.PATH, 'train-jpeg')
else:
datapath = os.path.join(cfg.DATA.PATH, 'val-jpeg')
dataset = tv_datasets.ImageFolder(datapath, transform=build_transforms(cfg, is_train))
elif dataset_name == "mnist":
dataset = torchvision.datasets.MNIST(
root=cfg.DATA.PATH, train=is_train, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
)
elif dataset_name == "cifar":
if is_train:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR10(
root=cfg.DATA.PATH, train=True, download=True,
transform=transform_train
)
else:
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR10(
root=cfg.DATA.PATH, train=False, download=True,
transform=transform_test
)
elif dataset_name == "cifar100":
if is_train:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR100(
root=cfg.DATA.PATH, train=True, download=True,
transform=transform_train
)
else:
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR100(
root=cfg.DATA.PATH, train=False, download=True,
transform=transform_test
)
else:
raise ValueError("Unimplemented dataset: {}".format(dataset_name))
datasets.append(dataset)
# for testing, return a list of datasets
if not is_train:
return datasets
# for training, concatenate all datasets into a single one
dataset = datasets[0]
if len(datasets) > 1:
dataset = TD.dataset.ConcatDataset(datasets)
return [dataset]
def make_data_sampler(dataset, shuffle, distributed, is_train, cfg):
if distributed:
if cfg.AUG.REPEATED_AUG and is_train:
logging.info('=> using repeated aug sampler')
return samplers.RASampler(dataset, shuffle=shuffle)
else:
return torch.utils.data.distributed.DistributedSampler(
dataset, rank=torch.distributed.get_rank(), shuffle=shuffle)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_epoch_data_loader(cfg, is_train=True,
drop_last=True, is_distributed=False, start_iter=0):
datasets = build_dataset(cfg, is_train)
num_gpus = get_world_size()
images_per_batch = cfg.DATALOADER.BSZ
    assert (
        images_per_batch % num_gpus == 0
    ), "DATALOADER.BSZ ({}) must be divisible by the number of GPUs ({}) used.".format(
        images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus
logger = logging.getLogger(__name__)
logger.info("Experiment with {} images per GPU".format(images_per_gpu))
if is_train:
shuffle = True
else:
        shuffle = is_distributed
data_loaders = []
for i, dataset in enumerate(datasets):
sampler = make_data_sampler(dataset, shuffle, is_distributed, is_train, cfg)
# default collator works!
num_workers = cfg.DATALOADER.WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
sampler=sampler,
batch_size=images_per_gpu,
drop_last=drop_last,
pin_memory=True,
)
data_loaders.append(data_loader)
if is_train:
# during training, a single (possibly concatenated) data_loader is returned
assert len(data_loaders) == 1
return data_loaders[0]
return data_loaders
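# Illustrative sketch (not part of the original repo): DATALOADER.BSZ above is the
# *global* batch size, split evenly across GPUs. The numbers below are made up and
# only mirror the divisibility check and per-GPU split done in make_epoch_data_loader.
def _batch_split_demo(global_bsz=1024, world_size=8):
    assert global_bsz % world_size == 0, "global batch size must be divisible by #GPUs"
    images_per_gpu = global_bsz // world_size
    return images_per_gpu   # 128 images per GPU per iteration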
| transformer-ls-master | imagenet/dat/loader.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import os
import base64
from io import BytesIO
import json
from PIL import Image
import torch.utils.data as data
from .utils.tsv_file import TSVFile
from .utils.load_files import load_linelist_file, load_from_yaml_file
from .utils.load_files import find_file_path_in_yaml, load_labelmap_file
class TSVDataset(data.Dataset):
def __init__(self, img_file, label_file=None, hw_file=None, linelist_file=None,
labelmap_file=None, transforms=None):
"""
Dataset Constructor from TSV Files
Input:
img_file: Image file with image key and base64 encoded image str.
label_file: An optional label file with image key and label information.
A label_file is required for training and optional for testing.
hw_file: An optional file with image key and image height/width info.
linelist_file: An optional file with a list of line indexes to load samples.
It is useful to select a subset of samples or duplicate samples.
"""
self.img_file = img_file
self.label_file = label_file
self.hw_file = hw_file
self.linelist_file = linelist_file
self.labelmap_file = labelmap_file
self.transforms = transforms
self.img_tsv = TSVFile(img_file)
self.label_tsv = None if label_file is None else TSVFile(label_file)
self.hw_tsv = None if hw_file is None else TSVFile(hw_file)
self.line_list = load_linelist_file(linelist_file)
self.labelmap = load_labelmap_file(labelmap_file)
def __len__(self):
if self.line_list is None:
return self.img_tsv.num_rows()
else:
return len(self.line_list)
def __getitem__(self, idx):
img = self.get_image(idx)
annotations = self.get_annotations(idx)
target = self.get_target_from_annotations(annotations)
img, target = self.apply_transforms(img, target)
return img, target, idx
def get_image(self, idx):
line_no = self.get_line_no(idx)
row = self.img_tsv.seek(line_no)
# use -1 to support old format with multiple columns.
img = Image.open(BytesIO(base64.b64decode(row[-1])))
return img.convert('RGB')
def get_line_no(self, idx):
return idx if self.line_list is None else self.line_list[idx]
def get_annotations(self, idx):
line_no = self.get_line_no(idx)
if self.label_tsv is not None:
row = self.label_tsv.seek(line_no)
else:
row = self.img_tsv.seek(line_no)
anno = row[1]
if anno.startswith('{') or anno.startswith('['):
annotations = json.loads(anno)
else:
annotations = anno
return annotations
def get_target_from_annotations(self, annotations):
# This function will be overwritten by each dataset to
# decode the labels to specific formats for each task.
if self.labelmap is not None and type(annotations)==str:
annotations = self.labelmap[annotations]
return int(annotations)
def apply_transforms(self, image, target=None):
# This function will be overwritten by each dataset to
# apply transforms to image and targets.
if self.transforms is not None:
image = self.transforms(image)
return image, target
def get_img_key(self, idx):
line_no = self.get_line_no(idx)
        # prefer the smaller hw/label TSVs here to reduce the overhead of reading each row.
if self.hw_tsv:
return self.hw_tsv.seek(line_no)[0]
elif self.label_tsv:
return self.label_tsv.seek(line_no)[0]
else:
return self.img_tsv.seek(line_no)[0]
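# Illustrative sketch (not part of the original repo): the image TSV rows consumed by
# TSVDataset are tab-separated lines of the form "<image_key>\t<base64-encoded image bytes>";
# get_image() base64-decodes the last column and opens it with PIL. The snippet below
# round-trips one in-memory row the same way; the key and image are made up.
def _tsv_row_roundtrip_demo():
    import base64
    from io import BytesIO
    from PIL import Image
    img = Image.new('RGB', (4, 4), color=(255, 0, 0))   # tiny dummy image
    buf = BytesIO()
    img.save(buf, format='JPEG')
    row = 'dummy_key\t' + base64.b64encode(buf.getvalue()).decode('utf-8')
    columns = row.split('\t')
    decoded = Image.open(BytesIO(base64.b64decode(columns[-1]))).convert('RGB')
    return columns[0], decoded.size                      # ('dummy_key', (4, 4))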
class TSVYamlDataset(TSVDataset):
""" TSVDataset taking a Yaml file for easy function call
"""
def __init__(self, yaml_file, transforms=None):
self.cfg = load_from_yaml_file(yaml_file)
self.root = os.path.dirname(yaml_file)
img_file = find_file_path_in_yaml(self.cfg['img'], self.root)
label_file = find_file_path_in_yaml(self.cfg.get('label', None),
self.root)
hw_file = find_file_path_in_yaml(self.cfg.get('hw', None), self.root)
linelist_file = find_file_path_in_yaml(self.cfg.get('linelist', None),
self.root)
labelmap_file = find_file_path_in_yaml(self.cfg.get("labelmap", None),
self.root)
super(TSVYamlDataset, self).__init__(
img_file, label_file, hw_file, linelist_file, labelmap_file,
transforms=transforms
        )
| transformer-ls-master | imagenet/dat/dataset/tsv_dataset.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import base64
from io import BytesIO
import json
from PIL import Image
from .tsv_dataset import TSVYamlDataset
class ClsTsvDataset(TSVYamlDataset):
"""
Generic TSV dataset format for Classification.
"""
def __init__(self, yaml_file, transforms=None, **kwargs):
super(ClsTsvDataset, self).__init__(yaml_file, transforms=transforms)
assert self.label_tsv is None
def __getitem__(self, idx):
line_no = self.get_line_no(idx)
row = self.img_tsv.seek(line_no)
# get image
# use -1 to support old format with multiple columns.
img = Image.open(BytesIO(base64.b64decode(row[-1])))
img = img.convert('RGB')
# get target
annotations = json.loads(row[1])
target = annotations[0]['class']
if self.labelmap is not None:
target = self.labelmap[target]
img, target = self.apply_transforms(img, int(target))
return img, target, idx
| transformer-ls-master | imagenet/dat/dataset/cls_tsv.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
import os.path as op
from zipfile import ZipFile, BadZipFile
import torch.utils.data as data
from PIL import Image
from io import BytesIO
import multiprocessing
_VALID_IMAGE_TYPES = ['.jpg', '.jpeg', '.tiff', '.bmp', '.png']
class ZipData(data.Dataset):
_IGNORE_ATTRS = {'_zip_file'}
def __init__(self, path, map_file,
transform=None, target_transform=None,
extensions=None):
self._path = path
if not extensions:
extensions = _VALID_IMAGE_TYPES
self._zip_file = ZipFile(path)
self.zip_dict = {}
self.samples = []
self.transform = transform
self.target_transform = target_transform
self.class_to_idx = {}
with open(map_file, 'r') as f:
for line in iter(f.readline, ""):
line = line.strip()
if not line:
continue
cls_idx = [l for l in line.split('\t') if l]
if not cls_idx:
continue
assert len(cls_idx) >= 2, "invalid line: {}".format(line)
idx = int(cls_idx[1])
cls = cls_idx[0]
del cls_idx
at_idx = cls.find('@')
assert at_idx >= 0, "invalid class: {}".format(cls)
cls = cls[at_idx + 1:]
if cls.startswith('/'):
# Python ZipFile expects no root
cls = cls[1:]
assert cls, "invalid class in line {}".format(line)
prev_idx = self.class_to_idx.get(cls)
assert prev_idx is None or prev_idx == idx, "class: {} idx: {} previously had idx: {}".format(
cls, idx, prev_idx
)
self.class_to_idx[cls] = idx
for fst in self._zip_file.infolist():
fname = fst.filename
target = self.class_to_idx.get(fname)
if target is None:
continue
if fname.endswith('/') or fname.startswith('.') or fst.file_size == 0:
continue
ext = op.splitext(fname)[1].lower()
if ext in extensions:
self.samples.append((fname, target))
assert len(self), "No images found in: {} with map: {}".format(self._path, map_file)
def __repr__(self):
return 'ZipData({}, size={})'.format(self._path, len(self))
def __getstate__(self):
return {
key: val if key not in self._IGNORE_ATTRS else None
            for key, val in self.__dict__.items()
}
def __getitem__(self, index):
proc = multiprocessing.current_process()
pid = proc.pid # get pid of this process.
if pid not in self.zip_dict:
self.zip_dict[pid] = ZipFile(self._path)
zip_file = self.zip_dict[pid]
if index >= len(self) or index < 0:
raise KeyError("{} is invalid".format(index))
path, target = self.samples[index]
try:
sample = Image.open(BytesIO(zip_file.read(path))).convert('RGB')
except BadZipFile:
print("bad zip file")
return None, None
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
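# Illustrative sketch (not part of the original repo): each line of map_file is
# tab-separated, with a first column of the form "<prefix>@<path inside the zip>" and a
# second column holding the integer class index; __init__ above strips everything up to
# and including '@' (and a leading '/') to recover the in-zip path. The example line
# below (including the "train.zip@" prefix and the file name) is hypothetical.
def _parse_map_line_demo(line='train.zip@/n01440764/ILSVRC2012_val_00000293.JPEG\t0'):
    cls, idx = [l for l in line.strip().split('\t') if l]
    cls = cls[cls.find('@') + 1:]
    if cls.startswith('/'):
        cls = cls[1:]
    return cls, int(idx)   # ('n01440764/ILSVRC2012_val_00000293.JPEG', 0)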
| transformer-ls-master | imagenet/dat/dataset/zipdata.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
from .tsv_dataset import TSVDataset, TSVYamlDataset
from .zipdata import ZipData
from .cls_tsv import ClsTsvDataset
__all__ = [
"TSVDataset",
"TSVYamlDataset",
"ZipData",
"ClsTsvDataset",
]
| transformer-ls-master | imagenet/dat/dataset/__init__.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import os.path as op
def config_tsv_dataset_args(cfg, dataset_file):
full_yaml_file = op.join(cfg.DATA.PATH, dataset_file)
assert op.isfile(full_yaml_file)
args = dict(
yaml_file=full_yaml_file,
)
tsv_dataset_name = "TSVYamlDataset"
if 'imagenet_22k' in dataset_file:
tsv_dataset_name = "ClsTsvDataset"
return args, tsv_dataset_name
| transformer-ls-master | imagenet/dat/dataset/utils/config_args.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import base64
import json
import os
import os.path as op
import cv2
import numpy as np
from tqdm import tqdm
from utils.miscellaneous import mkdir
from .tsv_file import TSVFile
def img_from_base64(imagestring):
try:
jpgbytestring = base64.b64decode(imagestring)
nparr = np.frombuffer(jpgbytestring, np.uint8)
r = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return r
except ValueError:
return None
def load_linelist_file(linelist_file):
if linelist_file is not None:
line_list = []
with open(linelist_file, 'r') as fp:
for i in fp:
line_list.append(int(i.strip()))
return line_list
def tsv_writer(values, tsv_file, sep='\t'):
mkdir(op.dirname(tsv_file))
lineidx_file = op.splitext(tsv_file)[0] + '.lineidx'
idx = 0
tsv_file_tmp = tsv_file + '.tmp'
lineidx_file_tmp = lineidx_file + '.tmp'
with open(tsv_file_tmp, 'w') as fp, open(lineidx_file_tmp, 'w') as fpidx:
assert values is not None
for value in values:
assert value is not None
            # This step makes sure Python 2 and Python 3 encoded image strings are written identically.
            # In Python 2 the encoded image string is a str starting with "/";
            # in Python 3 it is a bytes object starting with b"/".
            # v.decode('utf-8') converts bytes to str so the written content is the same,
            # and should only be applied to bytes.
value = [v if type(v)!=bytes else v.decode('utf-8') for v in value]
v = '{0}\n'.format(sep.join(map(str, value)))
fp.write(v)
fpidx.write(str(idx) + '\n')
idx = idx + len(v)
os.rename(tsv_file_tmp, tsv_file)
os.rename(lineidx_file_tmp, lineidx_file)
def tsv_reader(tsv_file, sep='\t'):
with open(tsv_file, 'r') as fp:
for i, line in enumerate(fp):
yield [x.strip() for x in line.split(sep)]
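# Illustrative sketch (not part of the original repo): tsv_writer above also emits a
# companion '<name>.lineidx' file whose i-th line is the byte offset of row i in the
# TSV, which is what TSVFile.seek() uses to jump straight to a row. The snippet below
# rebuilds that offset bookkeeping by hand on an in-memory example; keys and values
# are made up.
def _lineidx_demo():
    rows = [['key_0', 'hello'], ['key_1', 'world']]
    offsets, pos, blob = [], 0, ''
    for row in rows:
        line = '\t'.join(row) + '\n'
        offsets.append(pos)        # offset where this row starts
        blob += line
        pos += len(line)
    # jumping to row 1 via its recorded offset reproduces the row
    assert blob[offsets[1]:].split('\n')[0].split('\t') == rows[1]
    return offsets                 # e.g. [0, 12]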
def config_save_file(tsv_file, save_file=None, append_str='.new.tsv'):
if save_file is not None:
return save_file
return op.splitext(tsv_file)[0] + append_str
def get_line_list(linelist_file=None, num_rows=None):
if linelist_file is not None:
return load_linelist_file(linelist_file)
if num_rows is not None:
return [i for i in range(num_rows)]
def generate_hw_file(img_file, save_file=None):
rows = tsv_reader(img_file)
def gen_rows():
for i, row in tqdm(enumerate(rows)):
row1 = [row[0]]
img = img_from_base64(row[-1])
height = img.shape[0]
width = img.shape[1]
row1.append(json.dumps([{"height":height, "width": width}]))
yield row1
save_file = config_save_file(img_file, save_file, '.hw.tsv')
tsv_writer(gen_rows(), save_file)
def generate_labelmap_file(label_file, save_file=None):
rows = tsv_reader(label_file)
labelmap = []
for i, row in enumerate(rows):
labelmap.extend(set([rect['class'] for rect in json.loads(row[1])]))
labelmap = sorted(list(set(labelmap)))
save_file = config_save_file(label_file, save_file, '.labelmap.tsv')
with open(save_file, 'w') as f:
f.write('\n'.join(labelmap))
def extract_column(tsv_file, col=1, save_file=None):
rows = tsv_reader(tsv_file)
def gen_rows():
for i, row in enumerate(rows):
row1 = [row[0], row[col]]
yield row1
save_file = config_save_file(tsv_file, save_file, '.col.{}.tsv'.format(col))
tsv_writer(gen_rows(), save_file)
def remove_column(tsv_file, col=1, save_file=None):
rows = tsv_reader(tsv_file)
def gen_rows():
for i, row in enumerate(rows):
del row[col]
yield row
save_file = config_save_file(tsv_file, save_file, '.remove.{}.tsv'.format(col))
tsv_writer(gen_rows(), save_file)
def generate_linelist_file(label_file, save_file=None, ignore_attrs=()):
    # generate a list of images that have labels;
    # images with only ignored labels are not selected.
line_list = []
rows = tsv_reader(label_file)
for i, row in tqdm(enumerate(rows)):
labels = json.loads(row[1])
if labels:
if ignore_attrs and all([any([lab[attr] for attr in ignore_attrs if attr in lab]) \
for lab in labels]):
continue
line_list.append([i])
save_file = config_save_file(label_file, save_file, '.linelist.tsv')
tsv_writer(line_list, save_file)
def random_drop_labels(label_file, drop_ratio, linelist_file=None,
save_file=None, drop_image=False):
    # randomly drop labels at the given ratio.
    # if drop_image is True, an image may be dropped entirely by removing all of its labels;
    # otherwise at least one label is kept per image so the number of images stays the same.
rows = tsv_reader(label_file)
line_list = get_line_list(linelist_file)
rows_new = []
cnt_original = 0
cnt_new = 0
for i, row in enumerate(rows):
if line_list and (i not in line_list):
row_new = [row[0], json.dumps([])]
else:
labels = json.loads(row[1])
if len(labels) == 0:
labels_new = []
else:
rand = np.random.random(len(labels))
labels_new = [obj for j, obj in enumerate(labels) if rand[j]>=drop_ratio]
if not drop_image and not labels_new:
# make sure there is at least one label if drop image is not allowed
labels_new = [labels[0]]
cnt_original += len(labels)
cnt_new += len(labels_new)
row_new = [row[0], json.dumps(labels_new)]
rows_new.append(row_new)
save_file = config_save_file(label_file, save_file, '.drop.{}.tsv'.format(drop_ratio))
tsv_writer(rows_new, save_file)
print("original labels = {}".format(cnt_original))
print("new labels = {}".format(cnt_new))
print("given drop_ratio = {}".format(drop_ratio))
print("real drop_ratio = {}".format(float(cnt_original - cnt_new) / cnt_original))
def merge_two_label_files(label_file1, label_file2, save_file=None):
rows1 = tsv_reader(label_file1)
rows2 = tsv_reader(label_file2)
rows_new = []
for row1, row2 in zip(rows1, rows2):
assert row1[0] == row2[0]
labels = json.loads(row1[1]) + json.loads(row2[1])
rows_new.append([row1[0], json.dumps(labels)])
save_file = config_save_file(label_file1, save_file, '.merge.tsv')
tsv_writer(rows_new, save_file)
def is_same_keys_for_files(tsv_file1, tsv_file2, linelist_file1=None,
linelist_file2=None):
# check if two files have the same keys for all rows
tsv1 = TSVFile(tsv_file1)
tsv2 = TSVFile(tsv_file2)
line_list1 = get_line_list(linelist_file1, tsv1.num_rows())
line_list2 = get_line_list(linelist_file2, tsv2.num_rows())
assert len(line_list1) == len(line_list2)
for idx1, idx2 in zip(line_list1, line_list2):
row1 = tsv1.seek(idx1)
row2 = tsv2.seek(idx2)
if row1[0] == row2[0]:
continue
else:
print("key mismatch {}-{}".format(row1[0], row2[0]))
return False
return True
def sort_file_based_on_keys(ref_file, tsv_file, save_file=None):
# sort tsv_file to have the same key in each row as ref_file
if is_same_keys_for_files(ref_file, tsv_file):
print("file keys are the same, skip sorting")
return tsv_file
ref_keys = [row[0] for row in tsv_reader(ref_file)]
all_keys = [row[0] for row in tsv_reader(tsv_file)]
indexes = [all_keys.index(key) for key in ref_keys]
tsv = TSVFile(tsv_file)
def gen_rows():
for idx in indexes:
yield tsv.seek(idx)
save_file = config_save_file(tsv_file, save_file, '.sorted.tsv')
tsv_writer(gen_rows(), save_file)
def reorder_tsv_keys(in_tsv_file, ordered_keys, out_tsv_file):
tsv = TSVFile(in_tsv_file)
keys = [tsv.seek(i)[0] for i in tqdm(range(len(tsv)))]
key_to_idx = {key: i for i, key in enumerate(keys)}
def gen_rows():
for key in tqdm(ordered_keys):
idx = key_to_idx[key]
yield tsv.seek(idx)
tsv_writer(gen_rows(), out_tsv_file)
def reorder_tsv_keys_with_file(in_tsv_file, ref_tsv_file, out_tsv_file):
ordered_keys = [row[0] for row in tsv_reader(ref_tsv_file)]
reorder_tsv_keys(in_tsv_file, ordered_keys, out_tsv_file)
def convert_caption_json_to_tsv(caption_json_file, key_tsv_file, out_tsv_file):
keys = [row[0] for row in tsv_reader(key_tsv_file)]
rows_dict = {key : [] for key in keys}
with open(caption_json_file, 'r') as f:
captions = json.load(f)
for cap in captions:
image_id = cap['image_id']
del cap['image_id']
if image_id in rows_dict:
rows_dict[image_id].append(cap)
rows = [[key, json.dumps(rows_dict[key])] for key in keys]
tsv_writer(rows, out_tsv_file)
def merge_label_fields(in_tsv1, in_tsv2, out_tsv):
# merge the label fields for each box
def gen_rows():
for row1, row2 in tqdm(zip(tsv_reader(in_tsv1), tsv_reader(in_tsv2))):
assert row1[0] == row2[0]
label_info1 = json.loads(row1[1])
label_info2 = json.loads(row2[1])
assert len(label_info1) == len(label_info2)
for lab1, lab2 in zip(label_info1, label_info2):
lab1.update(lab2)
yield [row1[0], json.dumps(label_info1)]
tsv_writer(gen_rows(), out_tsv)
def remove_label_fields(in_tsv, out_tsv, remove_fields):
if type(remove_fields) == str:
remove_fields = [remove_fields]
assert type(remove_fields) == list
def gen_rows():
for row in tqdm(tsv_reader(in_tsv)):
label_info = json.loads(row[1])
for lab in label_info:
for field in remove_fields:
if field in lab:
del lab[field]
yield [row[0], json.dumps(label_info)]
tsv_writer(gen_rows(), out_tsv)
def random_permute_label_file(in_tsv, out_tsv):
# take a label file as input and randomly match image
# with the label from a different image
tsv = TSVFile(in_tsv)
random_index = np.random.permutation(tsv.num_rows())
def gen_rows():
for idx, rand_idx in enumerate(random_index):
key = tsv.seek(idx)[0]
labels = tsv.seek(rand_idx)[1]
yield [key, labels]
tsv_writer(gen_rows(), out_tsv)
# save the random index for reference
save_file = op.splitext(out_tsv)[0] + '.random_index.tsv'
with open(save_file, 'w') as f:
f.write('\n'.join([str(idx) for idx in random_index]))
| transformer-ls-master | imagenet/dat/dataset/utils/tsv_file_ops.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import os
import os.path as op
import errno
import yaml
from collections import OrderedDict
def load_labelmap_file(labelmap_file):
label_dict = None
if labelmap_file is not None and op.isfile(labelmap_file):
label_dict = OrderedDict()
with open(labelmap_file, 'r') as fp:
for line in fp:
item = line.strip().split('\t')
label = item[0]
if label in label_dict:
raise ValueError("Duplicate label " + label + " in labelmap.")
else:
if len(item) >= 2:
label_dict[label] = int(item[1])
else:
label_dict[label] = len(label_dict)
return label_dict
def config_dataset_file(data_dir, dataset_file):
if dataset_file:
if op.isfile(dataset_file):
dataset_file = dataset_file
elif op.isfile(op.join(data_dir, dataset_file)):
dataset_file = op.join(data_dir, dataset_file)
else:
raise ValueError("cannot find file: {}".format(dataset_file))
return dataset_file
def load_linelist_file(linelist_file):
if linelist_file is not None:
line_list = []
with open(linelist_file, 'r') as fp:
for i in fp:
line_list.append(int(i.strip()))
return line_list
def load_box_linelist_file(linelist_file):
if linelist_file is not None:
img_line_list = []
box_line_list = []
with open(linelist_file, 'r') as fp:
for i in fp:
idx = [int(_) for _ in i.strip().split('\t')]
img_line_list.append(idx[0])
box_line_list.append(idx[1])
return [img_line_list, box_line_list]
def load_from_yaml_file(yaml_file):
with open(yaml_file, 'r') as fp:
return yaml.load(fp, Loader=yaml.CLoader)
def find_file_path_in_yaml(fname, root):
if fname is not None:
if op.isfile(fname):
return fname
elif op.isfile(op.join(root, fname)):
return op.join(root, fname)
else:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), op.join(root, fname)
)
| transformer-ls-master | imagenet/dat/dataset/utils/load_files.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
import logging
import os
import os.path as op
def create_lineidx(filein, idxout):
idxout_tmp = idxout + '.tmp'
with open(filein, 'r') as tsvin, open(idxout_tmp,'w') as tsvout:
fsize = os.fstat(tsvin.fileno()).st_size
fpos = 0
while fpos!=fsize:
tsvout.write(str(fpos)+"\n")
tsvin.readline()
fpos = tsvin.tell()
os.rename(idxout_tmp, idxout)
def read_to_character(fp, c):
result = []
while True:
s = fp.read(32)
assert s != ''
if c in s:
result.append(s[: s.index(c)])
break
else:
result.append(s)
return ''.join(result)
class TSVFile(object):
def __init__(self, tsv_file, generate_lineidx=False):
self.tsv_file = tsv_file
self.lineidx = op.splitext(tsv_file)[0] + '.lineidx'
self._fp = None
self._lineidx = None
        # remember the pid of the process that opened the file.
        # If it differs from the current pid (e.g. in a forked DataLoader worker), re-open the file.
self.pid = None
# generate lineidx if not exist
if not op.isfile(self.lineidx) and generate_lineidx:
create_lineidx(self.tsv_file, self.lineidx)
def __del__(self):
if self._fp:
self._fp.close()
def __str__(self):
return "TSVFile(tsv_file='{}')".format(self.tsv_file)
def __repr__(self):
return str(self)
def num_rows(self):
self._ensure_lineidx_loaded()
return len(self._lineidx)
def seek(self, idx):
self._ensure_tsv_opened()
self._ensure_lineidx_loaded()
try:
pos = self._lineidx[idx]
except:
logging.info('{}-{}'.format(self.tsv_file, idx))
raise
self._fp.seek(pos)
return [s.strip() for s in self._fp.readline().split('\t')]
def seek_first_column(self, idx):
self._ensure_tsv_opened()
self._ensure_lineidx_loaded()
pos = self._lineidx[idx]
self._fp.seek(pos)
return read_to_character(self._fp, '\t')
def get_key(self, idx):
return self.seek_first_column(idx)
def __getitem__(self, index):
return self.seek(index)
def __len__(self):
return self.num_rows()
def _ensure_lineidx_loaded(self):
if self._lineidx is None:
# logging.info('loading lineidx: {}'.format(self.lineidx))
with open(self.lineidx, 'r') as fp:
self._lineidx = [int(i.strip()) for i in fp.readlines()]
def _ensure_tsv_opened(self):
if self._fp is None:
self._fp = open(self.tsv_file, 'r')
self.pid = os.getpid()
if self.pid != os.getpid():
logging.info('re-open {} because the process id changed'.format(self.tsv_file))
self._fp = open(self.tsv_file, 'r')
self.pid = os.getpid()
| transformer-ls-master | imagenet/dat/dataset/utils/tsv_file.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from timm.data import create_transform
from PIL import ImageFilter
import logging
import random
import torchvision.transforms as T
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def get_resolution(original_resolution):
"""Takes (H,W) and returns (precrop, crop)."""
area = original_resolution[0] * original_resolution[1]
return (160, 128) if area < 96*96 else (512, 480)
def build_transforms(cfg, is_train=True):
if cfg.MODEL.ARCH.startswith('inception'):
assert cfg.INPUT.IMAGE_SIZE == 299, "Invalid image size for Inception models!"
if cfg.AUG.TIMM_AUG.USE_TRANSFORM and is_train:
logging.info('=> use timm transform for training')
timm_cfg = cfg.AUG.TIMM_AUG
transforms = create_transform(
input_size=cfg.INPUT.IMAGE_SIZE,
is_training=True,
use_prefetcher=False,
no_aug=False,
re_prob=timm_cfg.RE_PROB,
re_mode=timm_cfg.RE_MODE,
re_count=timm_cfg.RE_COUNT,
scale=cfg.AUG.SCALE,
ratio=cfg.AUG.RATIO,
hflip=timm_cfg.HFLIP,
vflip=timm_cfg.VFLIP,
color_jitter=timm_cfg.COLOR_JITTER,
auto_augment=timm_cfg.AUTO_AUGMENT,
interpolation='bicubic' if cfg.INPUT.INTERPOLATION==3 else 'bilinear',
mean=cfg.INPUT.MEAN,
std=cfg.INPUT.STD,
)
return transforms
# assert isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)), 'DATASET.OUTPUT_SIZE should be list or tuple'
normalize = T.Normalize(mean=cfg.INPUT.MEAN, std=cfg.INPUT.STD)
transforms = None
if is_train:
if cfg.FINETUNE.FINETUNE and not cfg.FINETUNE.USE_TRAIN_AUG:
# precrop, crop = get_resolution(cfg.INPUT.IMAGE_SIZE)
crop = cfg.INPUT.IMAGE_SIZE
precrop = int(crop / cfg.INPUT.CROP_PCT)
transforms = T.Compose([
T.Resize(precrop,
interpolation=cfg.INPUT.INTERPOLATION
),
T.RandomCrop((crop, crop)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
])
else:
aug = cfg.AUG
scale = aug.SCALE
ratio = aug.RATIO
ts = [
T.RandomResizedCrop(
cfg.INPUT.IMAGE_SIZE, scale=scale, ratio=ratio, interpolation=cfg.INPUT.INTERPOLATION
),
T.RandomHorizontalFlip(),
]
cj = aug.COLOR_JITTER
if cj[-1] > 0.0:
ts.append(T.RandomApply([T.ColorJitter(*cj[:-1])], p=cj[-1]))
gs = aug.GRAY_SCALE
if gs > 0.0:
ts.append(T.RandomGrayscale(gs))
gb = aug.GAUSSIAN_BLUR
if gb > 0.0:
ts.append(T.RandomApply([GaussianBlur([.1, 2.])], p=gb))
ts.append(T.ToTensor())
ts.append(normalize)
transforms = T.Compose(ts)
else:
transforms = T.Compose([
T.Resize(int(cfg.INPUT.IMAGE_SIZE / cfg.INPUT.CROP_PCT), interpolation=cfg.INPUT.INTERPOLATION),
T.CenterCrop(cfg.INPUT.IMAGE_SIZE),
T.ToTensor(),
normalize,
])
return transforms
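# Illustrative sketch (not part of the original repo): for evaluation the pipeline above
# resizes to IMAGE_SIZE / CROP_PCT and then center-crops to IMAGE_SIZE. With the common
# (hypothetical here) settings IMAGE_SIZE=224 and CROP_PCT=0.875 this is Resize(256)
# followed by CenterCrop(224).
def _eval_sizes_demo(image_size=224, crop_pct=0.875):
    precrop = int(image_size / crop_pct)   # 256
    return precrop, image_size             # (256, 224)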
| transformer-ls-master | imagenet/dat/transforms/build.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
from .build import build_transforms
| transformer-ls-master | imagenet/dat/transforms/__init__.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
from .ra_sampler import RASampler
__all__ = ["RASampler"]
| transformer-ls-master | imagenet/dat/samplers/__init__.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a sample will be visible to a
    different process (GPU).
Heavily based on torch.utils.data.DistributedSampler
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
        # repeat each sample 3 times for repeated augmentation
        indices = [ele for ele in indices for i in range(3)]
        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
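# Illustrative sketch (not part of the original repo): with repeated augmentation each
# dataset index is repeated 3 times and the repeats are dealt out round-robin across
# replicas, so within one iteration the augmented copies of a sample land on different
# GPUs. Tiny numeric example with 6 samples and 3 replicas; all numbers are made up.
def _ra_sharding_demo(num_samples=6, num_replicas=3):
    indices = list(range(num_samples))
    indices = [ele for ele in indices for _ in range(3)]        # repeat each index 3x
    shards = [indices[rank::num_replicas] for rank in range(num_replicas)]
    return shards   # every rank gets [0, 1, 2, 3, 4, 5]: same images, independent augmentations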
| transformer-ls-master | imagenet/dat/samplers/ra_sampler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
import math
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
        if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
class WarmupCosineAnnealingLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
max_iter,
min_lr=0,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.max_iter = max_iter
self.min_lr = min_lr
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr * warmup_factor
for base_lr in self.base_lrs
]
else:
return [
self.min_lr + (base_lr - self.min_lr) *
(1 + math.cos(math.pi * self.last_epoch / self.max_iter)) / 2
for base_lr in self.base_lrs
]
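# Illustrative sketch (not part of the original repo): the schedule above is a linear
# ramp from warmup_factor * base_lr to base_lr over warmup_iters steps, followed by a
# cosine decay from base_lr to min_lr at max_iter (measured from step 0, as in get_lr).
# The helper below evaluates the same formula for a single base_lr; all numbers are
# hypothetical.
def _warmup_cosine_lr(step, base_lr=0.1, min_lr=0.0, warmup_factor=1.0 / 3,
                      warmup_iters=500, max_iter=10000):
    if step < warmup_iters:
        alpha = step / warmup_iters
        return base_lr * (warmup_factor * (1 - alpha) + alpha)
    return min_lr + (base_lr - min_lr) * (1 + math.cos(math.pi * step / max_iter)) / 2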
class WarmupLinearSchedule(torch.optim.lr_scheduler._LRScheduler):
""" Warmup and then linear decay.
Linearly decreases learning rate from 1. to min_lr
over remaining `t_total - warmup_steps` steps.
"""
def __init__(self,
optimizer,
max_iter,
min_lr=0.0,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1):
self.max_iter = max_iter
self.min_lr = min_lr
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupLinearSchedule, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr * warmup_factor
for base_lr in self.base_lrs
]
else:
rate = max(0.0, float(self.max_iter - self.last_epoch) /
float(max(1.0, self.max_iter - self.warmup_iters)))
return [
self.min_lr + rate * (base_lr - self.min_lr)
for base_lr in self.base_lrs
            ]
| transformer-ls-master | imagenet/optim/lr_scheduler.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupMultiStepSchedule(LambdaLR):
""" Linear warmup and then decrease at multiple steps.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Reduce LR at specific steps by a given ratio after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, decay_steps, decay_ratio=0.1, last_epoch=-1):
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.decay_ratio = decay_ratio
super(WarmupMultiStepSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
ratio = 1.0
for decay_step in self.decay_steps:
if step > decay_step:
ratio *= self.decay_ratio
return ratio
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
""" Linear warmup and then cosine cycles with hard restarts.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
learning rate (with hard restarts).
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1. + math.cos(math.pi * ((float(self.cycles) * progress) % 1.0))))
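# Illustrative sketch (not part of the original repo): these schedules are plain LambdaLR
# wrappers, driven by an explicit scheduler.step() per iteration. Minimal usage with a
# throwaway parameter; all numbers are hypothetical.
def _warmup_cosine_usage_demo():
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    scheduler = WarmupCosineSchedule(optimizer, warmup_steps=10, t_total=100)
    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step()
        lrs.append(optimizer.param_groups[0]['lr'])
    return lrs[:3], lrs[-1]   # ramps up during warmup, decays towards 0 at the end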
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
correct_bias=correct_bias)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = group['lr']
if group['correct_bias']: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state['step']
bias_correction2 = 1.0 - beta2 ** state['step']
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group['weight_decay'] > 0.0:
p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
return loss
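# Illustrative sketch (not part of the original repo): the decoupled weight decay used
# above shrinks the parameter towards zero *after* the Adam update, instead of folding
# an L2 term into the gradient where the m/v statistics would adapt it away. One decay
# step on a scalar parameter, with made-up values:
def _decoupled_decay_demo(p=1.0, adam_update=-0.001, lr=1e-3, weight_decay=0.05):
    p = p + adam_update                 # Adam step computed from exp_avg / denom
    p = p - lr * weight_decay * p       # decay applied directly to the weights
    return p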
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(adam_step, alpha=-step_size * trust_ratio)
        return loss
| transformer-ls-master | imagenet/optim/optimization.py |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import logging
from .optimization import AdamW, Lamb
from .lr_scheduler import WarmupMultiStepLR
from .lr_scheduler import WarmupCosineAnnealingLR
from .lr_scheduler import WarmupLinearSchedule
from .qhm import QHM
def get_opt(cfg, net, resume=False):
# get optimizer
lr = cfg.OPTIM.LR
momentum = cfg.OPTIM.MOM
# get trainable parameter
# default no decay parameters for resnets
no_decay = ['bn.bias', 'bn.weight', 'bn1.bias', 'bn1.weight',
'bn2.bias', 'bn2.weight', 'bn3.bias', 'bn3.weight']
net0 = net.module if hasattr(net, 'module') else net
if hasattr(net0, 'no_weight_decay'):
no_decay = list(net0.no_weight_decay())
params = [
{'params': [p for n, p in net.named_parameters() if
p.requires_grad and not any(nd in n for nd in no_decay)
],
'weight_decay': cfg.OPTIM.WD},
{'params': [p for n, p in net.named_parameters() if
p.requires_grad and any(nd in n for nd in no_decay)
],
'weight_decay': cfg.OPTIM.WD0,
'do_stats': False}
]
print("Parameters without weight decay:")
print([n for n, p in net.named_parameters() if
p.requires_grad and any(nd in n for nd in no_decay)])
if resume:
for param in params:
param['initial_lr'] = lr
if cfg.OPTIM.OPT == 'sgd':
optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum,
weight_decay=cfg.OPTIM.WD)
elif cfg.OPTIM.OPT == 'qhm':
optimizer = QHM(params, lr=cfg.OPTIM.LR, momentum=momentum,
qhm_nu=cfg.OPTIM.NU, weight_decay=cfg.OPTIM.WD)
elif cfg.OPTIM.OPT == 'adam':
optimizer = torch.optim.Adam(params, lr=cfg.OPTIM.LR,
betas=(cfg.OPTIM.ADAM.BETA1, cfg.OPTIM.ADAM.BETA2),
weight_decay=cfg.OPTIM.WD)
elif cfg.OPTIM.OPT == 'lamb':
optimizer = Lamb(params, lr=lr, eps=cfg.OPTIM.ADAM.EPS)
logging.info("Using optimizer {}".format(cfg.OPTIM.OPT))
elif cfg.OPTIM.OPT == 'adamw':
optimizer = AdamW(params, lr=lr, eps=cfg.OPTIM.ADAM.EPS)
logging.info("Using optimizer {}".format(cfg.OPTIM.OPT))
else:
raise ValueError("Optimizer {} not supported!".format(cfg.OPTIM.OPT))
return optimizer
def get_lr_scheduler(cfg, optimizer, last_iter=-1):
lr_policy = cfg.SOLVER.LR_POLICY
epoch_based = cfg.SOLVER.EPOCH_BASED_SCHEDULE
if epoch_based:
warmup_iters = cfg.SOLVER.WARMUP_EPOCHS
max_iters = int(cfg.OPTIM.EPOCHS)
else:
warmup_iters = int(cfg.SOLVER.WARMUP_EPOCHS * cfg.SOLVER.STEPS_PER_EPOCH)
max_iters = cfg.SOLVER.MAX_ITER
if lr_policy not in ("multistep", "cosine", 'linear'):
logging.warning(
"Only 'multistep', 'cosine' or 'linear' lr policy is accepted, "
"got {}".format(lr_policy)
)
return None
if lr_policy == "multistep":
if epoch_based:
steps = tuple(range(cfg.OPTIM.DROP_FREQ, cfg.OPTIM.EPOCHS,
cfg.OPTIM.DROP_FREQ))
else:
steps = tuple([epoch*cfg.SOLVER.STEPS_PER_EPOCH for epoch in
range(cfg.OPTIM.DROP_FREQ, cfg.OPTIM.EPOCHS, cfg.OPTIM.DROP_FREQ)])
logging.info("Using scheduler {}".format(lr_policy))
return WarmupMultiStepLR(
optimizer,
steps,
1.0/cfg.OPTIM.DROP_FACTOR,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=warmup_iters,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
last_epoch=last_iter
)
elif lr_policy == "cosine":
logging.info("Using scheduler {}".format(lr_policy))
return WarmupCosineAnnealingLR(
optimizer,
max_iters,
cfg.SOLVER.MIN_LR,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=warmup_iters,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
last_epoch=last_iter
)
elif lr_policy == "linear":
logging.info("Using scheduler {}".format(lr_policy))
return WarmupLinearSchedule(
optimizer,
max_iters,
cfg.SOLVER.MIN_LR,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=warmup_iters,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
last_epoch=last_iter
)
| transformer-ls-master | imagenet/optim/__init__.py |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from torch.optim import Optimizer
class QHM(Optimizer):
r"""
Stochastic gradient method with Quasi-Hyperbolic Momentum (QHM):
h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
d(k) = (1 - \nu) * g(k) + \nu * h(k)
x(k+1) = x(k) - \alpha * d(k)
"Quasi-hyperbolic momentum and Adam for deep learning"
by Jerry Ma and Denis Yarats, ICLR 2019
optimizer = QHM(params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0)
Args:
params (iterable): iterable params to optimize or dict of param groups
lr (float): learning rate, \alpha in QHM update (default:-1 need input)
momentum (float, optional): \beta in QHM update, range[0,1) (default:0)
qhm_nu (float, optional): \nu in QHM update, range[0,1] (default: 1)
\nu = 0: SGD without momentum (\beta is ignored)
\nu = 1: SGD with momentum \beta and dampened gradient (1-\beta)
\nu = \beta: SGD with "Nesterov momentum" \beta
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> optimizer = torch.optim.QHM(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
"""
def __init__(self, params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0):
# nu can take values outside of the interval [0,1], but no guarantee of convergence?
if lr <= 0:
raise ValueError("Invalid value for learning rate (>0): {}".format(lr))
if momentum < 0 or momentum > 1:
raise ValueError("Invalid value for momentum [0,1): {}".format(momentum))
if weight_decay < 0:
raise ValueError("Invalid value for weight_decay (>=0): {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, qhm_nu=qhm_nu, weight_decay=weight_decay)
super(QHM, self).__init__(params, defaults)
# extra_buffer == True only in SSLS with momentum > 0 and nu != 1
self.state['allocate_step_buffer'] = False
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates model and returns loss.
"""
loss = None
if closure is not None:
loss = closure()
self.add_weight_decay()
self.qhm_direction()
self.qhm_update()
return loss
def add_weight_decay(self):
# weight_decay is the same as adding L2 regularization
for group in self.param_groups:
weight_decay = group['weight_decay']
for p in group['params']:
if p.grad is None:
continue
if weight_decay > 0:
p.grad.data.add_(p.data, alpha=weight_decay)
def qhm_direction(self):
for group in self.param_groups:
momentum = group['momentum']
qhm_nu = group['qhm_nu']
for p in group['params']:
if p.grad is None:
continue
x = p.data # Optimization parameters
g = p.grad.data # Stochastic gradient
                # Compute the (negative) step direction d and necessary momentum
state = self.state[p]
if abs(momentum) < 1e-12 or abs(qhm_nu) < 1e-12: # simply SGD if beta=0 or nu=0
d = state['step_buffer'] = g
else:
if 'momentum_buffer' not in state:
h = state['momentum_buffer'] = torch.zeros_like(x)
else:
h = state['momentum_buffer']
# Update momentum buffer: h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
h.mul_(momentum).add_(g, alpha=1 - momentum)
if abs(qhm_nu - 1) < 1e-12: # if nu=1, then same as SGD with momentum
d = state['step_buffer'] = h
else:
if self.state['allocate_step_buffer']: # copy from gradient
if 'step_buffer' not in state:
state['step_buffer'] = torch.zeros_like(g)
d = state['step_buffer'].copy_(g)
else: # use gradient buffer
d = state['step_buffer'] = g
# Compute QHM momentum: d(k) = (1 - \nu) * g(k) + \nu * h(k)
d.mul_(1 - qhm_nu).add_(h, alpha=qhm_nu)
def qhm_update(self):
"""
Perform QHM update, need to call compute_qhm_direction() before calling this.
"""
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
p.data.add_(self.state[p]['step_buffer'], alpha=-group['lr'])
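# Illustrative sketch (not part of the original repo): one scalar QHM step, following
# the update equations in the class docstring. With nu=0 it reduces to plain SGD and
# with nu=1 to SGD with dampened momentum; the values below are made up.
def _qhm_step_demo(x=1.0, g=0.5, h=0.0, lr=0.1, beta=0.9, nu=0.7):
    h = (1 - beta) * g + beta * h       # h(k) = (1 - beta) * g(k) + beta * h(k-1)
    d = (1 - nu) * g + nu * h           # d(k) = (1 - nu) * g(k) + nu * h(k)
    x = x - lr * d                      # x(k+1) = x(k) - alpha * d(k)
    return x, h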
| transformer-ls-master | imagenet/optim/qhm.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import os
import torch
from .comm import is_main_process
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
# self.series = []
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
# self.series.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def last_value(self):
return self.deque[-1]
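# Illustrative sketch (not part of the original repo): SmoothedValue keeps a sliding
# window for median/avg plus running totals for the global average. Made-up values:
def _smoothed_value_demo():
    sv = SmoothedValue(window_size=3)
    for v in [1.0, 2.0, 3.0, 4.0]:
        sv.update(v)
    # the window now holds [2.0, 3.0, 4.0]; the global average still covers all 4 updates
    return sv.median, sv.avg, sv.global_avg   # (3.0, 3.0, 2.5)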
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = {}
self.params = {}
self.delimiter = delimiter
def update_params(self, update_dict):
for param_group, group_dict in update_dict.items():
if param_group not in self.params:
self.params[param_group] = {}
for param_name, param_value in group_dict.items():
# skipping parameters if they start with '_'
if param_name.startswith('_'):
continue
if isinstance(param_value, torch.Tensor):
param_value = param_value.item()
assert isinstance(param_value, (float, int))
self.params[param_group][param_name] = param_value
def update_metrics(self, update_dict):
for metric_group, group_dict in update_dict.items():
if metric_group not in self.meters:
self.meters[metric_group] = defaultdict(SmoothedValue)
for metric_name, metric_value in group_dict.items():
# skipping metrics if they start with '_'
if metric_name.startswith('_'):
continue
if isinstance(metric_value, torch.Tensor):
metric_value = metric_value.item()
assert isinstance(metric_value, (float, int))
self.meters[metric_group][metric_name].update(metric_value)
def get_logs(self, iteration):
return_str = []
if len(self.meters) > 0:
offset_m = max([len(group_name) for group_name in self.meters.keys()])
else:
offset_m = 0
if len(self.params) > 0:
offset_p = max([len(group_name) for group_name in self.params.keys()])
else:
offset_p = 0
offset = max(offset_m, offset_p)
for group_name, values in sorted(self.meters.items(),
key=lambda x: x[0]):
loss_str = []
for name, meter in values.items():
loss_str.append("{}: {:.4f} ({:.4f})".format(
name, meter.median, meter.global_avg,
))
return_str.append(
"{:{offset}s} - {}".format(
group_name, self.delimiter.join(loss_str), offset=offset,
),
)
for group_name, values in self.params.items():
loss_str = []
for name, param in values.items():
loss_str.append("{}: {:.6f}".format(name, param))
return_str.append(
"{:{offset}s} - {}".format(
group_name, self.delimiter.join(loss_str), offset=offset,
),
)
return "\n ".join(return_str)
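# --- Usage sketch (illustrative, not part of the original file) ---
# Both update_* methods take a nested dict of the form {group_name: {name: value}};
# values may be Python numbers or 0-dim torch tensors.
#
#   logger = MetricLogger()
#   logger.update_metrics({'train': {'loss': 0.42, 'top1': 71.3}})
#   logger.update_params({'optim': {'lr': 1e-3}})
#   print(logger.get_logs(iteration=100))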
class TensorboardLogger(MetricLogger):
def __init__(self,
log_dir,
delimiter='\t'):
super(TensorboardLogger, self).__init__(delimiter)
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError(
'To use tensorboard please install tensorboardX '
'[ pip install tensorboardx ].'
)
if is_main_process():
self.tb_logger = SummaryWriter(log_dir)
self.tb_logger_avg = SummaryWriter(os.path.join(log_dir, 'avg'))
self.tb_logger_med = SummaryWriter(os.path.join(log_dir, 'med'))
else:
self.tb_logger = None
self.tb_logger_avg = None
self.tb_logger_med = None
def get_logs(self, iteration):
if self.tb_logger:
for group_name, values in self.meters.items():
for name, meter in values.items():
self.tb_logger.add_scalar(
'{}/{}'.format(group_name, name),
meter.last_value, iteration,
)
self.tb_logger_avg.add_scalar(
'{}/{}'.format(group_name, name),
meter.avg, iteration,
)
self.tb_logger_med.add_scalar(
'{}/{}'.format(group_name, name),
meter.median, iteration,
)
for group_name, values in self.params.items():
for name, param in values.items():
self.tb_logger.add_scalar(
'{}/{}'.format(group_name, name),
param, iteration,
)
return super(TensorboardLogger, self).get_logs(iteration)
def close(self):
if is_main_process():
self.tb_logger.close()
self.tb_logger_avg.close()
self.tb_logger_med.close()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
| transformer-ls-master | imagenet/utils/metric_logger.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
import os
import math
import logging
import shutil
import torch
from collections import OrderedDict
from .comm import is_main_process
import torch.distributed as dist
# def is_dist_avail_and_initialized():
# if not dist.is_available():
# return False
# if not dist.is_initialized():
# return False
# return True
#
# def get_rank():
# if not is_dist_avail_and_initialized():
# return 0
# return dist.get_rank()
#
# def is_main_process():
# return get_rank() == 0
def strip_prefix_if_present(state_dict, prefix):
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
return stripped_state_dict
def resize_pos_embed_1d(posemb, shape_new):
# Rescale the grid of position embeddings when loading from state_dict.
ntok_old = posemb.shape[1]
if ntok_old > 1:
ntok_new = shape_new[1]
posemb_grid = posemb.permute(0, 2, 1).unsqueeze(dim=-1)
posemb_grid = torch.nn.functional.interpolate(posemb_grid, size=[ntok_new, 1], mode='bilinear')
posemb_grid = posemb_grid.squeeze(dim=-1).permute(0, 2, 1)
posemb = posemb_grid
return posemb
def resize_pos_embed_2d(posemb, shape_new):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = shape_new[0]
gs_old = int(math.sqrt(len(posemb))) # 2 * w - 1
gs_new = int(math.sqrt(ntok_new)) # 2 * w - 1
posemb_grid = posemb.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = torch.nn.functional.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(gs_new * gs_new, -1)
return posemb_grid
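# --- Usage sketch (illustrative, not part of the original file) ---
# resize_pos_embed_2d bilinearly rescales a square relative-position table,
# e.g. when the window size changes between pre-training and fine-tuning:
#
#   old_table = torch.randn(13 * 13, 6)                # (2*7-1)**2 positions, 6 heads
#   new_table = resize_pos_embed_2d(old_table, (27 * 27, 6))
#   new_table.shape                                    # torch.Size([729, 6])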
def align_and_update_state_dicts(model_state_dict, loaded_state_dict, skip_unmatched_layers=True):
"""
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
If skip_unmatched_layers is True, it will skip layers when the shape mismatch.
Otherwise, it will raise error.
"""
current_keys = sorted(list(model_state_dict.keys()))
loaded_keys = sorted(list(loaded_state_dict.keys()))
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# loaded_key string, if it matches
match_matrix = [
len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys
]
match_matrix = torch.as_tensor(match_matrix).view(
len(current_keys), len(loaded_keys)
)
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_size = max([len(key) for key in current_keys]) if current_keys else 1
max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
logger = logging.getLogger(__name__)
for idx_new, idx_old in enumerate(idxs.tolist()):
if idx_old == -1:
continue
key = current_keys[idx_new]
key_old = loaded_keys[idx_old]
if model_state_dict[key].shape != loaded_state_dict[key_old].shape and skip_unmatched_layers:
if 'x_pos_embed' in key or 'y_pos_embed' in key:
shape_old = loaded_state_dict[key_old].shape
shape_new = model_state_dict[key].shape
new_val = resize_pos_embed_1d(loaded_state_dict[key_old], shape_new)
if shape_new == new_val.shape:
model_state_dict[key] = new_val
logger.info("[RESIZE] {} {} -> {} {}".format(
key_old, shape_old, key, shape_new))
else:
logger.info("[WARNING]", "{} {} != {} {}, skip".format(
key_old, new_val.shape, key, shape_new))
elif 'local_relative_position_bias_table' in key:
shape_old = loaded_state_dict[key_old].shape
shape_new = model_state_dict[key].shape
new_val = resize_pos_embed_2d(loaded_state_dict[key_old], shape_new)
if shape_new == new_val.shape:
model_state_dict[key] = new_val
logger.info("[RESIZE] {} {} -> {} {}".format(
key_old, shape_old, key, shape_new))
else:
logger.info("[WARNING]", "{} {} != {} {}, skip".format(
key_old, new_val.shape, key, shape_new))
elif 'head' in key:
shape_new = model_state_dict[key].shape
logger.info("Use the first {} classes to initialize head because of size mis-match!".format(shape_new[0]))
if key.endswith('weight'):
model_state_dict[key] = loaded_state_dict[key_old][:shape_new[0], :].to(model_state_dict[key].device)
elif key.endswith('bias'):
model_state_dict[key] = loaded_state_dict[key_old][:shape_new[0]].to(model_state_dict[key].device)
else:
raise RuntimeError("Key {} is not expected".format(key))
else:
# if layer weights does not match in size, skip this layer
logger.info("SKIPPING LAYER {} because of size mis-match".format(key))
continue
model_state_dict[key] = loaded_state_dict[key_old]
logger.info(
log_str_template.format(
key,
max_size,
key_old,
max_size_loaded,
tuple(loaded_state_dict[key_old].shape),
)
)
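# --- Usage sketch (illustrative, not part of the original file) ---
# Typical call site (mirrors Checkpointer._load_model below): keys are matched by
# the longest suffix, so e.g. 'backbone.body.res2.conv1.weight' picks up
# 'res2.conv1.weight' rather than 'conv1.weight' from the loaded file.
#
#   model_sd = model.state_dict()
#   loaded_sd = strip_prefix_if_present(torch.load('ckpt.pth')['net'], prefix='module.')
#   align_and_update_state_dicts(model_sd, loaded_sd)   # mutates model_sd in place
#   model.load_state_dict(model_sd)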
class Checkpointer(object):
def __init__(
self,
model,
arch,
optimizer=None,
scheduler=None,
save_dir="",
logger=None,
is_test=False,
epoch=0,
best_acc=0.,
only_save_last=0
):
self.model = model
self.arch = arch
self.optimizer = optimizer
self.scheduler = scheduler
self.save_dir = save_dir
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
self.is_test = is_test
self.resume = False
self.epoch = epoch
self.best_acc = best_acc
self.only_save_last = only_save_last
def save(self, is_best, **kwargs):
name = 'checkpoint_{}'.format(self.epoch)
if self.only_save_last:
name = 'checkpoint_last'
if not (self.save_dir and is_main_process()):
return
data = {"net": self.model.state_dict(), "arch": self.arch,
"epoch": self.epoch, "best_acc": self.best_acc}
if self.optimizer is not None:
data["optimizer"] = self.optimizer.state_dict()
if self.scheduler is not None:
data["scheduler"] = self.scheduler.state_dict()
data.update(kwargs)
save_file = os.path.join(self.save_dir, "{}.pth".format(name))
self.logger.info("Saving checkpoint to {}".format(save_file))
torch.save(data, save_file)
# self.tag_last_checkpoint(save_file)
# use relative path name to save the checkpoint
self.tag_last_checkpoint("{}.pth".format(name))
if is_best:
shutil.copyfile(save_file,
os.path.join(self.save_dir, "model_best.pth"))
self.delete_on_master(self.save_dir)
def delete_on_master(self, chk_path):
if is_main_process():
if self.epoch > 20:
for epo in range(self.epoch - 20):
del_chk_path = os.path.join(chk_path, f'checkpoint_{epo}.pth')
if os.path.exists(del_chk_path):
os.remove(del_chk_path)
def load(self, f=None):
if self.is_test and os.path.isfile(f):
# load the weights in config file if it is specified in testing
            # stage; otherwise it will load the latest checkpoint in
# output_dir for testing
self.logger.info("Loading checkpoint from {}".format(f))
checkpoint = self._load_file(f)
self._load_model(checkpoint)
return checkpoint
if self.has_checkpoint():
# override argument with existing checkpoint
f = self.get_checkpoint_file()
# get the absolute path
f = os.path.join(self.save_dir, f)
self.resume = True
if not os.path.isfile(f):
# no checkpoint could be found
self.logger.info("No checkpoint found. Initializing model from "
"scratch")
# save the random initialization
self.save(is_best=False)
return {}
self.logger.info("Loading checkpoint from {}".format(f))
checkpoint = self._load_file(f)
self._load_model(checkpoint)
# if resume training, load optimizer and scheduler,
# otherwise use the specified LR in config yaml for fine-tuning
if self.resume:
if "epoch" in checkpoint:
self.epoch = checkpoint.pop('epoch')
if "best_acc" in checkpoint:
self.best_acc = checkpoint.pop('best_acc')
if "optimizer" in checkpoint and self.optimizer:
self.logger.info("Loading optimizer from {}".format(f))
self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
if "scheduler" in checkpoint and self.scheduler:
self.logger.info("Loading scheduler from {}".format(f))
self.scheduler.load_state_dict(checkpoint.pop("scheduler"))
# return any further checkpoint data
return checkpoint
def has_checkpoint(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
return os.path.exists(save_file)
def get_checkpoint_file(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
try:
with open(save_file, "r") as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
last_saved = ""
return last_saved
def tag_last_checkpoint(self, last_filename):
save_file = os.path.join(self.save_dir, "last_checkpoint")
with open(save_file, "w") as f:
f.write(last_filename)
def _load_file(self, f):
return torch.load(f.strip(), map_location=torch.device("cpu"))
def _load_model(self, checkpoint):
model_state_dict = self.model.state_dict()
if 'arch' in checkpoint:
assert checkpoint.pop('arch') == self.arch
# remove the "module" prefix before performing the matching
loaded_state_dict = strip_prefix_if_present(checkpoint.pop("net"),
prefix="module.")
else:
loaded_state_dict = strip_prefix_if_present(checkpoint,
prefix="module.")
align_and_update_state_dicts(model_state_dict, loaded_state_dict)
self.model.load_state_dict(model_state_dict)
| transformer-ls-master | imagenet/utils/checkpoint.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
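# --- Usage sketch (illustrative, not part of the original file) ---
# Each rank contributes any picklable object and every rank gets the full list,
# e.g. merging per-rank prediction dicts (see _accumulate_predictions_from_multiple_gpus):
#
#   partial = {image_id: output}          # computed on this rank
#   merged = all_gather(partial)          # list of world_size dicts, identical on every rank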
def gather_on_master(data):
"""Same as all_gather, but gathers data on master process only, using CPU.
Thus, this does not work with NCCL backend unless they add CPU support.
The memory consumption of this function is ~ 3x of data size. While in
    principle, it should be ~2x, it's not easy to force Python to release
memory immediately and thus, peak memory usage could be up to 3x.
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
# trying to optimize memory, but in fact, it's not guaranteed to be released
del data
storage = torch.ByteStorage.from_buffer(buffer)
del buffer
tensor = torch.ByteTensor(storage)
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()])
size_list = [torch.LongTensor([0]) for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,))
tensor = torch.cat((tensor, padding), dim=0)
del padding
if is_main_process():
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)))
dist.gather(tensor, gather_list=tensor_list, dst=0)
del tensor
else:
dist.gather(tensor, gather_list=[], dst=0)
del tensor
return
data_list = []
for tensor in tensor_list:
buffer = tensor.cpu().numpy().tobytes()
del tensor
data_list.append(pickle.loads(buffer))
del buffer
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
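# --- Usage sketch (illustrative, not part of the original file) ---
# Average a dict of 0-dim loss tensors across ranks; only rank 0 is guaranteed
# to receive the averaged values:
#
#   loss_dict = {'loss_cls': loss_cls, 'loss_reg': loss_reg}   # CUDA scalar tensors
#   reduced = reduce_dict(loss_dict)
#   if is_main_process():
#       total = sum(v.item() for v in reduced.values())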
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu,
gather_on_cpu=False):
if gather_on_cpu:
all_predictions = gather_on_master(predictions_per_gpu)
else:
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
return predictions
| transformer-ls-master | imagenet/utils/comm.py |
transformer-ls-master | imagenet/utils/__init__.py |
|
"""
This file is from https://github.com/microsoft/vision-longformer
"""
import errno
import os
import os.path as op
import logging
import numpy as np
import torch
import random
import shutil
from .comm import is_main_process
import yaml
def mkdir(path):
# if it is the current folder, skip.
# otherwise the original code will raise FileNotFoundError
if path == '':
return
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def save_config(cfg, path):
if is_main_process():
with open(path, 'w') as f:
f.write(cfg.dump())
def delete_tsv_files(tsvs):
for t in tsvs:
if op.isfile(t):
try_delete(t)
line = op.splitext(t)[0] + '.lineidx'
if op.isfile(line):
try_delete(line)
def concat_files(ins, out):
mkdir(op.dirname(out))
out_tmp = out + '.tmp'
with open(out_tmp, 'wb') as fp_out:
for i, f in enumerate(ins):
logging.info('concating {}/{} - {}'.format(i, len(ins), f))
with open(f, 'rb') as fp_in:
shutil.copyfileobj(fp_in, fp_out, 1024*1024*10)
os.rename(out_tmp, out)
def concat_tsv_files(tsvs, out_tsv):
concat_files(tsvs, out_tsv)
sizes = [os.stat(t).st_size for t in tsvs]
sizes = np.cumsum(sizes)
all_idx = []
for i, t in enumerate(tsvs):
for idx in load_list_file(op.splitext(t)[0] + '.lineidx'):
if i == 0:
all_idx.append(idx)
else:
all_idx.append(str(int(idx) + sizes[i - 1]))
with open(op.splitext(out_tsv)[0] + '.lineidx', 'w') as f:
f.write('\n'.join(all_idx))
def load_list_file(fname):
with open(fname, 'r') as fp:
lines = fp.readlines()
result = [line.strip() for line in lines]
if len(result) > 0 and result[-1] == '':
result = result[:-1]
return result
def try_once(func):
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.info('ignore error \n{}'.format(str(e)))
return func_wrapper
@try_once
def try_delete(f):
os.remove(f)
def set_seed(seed, n_gpu):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
def print_and_run_cmd(cmd):
print(cmd)
os.system(cmd)
def write_to_yaml_file(context, file_name):
with open(file_name, 'w') as fp:
yaml.dump(context, fp, encoding='utf-8')
def load_from_yaml_file(yaml_file):
with open(yaml_file, 'r') as fp:
return yaml.load(fp, Loader=yaml.CLoader)
def config_iteration(output_dir, steps_per_epoch):
save_file = os.path.join(output_dir, 'last_checkpoint')
epoch = 0
if os.path.exists(save_file):
with open(save_file, 'r') as f:
            fname = f.read().strip()
            model_name = os.path.basename(fname).replace('.pth', '')
            # checkpoints are saved as 'checkpoint_<epoch>.pth'; any other name keeps epoch = 0
            if model_name.startswith('checkpoint_') and model_name[11:].isdigit():
                epoch = int(model_name[11:])
return epoch*steps_per_epoch - 1
| transformer-ls-master | imagenet/utils/miscellaneous.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
# Written by Pengchuan Zhang, [email protected]
import logging
import torch.nn as nn
import torchvision.models as tvmodels
from .msvit import MsViT
def build_model(cfg):
# ResNet models from torchvision
resnet_model_names = sorted(name for name in tvmodels.__dict__
if name.islower() and not name.startswith("__")
and callable(tvmodels.__dict__[name]))
print("torchvision models: \n", resnet_model_names)
# Vision Transformer models
vitmodeldict = {
'msvit': MsViT,
}
vit_model_names = list(vitmodeldict.keys())
print("Vision Transformer models: \n", vit_model_names)
# Build model
print('==> Building model..')
if cfg.MODEL.ARCH in resnet_model_names:
logging.info("Use torchvision predefined model")
if cfg.MODEL.PRETRAINED:
logging.info("=> using pre-trained model '{}'".format(cfg.MODEL.ARCH))
net = tvmodels.__dict__[cfg.MODEL.ARCH](pretrained=True,)
if net.fc.out_features != cfg.DATA.NUM_CLASSES:
net.fc = nn.Linear(net.fc.in_features, cfg.DATA.NUM_CLASSES)
else:
logging.info("=> creating model '{}'".format(cfg.MODEL.ARCH))
net = tvmodels.__dict__[cfg.MODEL.ARCH](num_classes=cfg.DATA.NUM_CLASSES)
elif cfg.MODEL.ARCH in vit_model_names:
logging.info("Use vision transformer model")
args = dict(
img_size=cfg.INPUT.IMAGE_SIZE,
drop_rate=cfg.MODEL.VIT.DROP,
drop_path_rate=cfg.MODEL.VIT.DROP_PATH,
norm_embed=cfg.MODEL.VIT.NORM_EMBED,
avg_pool=cfg.MODEL.VIT.AVG_POOL,
)
if cfg.MODEL.ARCH.startswith('msvit'):
args['arch'] = cfg.MODEL.VIT.MSVIT.ARCH
args['sharew'] = cfg.MODEL.VIT.MSVIT.SHARE_W
args['attn_type'] = cfg.MODEL.VIT.MSVIT.ATTN_TYPE
args['share_kv'] = cfg.MODEL.VIT.MSVIT.SHARE_KV
args['only_glo'] = cfg.MODEL.VIT.MSVIT.ONLY_GLOBAL
args['sw_exact'] = cfg.MODEL.VIT.MSVIT.SW_EXACT
args['ln_eps'] = cfg.MODEL.VIT.MSVIT.LN_EPS
args['mode'] = cfg.MODEL.VIT.MSVIT.MODE
logging.info("=> creating model '{}'".format(cfg.MODEL.ARCH))
net = vitmodeldict[cfg.MODEL.ARCH](num_classes=cfg.DATA.NUM_CLASSES, **args)
else:
raise ValueError(
"Unimplemented model architecture: {}".format(cfg.MODEL.ARCH))
return net
| transformer-ls-master | imagenet/models/__init__.py |
"""Code for the vision transformer model based on ViL.
Adapted from https://github.com/microsoft/vision-longformer by Chen Zhu ([email protected])
"""
import math
from functools import partial
import logging
import torch
from torch import nn
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from .layers import Attention, AttentionLS
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, patch_size, nx, ny, in_chans=3, embed_dim=768, nglo=1,
norm_layer=nn.LayerNorm, norm_embed=True, drop_rate=0.0,
ape=True):
# maximal global/x-direction/y-direction tokens: nglo, nx, ny
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
self.norm_embed = norm_layer(embed_dim) if norm_embed else None
self.nx = nx
self.ny = ny
self.Nglo = nglo
if nglo >= 1:
self.cls_token = nn.Parameter(torch.zeros(1, nglo, embed_dim))
trunc_normal_(self.cls_token, std=.02)
else:
self.cls_token = None
self.ape = ape
if ape:
self.cls_pos_embed = nn.Parameter(torch.zeros(1, nglo, embed_dim))
self.x_pos_embed = nn.Parameter(torch.zeros(1, nx, embed_dim // 2))
self.y_pos_embed = nn.Parameter(torch.zeros(1, ny, embed_dim // 2))
trunc_normal_(self.cls_pos_embed, std=.02)
trunc_normal_(self.x_pos_embed, std=.02)
trunc_normal_(self.y_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
def forward(self, xtuple):
x, nx, ny = xtuple
B = x.shape[0]
x = self.proj(x)
nx, ny = x.shape[-2:]
x = x.flatten(2).transpose(1, 2)
assert nx == self.nx and ny == self.ny, "Fix input size!"
if self.norm_embed:
x = self.norm_embed(x)
# concat cls_token
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.ape:
# add position embedding
pos_embed_2d = torch.cat([
self.x_pos_embed.unsqueeze(2).expand(-1, -1, ny, -1),
self.y_pos_embed.unsqueeze(1).expand(-1, nx, -1, -1),
], dim=-1).flatten(start_dim=1, end_dim=2)
x = x + torch.cat([self.cls_pos_embed, pos_embed_2d], dim=1).expand(
B, -1, -1)
x = self.pos_drop(x)
return x, nx, ny
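# --- Shape walkthrough (illustrative comment, not part of the original file) ---
# The absolute position embedding above is factorized along x and y. For example,
# with nx = ny = 56 and embed_dim = 96:
#   x_pos_embed (1, 56, 48) -> broadcast over y -> (1, 56, 56, 48)
#   y_pos_embed (1, 56, 48) -> broadcast over x -> (1, 56, 56, 48)
# The two halves are concatenated on the channel dim and flattened to (1, 56*56, 96),
# then added to the patch tokens together with cls_pos_embed for the nglo global tokens.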
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# for Performer, start
def get_module_device(module):
return next(module.parameters()).device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
# for Performer, end
class AttnBlock(nn.Module):
""" Meta Attn Block
"""
def __init__(self, dim, num_heads, qkv_bias=False, qk_scale=None, drop=0.,
attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm,
attn_type='full', w=7, d=1, sharew=False, nglo=1,
only_glo=False,
seq_len=None, num_feats=256, share_kv=False, sw_exact=0,
rratio=2, rpe=False, wx=14, wy=14, mode=0, dp_rank=0):
super().__init__()
self.norm = norm_layer(dim)
if attn_type == 'full':
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop,
proj_drop=drop, rpe=rpe, wx=wx, wy=wy, nglo=nglo)
elif attn_type == 'ls':
# Our Long-short term attention.
self.attn = AttentionLS(
dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop,
proj_drop=drop, rpe=rpe, nglo=nglo,
dp_rank=dp_rank, w=w
)
else:
raise ValueError(
"Not supported attention type {}".format(attn_type))
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
def forward(self, xtuple):
x, nx, ny = xtuple
x = x + self.drop_path(self.attn(self.norm(x), nx, ny))
return x, nx, ny
class MlpBlock(nn.Module):
""" Meta MLP Block
"""
def __init__(self, dim, out_dim=None, mlp_ratio=4., drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
out_features=out_dim, act_layer=act_layer, drop=drop)
self.shortcut = nn.Identity()
if out_dim is not None and out_dim != dim:
self.shortcut = nn.Sequential(nn.Linear(dim, out_dim),
nn.Dropout(drop))
def forward(self, xtuple):
x, nx, ny = xtuple
x = self.shortcut(x) + self.drop_path(self.mlp(self.norm(x)))
return x, nx, ny
class MsViT(nn.Module):
""" Multiscale Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, arch, img_size=512, in_chans=3, num_classes=1000,
qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
norm_embed=False, w=7, d=1, sharew=False, only_glo=False,
share_kv=False, attn_type='longformerhand', sw_exact=0, mode=0, **args):
super().__init__()
self.num_classes = num_classes
if 'ln_eps' in args:
ln_eps = args['ln_eps']
self.norm_layer = partial(nn.LayerNorm, eps=ln_eps)
logging.info("Customized LayerNorm EPS: {}".format(ln_eps))
else:
self.norm_layer = norm_layer
self.drop_path_rate = drop_path_rate
self.attn_type = attn_type
# for performer, start
if attn_type == "performer":
            self.auto_check_redraw = True  # TODO: make this a choice
self.feature_redraw_interval = 1
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
# for performer, end
self.attn_args = dict({
'attn_type': attn_type,
'qkv_bias': qkv_bias,
'qk_scale': qk_scale,
'drop': drop_rate,
'attn_drop': attn_drop_rate,
'w': w,
'd': d,
'sharew': sharew,
'only_glo': only_glo,
'share_kv': share_kv,
'sw_exact': sw_exact,
'norm_layer': norm_layer,
'mode': mode,
})
self.patch_embed_args = dict({
'norm_layer': norm_layer,
'norm_embed': norm_embed,
'drop_rate': drop_rate,
})
self.mlp_args = dict({
'mlp_ratio': 4.0,
'norm_layer': norm_layer,
'act_layer': nn.GELU,
'drop': drop_rate,
})
self.Nx = img_size
self.Ny = img_size
def parse_arch(arch):
layer_cfgs = []
for layer in arch.split('_'):
layer_cfg = {'l': 1, 'h': 3, 'd': 192, 'n': 1, 's': 1, 'g': 1,
'p': 2, 'f': 7, 'a': 1, 'r': 0} # defaults. r is our low-rank attention
for attr in layer.split(','):
layer_cfg[attr[0]] = int(attr[1:])
layer_cfgs.append(layer_cfg)
return layer_cfgs
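        # --- Example (illustrative, not part of the original file) ---
        # A hypothetical arch string 'l1,h3,d96,n1,s1,g1,p4,f7,a0_l2,h6,d192,n2,s1,g1,p2,f7,a0'
        # parses into one dict per stage; the first stage would be
        #   {'l': 1, 'h': 3, 'd': 96, 'n': 1, 's': 1, 'g': 1, 'p': 4, 'f': 7, 'a': 0, 'r': 0}
        # i.e. layer id 1, 3 heads, dim 96, 1 block, s=1 keeps the configured attention type
        # (s=0 falls back to full attention), 1 global token, patch size 4, num_feats/window 7,
        # APE disabled (relative position bias used instead), dynamic-projection rank 0
        # (see _make_layer for how each key is consumed).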
self.layer_cfgs = parse_arch(arch)
self.num_layers = len(self.layer_cfgs)
self.depth = sum([cfg['n'] for cfg in self.layer_cfgs])
self.out_planes = self.layer_cfgs[-1]['d']
self.Nglos = [cfg['g'] for cfg in self.layer_cfgs]
self.avg_pool = args['avg_pool'] if 'avg_pool' in args else False
self.dp_rank = [cfg['r'] for cfg in self.layer_cfgs]
dprs = torch.linspace(0, drop_path_rate, self.depth).split(
[cfg['n'] for cfg in self.layer_cfgs]
) # stochastic depth decay rule
self.layer1 = self._make_layer(in_chans, self.layer_cfgs[0],
dprs=dprs[0], layerid=1)
self.layer2 = self._make_layer(self.layer_cfgs[0]['d'],
self.layer_cfgs[1], dprs=dprs[1],
layerid=2)
self.layer3 = self._make_layer(self.layer_cfgs[1]['d'],
self.layer_cfgs[2], dprs=dprs[2],
layerid=3)
if self.num_layers == 3:
self.layer4 = None
elif self.num_layers == 4:
self.layer4 = self._make_layer(self.layer_cfgs[2]['d'],
self.layer_cfgs[3], dprs=dprs[3],
layerid=4)
else:
raise ValueError("Numer of layers {} not implemented yet!".format(self.num_layers))
self.norm = norm_layer(self.out_planes)
# Classifier head
self.head = nn.Linear(self.out_planes,
num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _make_layer(self, in_dim, layer_cfg, dprs, layerid=0):
layer_id, num_heads, dim, num_block, is_sparse_attn, nglo, patch_size, num_feats, ape \
= layer_cfg['l'], layer_cfg['h'], layer_cfg['d'], layer_cfg['n'], layer_cfg['s'], layer_cfg['g'], layer_cfg['p'], layer_cfg['f'], layer_cfg['a']
dp_rank = layer_cfg['r']
assert layerid == layer_id, "Error in _make_layer: layerid {} does not equal to layer_id {}".format(layerid, layer_id)
self.Nx = nx = self.Nx // patch_size
self.Ny = ny = self.Ny // patch_size
seq_len = nx * ny + nglo
self.attn_args['nglo'] = nglo
self.patch_embed_args['nglo'] = nglo
self.attn_args['num_feats'] = num_feats # shared for linformer and performer
self.attn_args['rratio'] = num_feats # srformer reuses this parameter
self.attn_args['w'] = num_feats # longformer reuses this parameter
self.attn_args['dp_rank'] = dp_rank
if is_sparse_attn == 0:
self.attn_args['attn_type'] = 'full'
# patch embedding
layers = [
PatchEmbed(patch_size, nx, ny, in_chans=in_dim, embed_dim=dim, ape=ape,
**self.patch_embed_args)
]
for dpr in dprs:
layers.append(AttnBlock(
dim, num_heads, drop_path=dpr, seq_len=seq_len, rpe=not ape,
wx=nx, wy=ny,
**self.attn_args
))
layers.append(MlpBlock(dim, drop_path=dpr, **self.mlp_args))
return nn.Sequential(*layers)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
no_decay = {'pos_embed', 'cls_token',
'norm.weight', 'norm.bias',
'norm_embed', 'head.bias',
'relative_position'}
return no_decay
def get_classifier(self):
return self.head
def forward_features(self, x):
B = x.shape[0]
x, nx, ny = self.layer1((x, None, None))
x = x[:, self.Nglos[0]:].transpose(-2, -1).reshape(B, -1, nx, ny)
x, nx, ny = self.layer2((x, nx, ny))
x = x[:, self.Nglos[1]:].transpose(-2, -1).reshape(B, -1, nx, ny)
x, nx, ny = self.layer3((x, nx, ny))
if self.layer4 is not None:
x = x[:, self.Nglos[2]:].transpose(-2, -1).reshape(B, -1, nx, ny)
x, nx, ny = self.layer4((x, nx, ny))
x = self.norm(x)
if self.Nglos[-1] > 0 and (not self.avg_pool):
return x[:, 0]
else:
return torch.mean(x, dim=1)
def forward(self, x):
if self.attn_type == "performer" and self.auto_check_redraw:
self.check_redraw_projections()
x = self.forward_features(x)
x = self.head(x)
return x
| transformer-ls-master | imagenet/models/msvit.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
# Written by Pengchuan Zhang, [email protected]
from torch import nn
import torch
from timm.models.layers import trunc_normal_
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0., proj_drop=0.,
rpe=False, wx=14, wy=14, nglo=1):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
# Inspired by swin transformer:
# https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py#L88-L103
# define parameter tables for local and global relative position bias
self.rpe = rpe
if rpe:
self.wx = wx
self.wy = wy
self.nglo = nglo
self.local_relative_position_bias_table = nn.Parameter(
torch.zeros((2 * wx - 1) * (2 * wy - 1),
num_heads)) # (2*wx-1, 2*wy-1, nH)
trunc_normal_(self.local_relative_position_bias_table, std=.02)
if nglo >= 1:
self.g2l_relative_position_bias = nn.Parameter(
torch.zeros(2, num_heads, nglo)) # (2, nH, nglo)
self.g2g_relative_position_bias = nn.Parameter(
torch.zeros(num_heads, nglo, nglo)) # (nH, nglo, nglo)
trunc_normal_(self.g2l_relative_position_bias, std=.02)
trunc_normal_(self.g2g_relative_position_bias, std=.02)
# get pair-wise relative position index
coords_h = torch.arange(wx)
coords_w = torch.arange(wy)
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, wx, wy
coords_flatten = torch.flatten(coords, 1) # 2, Wx*Wy
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wx*Wy, Wx*Wy
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wx*Wy, Wx*Wy, 2
relative_coords[:, :, 0] += wx - 1 # shift to start from 0
relative_coords[:, :, 1] += wy - 1
relative_coords[:, :, 0] *= 2 * wy - 1
relative_position_index = relative_coords.sum(-1) # Wx*Wy, Wx*Wy
self.register_buffer("relative_position_index", relative_position_index)
def forward(self, x, nx=None, ny=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
if self.rpe:
assert N == self.nglo + self.wx*self.wy, "For relative position, N != self.nglo + self.wx*self.wy!"
local_relative_position_bias = self.local_relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.wx*self.wy, self.wx*self.wy, -1) # Wh*Ww, Wh*Ww,nH
relative_position_bias = local_relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
if self.nglo > 0:
# relative position embedding of global tokens
global_relative_position_bias = torch.cat([
self.g2g_relative_position_bias,
self.g2l_relative_position_bias[0].unsqueeze(-1).expand(-1, -1, self.wx*self.wy)
], dim=-1) # nH, nglo, N
# relative position embedding of local tokens
local_relative_position_bias = torch.cat([
self.g2l_relative_position_bias[1].unsqueeze(1).expand(-1, self.wx*self.wy, -1),
relative_position_bias,
], dim=-1) # nH, Wh*Ww, N
relative_position_bias = torch.cat([
global_relative_position_bias,
local_relative_position_bias,
], dim=1) # nH, N, N
attn = attn + relative_position_bias.unsqueeze(0)
attn = (attn - torch.max(attn, dim=-1, keepdim=True)[0]).softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def compute_macs(module, input, output):
# T: num_token
# S: num_token
input = input[0]
_, T, C = input.shape
S = T
macs = 0
n_params = 0
# Scaled-dot-product macs
# [B x T x C] x [B x C x S] --> [B x T x S]
# multiplication-addition is counted as 1 because operations can be fused
num_macs_kq = T * S * C
# [B x T x S] x [B x S x C] --> [B x T x C]
num_macs_v = T * C * S
macs += num_macs_kq + num_macs_v
# print('macs att', macs / 1e8)
# self attention: T should be equal to S
assert T == S
qkv_params = sum([p.numel() for p in module.qkv.parameters()])
n_params += qkv_params
# multiply by Seq length
macs += qkv_params * T
# print('macs qkv', qkv_params * T / 1e8)
proj_params = sum([p.numel() for p in module.proj.parameters()])
n_params += proj_params
macs += (proj_params * T)
# print('macs proj', proj_params * T / 1e8)
module.__flops__ += macs
        # return n_params, macs
| transformer-ls-master | imagenet/models/layers/full_attention.py |
# Written by Chen Zhu during an internship at NVIDIA, [email protected]
from .transformer_ls import AttentionLS
from .full_attention import Attention
| transformer-ls-master | imagenet/models/layers/__init__.py |
# Copyright (c) 2021 NVIDIA CORPORATION. Licensed under the MIT license.
# Written by Chen Zhu during an internship at NVIDIA, [email protected]
from torch import nn
import torch
from timm.models.layers import trunc_normal_
import torch.nn.functional as F
class AttentionLS(nn.Module):
"""Implementation for long-short term attention.
Flexible options for using window attention, global token and dynamic projection.
Args:
dim: input and output feature dimension.
num_heads: number of attention heads.
qkv_bias: whether to use bias for the projection of query, key and values.
qk_scale: scale factor on query and key for numerical stability.
By default, set to square root of head dimensions.
attn_drop: dropout probability for attention matrix.
proj_drop: dropout probability for the final output.
rpe: whether to use relative position encoding.
nglo: number of global tokens (e.g., CLS).
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0., proj_drop=0., rpe=False, nglo=1,
dp_rank=2, w=2):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.nglo = nglo
# Equals to segment size (w) in the paper.
self.window_size = w
# Equals to r in the paper.
self.dp_rank = dp_rank
if self.dp_rank > 0:
self.to_dynamic_projection = nn.Linear(dim, dp_rank * num_heads)
# The LN of DualLN corresponding to dynamic projection
self.dual_ln_dp = nn.LayerNorm(dim)
# The LN of DualLN corresponding to all the tokens
self.dual_ln_full = nn.LayerNorm(dim)
# Adapted from ViL: https://github.com/microsoft/vision-longformer/blob/main/src/models/layers/longformer2d.py#L55-L100
# We only add RPE to window attention.
# Unnecessary to add bias for global tokens, since DualLN already adds biases.
self.rpe = rpe
if rpe:
            # handle the border conditions...
w_pad = int(w*0.5)
self.local_relative_position_bias_table = nn.Parameter(
torch.zeros(2 * (w + w_pad - 1) * (2 * w_pad + w + 1) + 1, num_heads))
trunc_normal_(self.local_relative_position_bias_table, std=.02)
# get pair-wise relative position index
coords_h = torch.arange(-w_pad, w_pad + w)
coords_w = torch.arange(-w_pad, w_pad + w)
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, 2w, 2w
coords = coords.view(2, (w + w_pad * 2)**2).transpose(0, 1).unsqueeze(0) # 1, 4w**2, 2
q_coords_hw = torch.arange(0, w)
q_coords = torch.stack(torch.meshgrid([q_coords_hw, q_coords_hw])) # 2, w, w
q_coords = q_coords.view(2, w**2).transpose(0, 1).unsqueeze(1) # w**2, 1, 2
relative_coords = q_coords - coords
relative_coords += w_pad + w - 1 # shift to start from 0
relative_coords[:, :, 0] *= 2 * w_pad + w
relative_position_index = relative_coords.sum(-1) # w^2, 4w^2
self.register_buffer("relative_position_index", relative_position_index)
def forward(self, x, nx=None, ny=None):
B, N, C = x.shape
N_feat = N - self.nglo
self.img_size = nx
qkv = self.qkv(x)
# query, key, value
q, k, v = qkv.chunk(3, dim=2)
q = q.mul(self.scale)
# Layer norm on the projected keys and values
k = self.dual_ln_full(k)
v = self.dual_ln_full(v)
# output size: bsz x n_heads x seqlen x d
if self.nglo > 0:
q_cls, q = q[:, :self.nglo], q[:, self.nglo:]
k_cls, k = k[:, :self.nglo], k[:, self.nglo:]
v_cls, v = v[:, :self.nglo], v[:, self.nglo:]
q_cls = q_cls.reshape(B, self.nglo, self.num_heads, C // self.num_heads).transpose(1, 2)
k_cls = k_cls.reshape(B, self.nglo, self.num_heads, C // self.num_heads).transpose(1, 2)
v_cls = v_cls.reshape(B, self.nglo, self.num_heads, C // self.num_heads).transpose(1, 2)
q = q.reshape(B, N_feat, self.num_heads, C//self.num_heads).transpose(1, 2)
k = k.reshape(B, N_feat, self.num_heads, C//self.num_heads).transpose(1, 2)
v = v.reshape(B, N_feat, self.num_heads, C//self.num_heads).transpose(1, 2)
# Long-range Attention (Dynamic Projection)
if self.dp_rank > 0:
# b x h x r x (l w)
# Compute the projection matrix (P_i in the paper)
c_scores = self.to_dynamic_projection(x[:, self.nglo:]).transpose(1, 2).contiguous().view(
B, self.num_heads, self.dp_rank, -1)
c_scores = c_scores.softmax(dim=-1, dtype=torch.float32).to(x)
# b x h x r x d
k_lms = c_scores.matmul(k)
k_lms = k_lms.transpose(1, 2).contiguous().view(B, self.dp_rank, -1)
k_lms = self.dual_ln_dp(k_lms).view(B, self.dp_rank, self.num_heads, -1).contiguous().permute(0, 2, 3, 1)
# b x h x (lw) x r
dots_all = q.matmul(k_lms)
if self.window_size > 0:
# Switch the order of dimensions if using window attention.
dots_all = self.group_dots(dots_all)
else:
dots_all = None
# Short-term Attention (Window Attention)
# In our window attention, each token attends to at most (4w^2) tokens.
if self.window_size > 0:
dots_win = self.compute_window_scores(q, k)
w2 = int(self.window_size*self.window_size)
if self.rpe:
w_pad = int(0.5 * self.window_size)
local_relative_position_bias = self.local_relative_position_bias_table[
self.relative_position_index.view(-1)].view(1, w2, (w_pad*2 + self.window_size)**2, -1) # w^2, kv_nums,H
local_relative_position_bias = local_relative_position_bias.permute(
0, 3, 1, 2).expand(B, -1, -1, -1).unsqueeze(2).unsqueeze(2)
dots_win += local_relative_position_bias
if dots_all is None:
dots_all = dots_win
else:
dots_all = torch.cat([dots_all, dots_win], dim=-1)
# Global token.
if self.nglo > 0:
            # compute the scores of the local queries against the global (CLS) keys
dots_q_cls = q.matmul(k_cls.transpose(-1, -2))
if self.window_size > 0:
dots_q_cls = self.group_dots(dots_q_cls)
dots_all = torch.cat([dots_all, dots_q_cls], dim=-1)
attn = dots_all.softmax(dim=-1, dtype=torch.float32).to(x)
attn = self.attn_drop(attn)
out = 0
if self.window_size > 0:
offset = max(0, self.dp_rank)
kv_group_size = self.window_size
total_win_size = max(1, self.window_size // 2) * 2 + kv_group_size
attn_win = attn[:, :, :, :, :, offset:offset + total_win_size ** 2]
out += self.compute_window_pv(attn_win, v)
attn = self.ungroup_dots(attn)
# attn will be b x h x lw x n_k from now on
if self.dp_rank > 0:
attn_lm = attn[:, :, :, :self.dp_rank]
v_lms = c_scores.matmul(v.float()).to(v).transpose(1, 2).contiguous().view(B, self.dp_rank, -1)
v_lms = self.dual_ln_dp(v_lms).view(B, self.dp_rank, self.num_heads, -1).contiguous().transpose(1, 2)
out += attn_lm.matmul(v_lms)
if self.nglo > 0:
attn_cls = attn[:, :, :, -self.nglo:]
out += attn_cls.mul(v_cls)
# b x h x 1 x lw
cls_inner = q_cls.matmul(k_cls.transpose(-1, -2))
cls_dots = q_cls.matmul(out.transpose(-1, -2))
cls_dots = torch.cat([cls_inner, cls_dots], dim=-1)
cls_dots = cls_dots.softmax(dim=-1, dtype=torch.float32).to(x)
cls_next = cls_dots[:, :, :, self.nglo:].matmul(out) # the post_cls variant
cls_next += cls_dots[:, :, :, :self.nglo].matmul(v_cls)
out = torch.cat([cls_next, out], dim=2)
out = out.transpose(1, 2).contiguous().view(B, N, -1)
# x = (attn @ v).transpose(1, 2).reshape(B, N, C)
out = self.proj(out)
out = self.proj_drop(out)
return out
def compute_window_scores(self, q, k):
"""Compute the inner products for the window attention.
        First, divide the query into non-overlapping windows.
        Then, use torch.as_strided (implemented in self.get_overlapping_tiles) to create a view of the keys
that corresponds to the windows with at most 2x memory overhead.
Finally, compute the inner product.
"""
# q: b h (l w) d
b, h, _, d = q.shape
side_size = max(self.window_size//2, 1)
# q_group_size: segment size
kv_width = 2 * side_size + self.window_size # assuming q_stride=1
q_n_group = self.img_size // self.window_size
q_tiles = q.reshape(b, h, q_n_group, self.window_size, q_n_group, self.window_size, d).permute(
0, 1, 2, 4, 3, 5, 6)
# q_tiles: b x h x n_group x n_group x w^2 x d
q_tiles = q_tiles.contiguous().view(b, h, q_n_group, q_n_group, -1, d)
# k_tiles: b x h x n_group x n_group x 9w^2 x d
k_tiles = self.get_overlapping_tiles(k).contiguous().view(b, h, q_n_group, q_n_group, -1, d)
# dot_tiles: b x h x n_group x n_group x w^2 x 9w^2
dot_tiles = q_tiles.matmul(k_tiles.transpose(-1, -2))
# fill "-inf" into the zero-padding parts
dot_tiles = dot_tiles.view(b, h, q_n_group, q_n_group, -1, kv_width, kv_width)
dot_tiles[:, :, 0, :, :, :side_size].fill_(float('-inf'))
dot_tiles[:, :, -1, :, :, -side_size:].fill_(float('-inf'))
dot_tiles[:, :, :, 0, :, :, :side_size].fill_(float('-inf'))
dot_tiles[:, :, :, -1, :, :, -side_size:].fill_(float('-inf'))
dot_tiles = dot_tiles.view(b, h, q_n_group, q_n_group, -1, kv_width ** 2)
return dot_tiles
def get_overlapping_tiles(self, x):
"""Get overlapping tiles in the 2D spatial domain, ensuring each query computes correlation with all neighbors
"""
# x: b h (l w) d
b, h, _, d = x.shape
side_size = max(self.window_size // 2, 1)
total_size = 2 * side_size + self.window_size
kv_group_size = self.window_size
kv_width = self.img_size
x = x.view(b, h, kv_width, kv_width, d)
x = F.pad(x, [0, 0, side_size, side_size, side_size, side_size], value=0)
out_shape = [b, h, kv_width // kv_group_size, kv_width // kv_group_size,
total_size, total_size, d]
in_stride = x.stride()
out_stride = [in_stride[0], in_stride[1], in_stride[2] * kv_group_size, in_stride[3] * kv_group_size,
in_stride[2], in_stride[3], in_stride[4]]
# note we ignored the boundary here
return x.as_strided(size=out_shape, stride=out_stride)
def compute_window_pv(self, attn, v):
"""Compute the inner product of attention matrix and the values for the window attention.
"""
b, h, n_group, _, w2, n_k = attn.shape
d = v.shape[-1]
v_tiles = self.get_overlapping_tiles(v).contiguous().view(b, h, n_group, n_group, -1, d)
# b x h x n_group x n_group x w^2 x d
pv = attn.matmul(v_tiles)
# return: b x h x (lw) x d
ret = self.ungroup_dots(pv)
return ret
def group_dots(self, dots):
b, h = dots.shape[:2]
n_group = self.img_size // self.window_size
dots = dots.reshape(b, h, n_group, self.window_size, n_group, self.window_size,
-1).permute(0, 1, 2, 4, 3, 5, 6)
dots = dots.contiguous().view(b, h, n_group, n_group, self.window_size * self.window_size, -1)
return dots
def ungroup_dots(self, dots):
b, h, n_group, _, _, n_keys = dots.shape
dots = dots.reshape(b, h, n_group, n_group, self.window_size, self.window_size,
-1).permute(0, 1, 2, 4, 3, 5, 6)
dots = dots.contiguous().view(b, h, -1, n_keys)
return dots
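# --- Usage sketch (illustrative, not part of the original file) ---
# Minimal forward pass on a 14x14 token grid with one global (CLS) token,
# window size w=7 and dynamic-projection rank r=8 (all values chosen for illustration):
#
#   attn = AttentionLS(dim=96, num_heads=3, nglo=1, w=7, dp_rank=8)
#   tokens = torch.randn(2, 1 + 14 * 14, 96)   # (batch, nglo + nx*ny, dim)
#   out = attn(tokens, nx=14, ny=14)           # -> shape (2, 197, 96)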
| transformer-ls-master | imagenet/models/layers/transformer_ls.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
# Install required packages from requirements.txt file
requirements_relative_path = "/requirements.txt"
package_folder = os.path.dirname(os.path.realpath(__file__))
requirements_path = package_folder + requirements_relative_path
install_requires = []
if os.path.isfile(requirements_path):
    with open(requirements_path) as f:
        install_requires = f.read().splitlines()
# Extract version number from VERSION file
release_version = "0.0.0"
if os.path.exists('VERSION'):
with open('VERSION') as version_file:
release_version = version_file.read().strip()
setuptools.setup(
name="nvidia-clara-cpost",
author="NVIDIA Clara Deploy",
version=release_version,
description="Python package to run Clara Pipeline Operator Sizing Tool (cpost)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitlab-master.nvidia.com/Clara/sdk/-/tree/main/Tools/cpost",
install_requires=install_requires,
packages=setuptools.find_packages('.'),
entry_points={
'console_scripts': [
'cpost = src.main:main'
]
},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
| clara-pipeline-operator-sizing-tool-main | setup.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import sys
from dataclasses import dataclass
from unittest.mock import MagicMock, patch
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from clarac_utils import OperatorConfig # nopep8 # noqa: E402
from utils import (assert_installed, check_images_and_tags, convert_percent_to_cores, # nopep8 # noqa: E402
prompt_yes_or_no, round_up_to_multiple, subproc_run_wrapper, write_to_csv)
@pytest.mark.parametrize("data_in, base, data_out",
[(3, 2, 4),
(4, 2, 4),
(15.5, 5, 20),
(148.05, 256, 256),
(256.05, 256, 512)])
def test_round_up_to_multiple(data_in, base, data_out):
assert round_up_to_multiple(data_in, base) == data_out
@pytest.mark.parametrize("data_in, data_out",
[(100.05, 2),
(1343.5, 14),
(50.55, 1)])
def test_convert_percent_to_cores(data_in, data_out):
assert convert_percent_to_cores(data_in) == data_out
@pytest.mark.parametrize("program, exist", [("echo", True), ("clara", True), ("claraabc", False)])
def test_assert_installed(program, exist):
if program == "clara":
pytest.skip()
if exist:
assert assert_installed(program) is None
else:
with pytest.raises(SystemExit) as exc:
assert_installed(program)
assert exc.value.code == 1
@pytest.mark.parametrize("mocked_return, run_called_count", [
pytest.param(MagicMock(**{"stdout": b'tag1\n'}), 2, id="exists_locally"),
pytest.param(MagicMock(**{"stdout": b'', "returncode": 0}), 4, id="can_be_pulled"),
pytest.param(MagicMock(**{"stdout": b'', "returncode": 1, "stderr": b'error message'}), 2, id="pull_failed"),
])
@patch("utils.subproc_run")
def test_check_images_and_tags(mock_subproc_run, mocked_return, run_called_count):
mock_subproc_run.return_value = mocked_return
mock_service = [MagicMock(**{"image_n_tag": "tag1"})]
op1 = OperatorConfig("Input1", "tag1", None, None, [{"path": "/input"}], None, None, mock_service)
if mocked_return.returncode == 1:
with pytest.raises(SystemExit):
check_images_and_tags([op1])
else:
check_images_and_tags([op1])
assert mock_subproc_run.call_count == run_called_count
@patch("utils.TRITON_IMAGE_TAG", "triton-tag")
@pytest.mark.parametrize("mocked_return, expect_exit, run_called_count",
[
pytest.param([MagicMock(**{"stdout": b'triton-tag\n'})],
False, 2, id="exists_locally"),
pytest.param(
[MagicMock(**{"stdout": b''}),
MagicMock(**{"stdout": b'', "returncode": 0})],
False, 3, id="can_be_pulled"),
pytest.param(
[MagicMock(**{"stdout": b''}),
MagicMock(**{"stdout": b'', "returncode": 1, "stderr": b'error message'})],
True, 3, id="pull_failed"),
])
@patch("utils.subproc_run")
def test_check_images_and_tags_with_triton(mock_subproc_run, mocked_return, expect_exit, run_called_count):
mock_subproc_run.side_effect = [MagicMock(**{"stdout": b'tag1\n'})] + mocked_return
op1 = OperatorConfig("Input1", "tag1", None, None, [{"path": "/input"}], None, ["model1"])
if expect_exit:
with pytest.raises(SystemExit):
check_images_and_tags([op1])
else:
check_images_and_tags([op1])
assert mock_subproc_run.call_count == run_called_count
@pytest.mark.parametrize("mocked_return", [
pytest.param(MagicMock(**{"stdout": b'container_id\n', "returncode": 0}), id="all_good"),
pytest.param(MagicMock(**{"stderr": b'error message', "returncode": 1}), id="error")
])
@patch("utils.subproc_run")
def test_subproc_run_wrapper(mock_subproc_run, mocked_return):
mock_subproc_run.return_value = mocked_return
if mocked_return.returncode == 1:
with pytest.raises(SystemExit):
subproc_run_wrapper(["some", "cmd"])
else:
result = subproc_run_wrapper(["some", "cmd"])
assert result == "container_id"
@pytest.mark.parametrize("choice, expected_result", [
("y", True),
("Y", True),
("yes", True),
("YES", True),
("yup", True),
("n", False),
("N", False),
("no", False),
("NO", False),
("nope", False),
("j\nx\nyeeee", True),
("exxxy\nadsfa\nnaaah", False),
("\nx\ny", True)
])
def test_prompt_yes_or_no(choice, expected_result):
sys.stdin = io.StringIO(choice)
assert prompt_yes_or_no("Please give your response") == expected_result
def test_write_to_csv(tmp_path):
    @dataclass
class MockMetric:
field1: str
field2: int
mock_q = MagicMock()
mock_q.get.side_effect = [None, MockMetric("abc", 12), MockMetric("fdvc", 15), 0]
output_dir = tmp_path / "sub_dir" / "test_write_to_csv"
field_names = ["field1", "field2"]
write_to_csv(mock_q, field_names, output_dir)
assert output_dir.read_text() == "field1,field2\nabc,12\nfdvc,15\n"
| clara-pipeline-operator-sizing-tool-main | tests/test_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from container import Container, Metrics, RawMetrics # nopep8 # noqa: E402
def is_empty(any_structure):
    """Helper method to check if structure is empty."""
    return not any_structure
TEMP_DIR = Path(tempfile.gettempdir())
TEST_SYS_FS = TEMP_DIR / "test_sys_fs"
@patch("container.SYSFS_PATH", TEST_SYS_FS)
class TestContainer:
def test_init_container(self):
container = Container()
assert isinstance(container, Container)
assert is_empty(container.id)
assert is_empty(container.raw_metrics)
assert is_empty(container.metric_paths)
def test_create_metrics_path_no_id(self):
container = Container()
with pytest.raises(RuntimeError):
container.construct_metrics_path()
def test_create_metrics_path_with_id(self):
container = Container()
container.id = "testID1"
container.construct_metrics_path()
cpu_path = TEST_SYS_FS / "cpuacct" / "docker" / container.id / "cpuacct.usage"
per_cpu_path = TEST_SYS_FS / "cpuacct" / "docker" / container.id / "cpuacct.usage_percpu"
mem_path = TEST_SYS_FS / "memory" / "docker" / container.id / "memory.usage_in_bytes"
assert container.metric_paths == (cpu_path, per_cpu_path, mem_path)
def test_metrics_path_exists(self, tmp_path):
container = Container()
p1, p2, p3 = tmp_path / "p1", tmp_path / "p2", tmp_path / "p3"
container.metric_paths = (p1, p2, p3)
assert not container.metrics_path_exists()
p1.touch()
assert not container.metrics_path_exists()
p2.touch()
assert not container.metrics_path_exists()
p3.touch()
assert container.metrics_path_exists()
@patch("container.psutil.cpu_times")
def test_read_raw_metrics(self, mock_cpu, tmp_path):
mock_cpu_data = [10, 20, 10, 20, 10, 20, 10, 20]
mock_cpu.return_value = mock_cpu_data
container = Container()
p1, p2, p3 = tmp_path / "p1", tmp_path / "p2", tmp_path / "p3"
content1, content2, content3 = b'123', b'456', b'789'
p1.write_bytes(content1)
p2.write_bytes(content2)
p3.write_bytes(content3)
container.metric_paths = (p1, p2, p3)
raw_metrics = container._read_raw_metrics()
assert isinstance(raw_metrics, RawMetrics)
assert isinstance(raw_metrics.timestamp, float)
assert raw_metrics.cpu == float(content1)
assert raw_metrics.per_cpu == content2
assert raw_metrics.sys_cpu == sum(mock_cpu_data[:7])
assert raw_metrics.memory == float(content3)
def test_sample_metrics_no_path(self):
container = Container()
with pytest.raises(RuntimeError):
container.sample_metrics()
@patch("container.Container._read_raw_metrics")
@patch("container.Container._process_raw_data")
def test_sample_metrics(self, mock_process_data, mock_read_metrics):
container = Container()
container.metric_paths = (1, 2, 3)
mock_read_metrics.side_effect = [1, 2, 3]
def sum_two(prev, cur):
return (prev + cur)
mock_process_data.side_effect = sum_two
container.sample_metrics()
assert container.raw_metrics == [1]
assert container.metrics == []
container.sample_metrics()
assert container.raw_metrics == [1, 2]
assert container.metrics == [3]
container.sample_metrics()
assert container.raw_metrics == [1, 2, 3]
assert container.metrics == [3, 5]
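# The assertions above suggest sample_metrics keeps every raw reading and, from the
# second call on, appends one processed entry per consecutive pair of readings
# (here the mocked processor just sums them: 1 + 2 -> 3, 2 + 3 -> 5).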
@patch("container.ONLINE_CPUS", 4)
def test_process_raw_data(self):
container = Container()
raw_data = [
RawMetrics(
timestamp=2.0, cpu=800000.0, per_cpu=b'300000 0 0 500000 \n', sys_cpu=14000000.00,
memory=6500000),
RawMetrics(
timestamp=3.0, cpu=1000000.0, per_cpu=b'500000 0 0 500000 \n', sys_cpu=14000000.60,
memory=8500000)]
post_data = container._process_raw_data(raw_data[0], raw_data[1])
cpu_delta = (raw_data[1].cpu - raw_data[0].cpu) / 1e9
sys_delta = raw_data[1].sys_cpu - raw_data[0].sys_cpu
assert post_data == Metrics(
timestamp=2.5,
cpu_percent=(cpu_delta/sys_delta)*4*100,
memory=7.50,
)
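# Worked check of the asserted values (interpreting 2.5 and 7.50 as the averages of
# the two hypothetical samples in raw_data):
#   timestamp   = (2.0 + 3.0) / 2                    = 2.5
#   cpu_delta   = (1_000_000 - 800_000) / 1e9        = 0.0002 s
#   sys_delta   = 14_000_000.60 - 14_000_000.00      = 0.60 s
#   cpu_percent = (0.0002 / 0.60) * 4 CPUs * 100     ~ 0.133 %
#   memory      = (6_500_000 + 8_500_000) / 2 / 1e6  = 7.5 MB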
| clara-pipeline-operator-sizing-tool-main | tests/test_container.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import time
from multiprocessing import Manager, Queue
from random import uniform as rand_float
from unittest.mock import MagicMock, call, patch
import pytest
from src.cli import ContinueOptions
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from clarac_utils import OperatorConfig, ServiceConfig # nopep8 # noqa: E402
from container import Metrics # nopep8 # noqa: E402
from pipeline_utils import (_enqueue_output, build_operator_cmd, clean_up_containers, # nopep8 # noqa: E402
get_output_writers, print_operator_summary, print_pipeline_summary, run_pipeline,
run_pipeline_alone, run_pipeline_with_services, sample_operator, start_operator,
start_pipeline_services)
from triton_utils import RUN_MODE # nopep8 # noqa: E402
def test_enqueue_output(tmp_path):
file_path = tmp_path / "test_enqueue"
data = b"1255\n1233\n"
file_path.write_bytes(data)
q = Queue()
opened_file = file_path.open("rb")
_enqueue_output(opened_file, q)
assert q.get(timeout=1) == b"1255\n"
assert q.get(timeout=1) == b"1233\n"
@patch("pipeline_utils.Popen")
def test_start_operator(mock_popen):
raw_container_id = b'8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5\n'
actual_container_id = raw_container_id.decode('utf-8').strip()
mock_popen.return_value = MagicMock(**{"returncode": 0,
"poll.return_value": None,
"stdout.readline.return_value": raw_container_id,
"stdout.close.return_value": None})
manager = Manager()
expected_container_id = manager.Value('c_wchar_p', '')
mock_event = MagicMock()
cmd = ["some", "docker", "run", "command"]
start_operator(expected_container_id, mock_event, cmd)
assert actual_container_id == expected_container_id.value
mock_event.set.assert_called_once()
@patch("pipeline_utils.Process")
@patch("pipeline_utils.Popen")
def test_start_operator_popen_error(mock_popen, mock_multi_process):
mock_exit_msg = "exiting because of error"
mock_popen.return_value = MagicMock(**{"returncode": 1, "stderr.read.return_value": mock_exit_msg.encode("UTF-8")})
manager = Manager()
expected_container_id = manager.Value('c_wchar_p', '')
mock_event = MagicMock()
cmd = ['some', 'docker', 'run', 'command']
with pytest.raises(SystemExit) as exc:
start_operator(expected_container_id, mock_event, cmd)
assert mock_exit_msg in str(exc.value)
mock_event.set.assert_not_called()
@pytest.mark.parametrize("mock_exitcode, expected_code", [(b'0\n', None), (b'125\n', 125), (b'error\n', 1)])
@patch("pipeline_utils.Queue")
@patch("pipeline_utils.subproc_run")
@patch("pipeline_utils.Process")
@patch("pipeline_utils.Popen")
def test_start_operator_docker_error(
mock_popen, mock_multi_process, mock_subproc_run, mock_q, mock_exitcode, expected_code):
mock_popen.return_value = MagicMock(**{"returncode": None, "poll.return_value": None})
mock_q.return_value = MagicMock(
**{"get_nowait.return_value": b'8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5\n'})
mock_subproc_run.return_value = MagicMock(**{"returncode": None, "stdout": mock_exitcode})
manager = Manager()
expected_container_id = manager.Value('c_wchar_p', '')
mock_event = MagicMock()
cmd = ['some', 'docker', 'run', 'command']
if mock_exitcode == b'0\n':
start_operator(expected_container_id, mock_event, cmd)
else:
with pytest.raises(SystemExit) as exc:
start_operator(expected_container_id, mock_event, cmd)
assert exc.value.code == expected_code
mock_event.set.assert_called_once()
def test_sample_operator_logic():
mock_q = MagicMock()
mock_container = MagicMock()
mock_container.metrics_path_exists.side_effect = [0, 1, 1, 0]
mock_container.sample_metrics.return_value = None
sample_operator(mock_container, mock_q)
assert mock_container.method_calls == [
call.metrics_path_exists(),
call.metrics_path_exists(),
call.metrics_path_exists(),
call.sample_metrics(),
call.metrics_path_exists(),
]
assert mock_q.put.call_count == 2
assert mock_q.put.call_args_list == [call(None), call(0)]
@pytest.mark.parametrize("sampling_time,expected", [(rand_float(0.0001, 0.19), [0.2]), (0.3, [0.3])])
def test_sample_operator_sampling_rate(sampling_time, expected):
mock_q = MagicMock()
mock_container = MagicMock()
sampling_num = 10
mock_container.metrics_path_exists.side_effect = [0, 1] + [1] * sampling_num + [0]
result_timestamps = []
def mock_sample():
"""Mock sampling function that appends a timestamp to a list."""
timestamp = time.perf_counter()
time.sleep(sampling_time)
result_timestamps.append(timestamp)
mock_container.sample_metrics = mock_sample
sample_operator(mock_container, mock_q)
assert len(result_timestamps) == sampling_num, "The number of samples does not match with expected."
result_diffs = [round(j - i, 1) for i, j in zip(result_timestamps[:-1], result_timestamps[1:])]
assert result_diffs == expected * (sampling_num - 1), "Something is wrong with the accuracy of time.sleep()"
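# The 0.2 s spacing expected for fast samples matches METRIC_SAMPLING_PERIOD_SECONDS
# in src/constants.py; in the 0.3 s case the sample itself outlasts the period, so
# the spacing is presumably governed by the sample duration instead.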
# autopep8: off
@pytest.mark.parametrize(
"op_config, expected_args",
[
pytest.param(
OperatorConfig("op_name", "image:tag", None, {"VAR0": 2, "VAR1": "hi"}, [{"path": "/input"}], [{"path": "/output"}]),
["--env", "VAR0=2", "--env", "VAR1=hi", "-v", "%tmp%/app_data:/input", "-v", "%tmp%/op_name:/output", "image:tag"], id="with_ENV_VAR"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, None, None),
["image:tag"], id="no_input_output"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, [{"path": "/input"}], [{"path": "/output"}]),
["-v", "%tmp%/app_data:/input", "-v", "%tmp%/op_name:/output", "image:tag"], id="min_input_output"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, [{"from": "liver", "path": "/input"}], None),
["-v", "%tmp%/liver:/input", "image:tag"], id="input_contains_from"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, [{"from": "liver", "name": "classification", "path": "/input"}, {"path": "/dcm"}], None),
["-v", "%tmp%/liver/classification:/input", "-v", "%tmp%/app_data:/dcm", "image:tag"], id="double_inputs"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, [{"path": "/input"}], [{"name": "logs", "path": "/output"}]),
["-v", "%tmp%/app_data:/input", "-v", "%tmp%/op_name/logs:/output", "image:tag"], id="named_output"
),
pytest.param(
OperatorConfig("op_name", "image:tag", ["some", "command"], None, [{"path": "/input"}], [{"path": "/output"}]),
["-v", "%tmp%/app_data:/input", "-v", "%tmp%/op_name:/output", "image:tag", "some", "command"], id="image_with_command"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, None, None, ["model1"]),
["--env", "NVIDIA_TRITON_HTTPURI=localhost:8000", "--env", "CLARA_TRITON_URI=localhost:8000", "--env", "NVIDIA_CLARA_TRTISURI=localhost:8000", "--env", "NVIDIA_TRITON_GRPCURI=localhost:8001", "image:tag"], id="model_repo"
),
pytest.param(
OperatorConfig("op_name", "image:tag", None, None, None, None, None, [ServiceConfig("name", "it", None, None)]),
["image:tag"], id="pipeline_services"
),
pytest.param(
OperatorConfig("op_name", "image:tag", ["some", "command"], {"VAR0": 2},
[{"from": "liver", "name": "classification", "path": "/input"}, {"path": "/dcm"}],
[{"name": "dicom", "path": "/output"}, {"name": "logs", "path": "/logs"}]),
["--env", "VAR0=2", "-v", "%tmp%/liver/classification:/input", "-v", "%tmp%/app_data:/dcm",
"-v", "%tmp%/op_name/dicom:/output", "-v", "%tmp%/op_name/logs:/logs", "image:tag", "some", "command"],
id="all_in_one"
),
],
)
# autopep8: on
def test_build_operator_cmd(tmp_path, op_config, expected_args):
input_path = tmp_path / "app_data"
def swap_tmp(temp_dir, args):
return [re.sub(r'%tmp%', temp_dir, i) for i in args]
expected_args = swap_tmp(str(tmp_path), expected_args)
config = op_config
result_cmd = build_operator_cmd(input_path, tmp_path, config, "localhost")
assert (tmp_path / "op_name").is_dir()
assert result_cmd == ["docker", "run", "-d", "--rm", "--env", "NVIDIA_CLARA_NOSYNCLOCK=1"] + expected_args
def test_print_operator_summary(caplog):
metrics = [Metrics(1.5, 10, 20), Metrics(1.5, 20, 20), Metrics(1.5, 30, 25)]
with caplog.at_level(logging.INFO):
print_operator_summary(metrics, "operator_name")
# [1] only gets the table section
messages = [rec.getMessage() for rec in caplog.records][1]
messages = messages.split("\n")
cpu_line = messages[3]
mem_line = messages[4]
assert "CPU" in cpu_line
assert "20" in cpu_line
assert "30" in cpu_line
assert "Memory" in mem_line
assert "21.6" in mem_line
assert "25" in mem_line
@pytest.mark.parametrize("run_mode", [RUN_MODE.NO_INFERENCE_SERVER, RUN_MODE.MODEL_REPO, RUN_MODE.PIPELINE_SERVICES])
@patch("pipeline_utils.run_pipeline_with_services")
@patch("pipeline_utils.run_pipeline_alone")
@patch("pipeline_utils.run_triton_model_repo")
@patch("pipeline_utils.decide_method_to_run_triton")
def test_run_pipeline(mock_decide, mock_run_triton, mock_run_alone, mock_run_services, run_mode):
mock_decide.return_value = run_mode
mock_run_triton.return_value.__enter__.return_value = MagicMock()
run_pipeline([], None, None, None, ContinueOptions.NONE)
if run_mode == RUN_MODE.NO_INFERENCE_SERVER:
mock_run_triton.assert_not_called()
mock_run_alone.assert_called_once()
mock_run_services.assert_not_called()
elif run_mode == RUN_MODE.MODEL_REPO:
mock_run_triton.assert_called_once()
mock_run_alone.assert_called_once()
mock_run_services.assert_not_called()
elif run_mode == RUN_MODE.PIPELINE_SERVICES:
mock_run_triton.assert_not_called()
mock_run_alone.assert_not_called()
mock_run_services.assert_called_once()
def test_get_output_writers(tmp_path):
mock_writer = MagicMock(**{"join.return_value": None})
with get_output_writers(tmp_path) as writers:
assert writers == []
writers.append(mock_writer)
assert mock_writer.join.call_count == 1
def test_get_no_output_writers():
with get_output_writers(None) as writers:
assert writers is None
@patch("pipeline_utils.build_operator_cmd")
@patch("pipeline_utils.run_operator")
@patch("pipeline_utils.TemporaryDirectory")
def test_run_pipeline_alone(mock_temp_file, mock_run_operator, mock_build_cmd, tmp_path):
mock_temp_file.return_value.__enter__.return_value = "tmp_file_name"
mock_run_operator.side_effect = [None, True, None]
m1, m2, m3 = MagicMock(**{"name": "1"}), MagicMock(**{"name": "2"}), MagicMock(**{"name": "3"})
execution_order = [m1, m2, m3]
run_pipeline_alone(execution_order, tmp_path, None, ContinueOptions.NONE, None)
assert len(mock_run_operator.call_args_list) == 2
assert m1 in mock_run_operator.call_args_list[0].args
assert m2 in mock_run_operator.call_args_list[1].args
@patch("pipeline_utils.subproc_run_wrapper")
def test_clean_up_containers(mock_subproc_run_wrapper):
running_containers = {"image1": ("ID1", "ip_address")}
clean_up_containers(running_containers)
assert mock_subproc_run_wrapper.call_args.args[0] == ["docker", "kill", "ID1"]
assert running_containers == {}
@patch("pipeline_utils.start_triton")
@patch("pipeline_utils.clean_up_containers")
def test_start_pipeline_services(mock_clean_up_containers, mock_start_triton):
container_info = ("container_id_123", "ip_address")
mock_start_triton.return_value = container_info
service_config_1 = ServiceConfig("trtis", "image_tag", ["some", "cmd"], {"VAR": "port_num"})
op_config_1 = OperatorConfig("name", None, None, None, None, None, None, [service_config_1])
services_dict = {}
start_pipeline_services(op_config_1, services_dict, "some-dir")
assert services_dict["image_tag some cmd"] == container_info
assert op_config_1.variables == {"VAR": "ip_address:port_num"}
assert mock_start_triton.call_count == 1
# Same service -> no new services created
start_pipeline_services(op_config_1, services_dict, "some-dir")
assert services_dict["image_tag some cmd"] == container_info
assert op_config_1.variables == {"VAR": "ip_address:port_num"}
assert mock_start_triton.call_count == 1
# Different service -> new service created
service_config_2 = ServiceConfig("trtis", "image_tag2", ["some", "cmd"], {"VAR": "port_num2"})
op_config_2 = OperatorConfig("name", None, None, None, None, None, None, [service_config_2])
start_pipeline_services(op_config_2, services_dict, "some-dir")
mock_clean_up_containers.assert_called_once()
assert services_dict["image_tag2 some cmd"] == container_info
assert op_config_2.variables == {"VAR": "ip_address:port_num2"}
assert mock_start_triton.call_count == 2
@patch("pipeline_utils.start_triton")
@patch("pipeline_utils.clean_up_containers")
def test_start_service_not_supported(mock_clean_up_containers, mock_start_triton, caplog):
service_config_1 = ServiceConfig("other service", "image_tag", ["some", "cmd"], {"VAR": "value"})
op_config_1 = OperatorConfig("name", None, None, None, None, None, None, [service_config_1])
services_dict = {}
with caplog.at_level(logging.WARNING):
start_pipeline_services(op_config_1, services_dict, "some-dir")
messages = [rec.getMessage() for rec in caplog.records]
mock_clean_up_containers.assert_not_called()
mock_start_triton.assert_not_called()
assert "does not support" in messages[0]
assert "Skipping `other service`" in messages[1]
@patch("pipeline_utils.clean_up_containers")
@patch("pipeline_utils.build_operator_cmd")
@patch("pipeline_utils.start_pipeline_services")
@patch("pipeline_utils.run_operator")
@patch("pipeline_utils.TemporaryDirectory")
def test_run_pipeline_with_services(
mock_temp_file, mock_run_operator, mock_start_pipeline_services, mock_build_cmd, mock_clean_up_containers,
tmp_path):
def mock_add_dict(op, services_dict, *args):
services_dict["name"] = "cont_id"
mock_start_pipeline_services.side_effect = mock_add_dict
mock_temp_file.return_value.__enter__.return_value = "tmp_file_name"
mock_run_operator.side_effect = [None, True, None]
mock_config1 = MagicMock(**{"services": True})
mock_config2 = MagicMock(**{"services": False})
execution_order = [mock_config1, mock_config2, mock_config2]
run_pipeline_with_services(execution_order, tmp_path, None, tmp_path, ContinueOptions.NONE)
assert len(mock_run_operator.call_args_list) == 2
mock_start_pipeline_services.assert_called_once()
assert mock_build_cmd.call_count == 2
mock_clean_up_containers.assert_called_once()
@patch("pipeline_utils.tabulate")
def test_print_pipeline_summary(mock_tabulate):
raw_data = {
'dicom-reader':
[['CPU', '130.407 %', '732.975 %', 'cpu: 8'],
['Memory', '109.309 MB', '431.407 MB', 'memory: 512']],
'spleen-segmentation':
[['CPU', '126.747 %', '1144.132 %', 'cpu: 12'],
['Memory', '1403.712 MB', '4339.55 MB', 'memory: 8192']],
'dicom-writer':
[['CPU', '168.027 %', '676.498 %', 'cpu: 7'],
['Memory', '481.506 MB', '866.976 MB', 'memory: 1024']],
'register-dicom-output\n(Non-zero exitcode)':
[['CPU', '14.524 %', '18.102 %', 'cpu: 1'],
['Memory', '2.074 MB', '2.589 MB', 'memory: 4']]}
print_pipeline_summary(raw_data)
# This format is desired to keep the display result from tabulate clean
assert mock_tabulate.call_args.args[0] == [
['dicom-reader', 'CPU\nMemory', '130.407 %\n109.309 MB', '732.975 %\n431.407 MB', 'cpu: 8\nmemory: 512'],
['spleen-segmentation', 'CPU\nMemory', '126.747 %\n1403.712 MB', '1144.132 %\n4339.55 MB', 'cpu: 12\nmemory: 8192'],
['dicom-writer', 'CPU\nMemory', '168.027 %\n481.506 MB', '676.498 %\n866.976 MB', 'cpu: 7\nmemory: 1024'],
['register-dicom-output\n(Non-zero exitcode)', 'CPU\nMemory', '14.524 %\n2.074 MB', '18.102 %\n2.589 MB',
'cpu: 1\nmemory: 4']]
| clara-pipeline-operator-sizing-tool-main | tests/test_pipeline_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from clarac_utils import OperatorConfig # nopep8 # noqa: E402
from topology_sort import PipelineDAG, topo_sort_pipeline # nopep8 # noqa: E402
def test_topo_sort():
g = PipelineDAG()
g.add_input_edge(2, 5)
g.add_input_edge(0, 5)
g.add_input_edge(0, 4)
g.add_input_edge(1, 4)
g.add_input_edge(3, 2)
g.add_input_edge(1, 3)
assert g.topological_sort() == [5, 4, 2, 0, 3, 1]
def test_topo_sort_2():
g = PipelineDAG()
g.add_input_edge(2, 1)
g.add_input_edge(3, 2)
g.add_input_edge(4, 3)
assert g.topological_sort() == [1, 2, 3, 4]
def test_topo_sort_error():
g = PipelineDAG()
g.add_input_edge(2, 1)
g.add_input_edge(3, 2)
g.add_input_edge(1, 3)
with pytest.raises(RuntimeError):
g.topological_sort()
def test_a_pipeline():
op1 = OperatorConfig("Input1", "tag", None, None, [{"path": "/input"}], None)
op2 = OperatorConfig("Input2", "tag", None, None, [{"from": "Input1", "path": "/input"}], None)
op3 = OperatorConfig("Input3", "tag", None, None, [{"from": "Input2", "path": "/input"}], None)
sequence = topo_sort_pipeline([op2, op3, op1])
assert sequence == [op1, op2, op3]
def test_a_single_operator_pipeline():
op1 = OperatorConfig("Input1", "tag", None, None, [{"path": "/input"}], None)
sequence = topo_sort_pipeline([op1])
assert sequence == [op1]
def test_two_operator_pipeline():
op1 = OperatorConfig("Input1", "tag", None, None, [{"path": "/input"}], None)
op2 = OperatorConfig("Input2", "tag", None, None, [{"from": "Input1", "path": "/input"}], None)
sequence = topo_sort_pipeline([op2, op1])
assert sequence == [op1, op2]
def test_complex_pipeline():
op1 = OperatorConfig("Input1", "tag", None, None, [{"path": "/input"}], None)
op2 = OperatorConfig("Input2", "tag", None, None, [{"path": "/input"}], None)
op3 = OperatorConfig("Input3", "tag", None, None,
[{"from": "Input1", "path": "/input"},
{"from": "Input2", "path": "/input"}],
None)
op4 = OperatorConfig("Input4", "tag", None, None, [{"from": "Input2", "path": "/input"}], None)
op5 = OperatorConfig("Input5", "tag", None, None,
[{"from": "Input3", "path": "/input"},
{"from": "Input4", "path": "/input"}],
None)
sequence = topo_sort_pipeline([op3, op4, op1, op2, op5])
assert sequence == [op1, op2, op3, op4, op5]
| clara-pipeline-operator-sizing-tool-main | tests/test_topology_sort.py |
| clara-pipeline-operator-sizing-tool-main | tests/__init__.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
from dataclasses import dataclass
from typing import List
from unittest.mock import MagicMock, patch
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from triton_utils import (RUN_MODE, _extract_models_from_configs, check_models_directory, # nopep8 # noqa: E402
check_triton_status, decide_method_to_run_triton, inspect_ip_address, run_triton_model_repo,
start_triton)
@pytest.fixture(scope="function")
def create_triton_models_dir(tmp_path):
"""Custom Pytest fixture to mock triton models directory.
Args:
model_names: List of str representing triton model names.
Returns:
None
"""
def _func(model_names):
# Create the folders needed and some extra models in that directory
for dir_name in model_names:
config_file = tmp_path / "models" / dir_name / "config.pbtxt"
config_file.parent.mkdir(parents=True, exist_ok=True)
file_content = f'name: "{dir_name}"\n'
config_file.write_text(file_content)
yield _func
def test_fixture_create_models_dir(tmp_path, create_triton_models_dir):
names = ["liver", "heart"]
create_triton_models_dir(names)
assert sorted(os.listdir(str(tmp_path / "models"))) == sorted(names)
@dataclass
class MockConfig:
models: List[str] = None
@pytest.mark.parametrize("configs, expected", [
([MockConfig(), MockConfig(["m1", "m2"]), MockConfig(["m3"]), MockConfig(), MockConfig(["m4", "m5", "m6"])],
["m1", "m2", "m3", "m4", "m5", "m6"]),
([MockConfig(), MockConfig()], []),
([MockConfig(["m1", "m2"]), MockConfig(["m1"])], ["m1", "m2"])
])
def test_extract_models_from_configs(configs, expected):
result = _extract_models_from_configs(configs)
assert sorted(result) == expected
@patch("triton_utils._extract_models_from_configs")
def test_check_model_repository_no_models_needed(mock_models, tmp_path):
mock_models.return_value = []
mock_configs = MagicMock()
result = check_models_directory(mock_configs, tmp_path)
assert result == []
@patch("triton_utils._extract_models_from_configs")
def test_check_model_repository_no_model_dir(mock_models):
mock_models.return_value = ["liver", "spleen", "heart"]
mock_configs = MagicMock()
with pytest.raises(SystemExit):
check_models_directory(mock_configs, None)
@pytest.mark.parametrize("mock_models, dir_name, file_content", [
pytest.param(["liver"], "liver", 'name: "segmentation_liver_v1"\n', id="content_not_match"),
pytest.param(["liver"], "liver_seg", 'name: "liver"\n', id="dir_name_not_match"),
pytest.param(["liver", "heart"], "liver", 'name: "liver"\n', id="missing_model")
])
@patch("triton_utils._extract_models_from_configs")
def test_check_model_repository_bad_input(mock_func, mock_models, dir_name, file_content, tmp_path):
mock_func.return_value = mock_models
mock_configs = MagicMock()
config_file = tmp_path / "models" / dir_name / "config.pbtxt"
config_file.parent.mkdir(parents=True)
config_file.write_text(file_content)
with pytest.raises(SystemExit):
check_models_directory(mock_configs, config_file.parents[1])
@pytest.mark.parametrize("mock_models", [
pytest.param(["liver"], id="one_model"),
pytest.param(["liver", "spleen", "heart"], id="three_models"),
])
@patch("triton_utils._extract_models_from_configs")
def test_check_model_repository_good_input(mock_func, mock_models, tmp_path, create_triton_models_dir):
mock_func.return_value = mock_models
mock_configs = MagicMock()
create_triton_models_dir(mock_models + ["eyes", "lung"])
result = check_models_directory(mock_configs, tmp_path / "models")
assert sorted(result) == sorted(mock_models)
@pytest.mark.parametrize("mock_configs, exp_mode", [
pytest.param([MagicMock(**{"models": True, "services": None})], RUN_MODE.MODEL_REPO, id="model_repo"),
pytest.param([MagicMock(**{"models": None, "services": True})], RUN_MODE.PIPELINE_SERVICES, id="services"),
pytest.param([MagicMock(**{"models": None, "services": None})], RUN_MODE.NO_INFERENCE_SERVER, id="neither"),
])
def test_decide_method_to_run_triton(mock_configs, exp_mode):
assert decide_method_to_run_triton(mock_configs) == exp_mode
def test_decide_method_to_run_triton_error():
mock_configs = [MagicMock(**{"models": True, "services": True})]
with pytest.raises(SystemExit):
decide_method_to_run_triton(mock_configs)
@pytest.mark.parametrize(
"model_names, mock_reponses",
[
pytest.param(
[],
[MagicMock(**{"status_code": 200})],
id="no_model_names"),
pytest.param(
["model1"],
[MagicMock(**{"status_code": 200, "text": None}), MagicMock(**{"status_code": 200, "text": None})],
id="1_model_name"),
]
)
@patch("triton_utils.TRITON_WAIT_SLEEP_TIME_SECONDS", 0)
@patch("triton_utils.TRITON_WAIT_TIME_SECONDS", 0)
@patch("triton_utils.requests")
def test_check_triton_status_200(mock_requests, model_names, mock_responses):
mock_requests.configure_mock(**{"ConnectionError": ValueError})
mock_requests.get.side_effect = mock_responses
check_triton_status(triton_models_names=model_names, host="some_host", port="1234")
assert "http://some_host:1234" in mock_requests.get.call_args.args[0]
@pytest.mark.parametrize(
"model_names, mock_reponses, exp_msg",
[
pytest.param(
[],
[MagicMock(**{"status_code": 400, "text": "some msg"})],
"Triton is not working", id="no_model_names"),
pytest.param(
["model1"],
[MagicMock(**{"status_code": 200, "text": None}), MagicMock(**{"status_code": 400, "text": "some msg"})],
"Error:", id="1_model_name"),
]
)
@patch("triton_utils.TRITON_WAIT_SLEEP_TIME_SECONDS", 0)
@patch("triton_utils.TRITON_WAIT_TIME_SECONDS", 0)
@patch("triton_utils.requests")
def test_check_triton_status_error(mock_requests, model_names, mock_responses, exp_msg):
mock_requests.configure_mock(**{"ConnectionError": ValueError})
mock_requests.get.side_effect = mock_responses
with pytest.raises(SystemExit) as exc:
check_triton_status(triton_models_names=model_names)
assert exp_msg in str(exc.value)
@patch("triton_utils.subproc_run_wrapper")
def test_inspect_ip_address(mock_subproc_run_wrapper):
mock_subproc_run_wrapper.return_value = "'125.12.199.0'"
result = inspect_ip_address("container_name")
assert result == "125.12.199.0"
@pytest.mark.parametrize("model_names", [["spleen", "arm", "legs"], []])
@patch("triton_utils.check_triton_status")
@patch("triton_utils.inspect_ip_address")
@patch("triton_utils.subproc_run_wrapper")
def test_start_triton(mock_subproc_run_wrapper, mock_inspect, mock_check_triton_status, model_names):
mock_subproc_run_wrapper.return_value = "container_id"
mock_inspect.return_value = "ip_address"
result = start_triton("models", ["some", "command"], triton_models_names=model_names)
assert result == ("container_id", "ip_address")
# Check that all the models used are listed in the call args for subproc_run_wrapper
if model_names != []:
for name in model_names:
assert f"--load-model={name}" in mock_subproc_run_wrapper.call_args_list[0].args[0]
@patch("triton_utils.subproc_run_wrapper")
@patch("triton_utils.check_models_directory")
@patch("triton_utils.start_triton")
def test_run_triton_model_repo(mock_start_triton, mock_check_dir, mock_subproc_run_wrapper):
triton_models_names = ["spleen", "arm", "legs"]
mock_check_dir.return_value = triton_models_names
process_mock = MagicMock()
process_mock.configure_mock(**{"returncode": None, "terminate.return_value": None})
mock_start_triton.return_value = ("container_id", "ip_address")
with run_triton_model_repo([], "some_dir"):
pass
mock_subproc_run_wrapper.assert_called_once()
assert "container_id" in mock_subproc_run_wrapper.call_args_list[0].args[0]
| clara-pipeline-operator-sizing-tool-main | tests/test_triton_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from argparse import ArgumentTypeError
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from cli import ContinueOptions, parse_args # nopep8 # noqa: E402
@pytest.fixture(scope="function")
def file_maker(tmp_path):
"""A function scoped pytest fixture to return the path of a temporary file."""
file_path = tmp_path / "pipeline_defn"
file_path.touch()
return str(file_path)
def swap_pattern(pattern, substitute, args):
"""Helper method to substitute a pattern in args for cleaner tests."""
return [re.sub(pattern, substitute, i) for i in args]
def test_swap_pattern():
args = ["%tmp_file%", "some_input_dir", "%tmp%", "hello", "%tmp%"]
result = swap_pattern("%tmp%", "abc", args)
assert result == ["%tmp_file%", "some_input_dir", "abc", "hello", "abc"]
@pytest.mark.parametrize("input_args", [["%tmp_file%"], [], ["-x"], ["-v"]])
def test_missing_required_args(input_args, file_maker, capsys):
input_args = swap_pattern(r'%tmp_file%', file_maker, input_args)
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args(input_args)
out, err = capsys.readouterr()
assert "" == out
assert "error: the following arguments are required" in err
assert "usage: cpost" in err
assert pytest_wrapped_e.value.code == 2
@pytest.mark.parametrize("input_args, error",
[
(["some_pipeline_path", "some_input_dir"], ArgumentTypeError),
(["/tmp", "/tmp"], ArgumentTypeError),
(["%tmp_file%", "some_input_dir"], ArgumentTypeError),
(["%tmp_file%", "%tmp_file%"], ArgumentTypeError),
(["%tmp_file%", "/tmp", "--metrics_dir", "some_dir"], ArgumentTypeError),
(["%tmp_file%", "/tmp", "--metrics_dir", "%tmp_file%"], ArgumentTypeError),
(["%tmp_file%", "/tmp", "--models_dir", "some_dir"], ArgumentTypeError),
(["%tmp_file%", "/tmp", "--models_dir", "%tmp_file%"], ArgumentTypeError)
])
def test_invalid_path(input_args, error, file_maker):
input_args = swap_pattern(r'%tmp_file%', file_maker, input_args)
with pytest.raises(SystemExit) as pytest_wrapped_e:
with pytest.raises(error) as excinfo:
parse_args(input_args)
assert "No such" in str(excinfo.value)
assert pytest_wrapped_e.value.code == 2
@pytest.mark.parametrize("optional_dir_specified", [True, False])
def test_valid_path(optional_dir_specified, tmp_path, file_maker):
input_dir = tmp_path / test_valid_path.__name__
input_dir.mkdir()
pipeline = file_maker
if not optional_dir_specified:
input_args = [pipeline, str(input_dir)]
parsed = parse_args(input_args)
assert parsed.input_dir == input_dir
assert str(parsed.pipeline_path) == pipeline
assert parsed.metrics_dir is None
assert parsed.models_dir is None
assert parsed.force == ContinueOptions.NONE
else:
metrics_dir = tmp_path / "test_output_metrics"
metrics_dir.mkdir()
models_dir = tmp_path / "model_repo"
models_dir.mkdir()
input_args = [pipeline, str(input_dir), "--metrics_dir", str(metrics_dir), "--models_dir", str(models_dir)]
parsed = parse_args(input_args)
assert parsed.input_dir == input_dir
assert str(parsed.pipeline_path) == pipeline
assert parsed.metrics_dir == metrics_dir
assert parsed.models_dir == models_dir
assert parsed.force == ContinueOptions.NONE
@pytest.mark.parametrize("force_args, exp_option",
[(["--force", "cont"], ContinueOptions.CONT),
(["--force=cont"], ContinueOptions.CONT),
([], ContinueOptions.NONE),
(["--force", "none"], ContinueOptions.NONE),
(["--force", "stop"], ContinueOptions.STOP)])
def test_parse_force_options(force_args, exp_option, tmp_path, file_maker):
input_dir = tmp_path / test_parse_force_options.__name__
input_dir.mkdir()
pipeline = file_maker
input_args = force_args + [pipeline, str(input_dir)]
parsed = parse_args(input_args)
assert parsed.input_dir == input_dir
assert str(parsed.pipeline_path) == pipeline
assert parsed.metrics_dir is None
assert parsed.models_dir is None
assert parsed.force == exp_option
@pytest.mark.parametrize("force_args, err_msg",
[(["--force", "continue"], "argument --force: invalid choice: 'continue'"),
(["--force"], "argument --force: invalid choice:"),
(["--force", "aaaa"], "argument --force: invalid choice: 'aaaa'")])
def test_parse_force_options_error(force_args, err_msg, tmp_path, capsys, file_maker):
input_dir = tmp_path / test_parse_force_options_error.__name__
input_dir.mkdir()
pipeline = file_maker
input_args = force_args + [pipeline, str(input_dir)]
with pytest.raises(SystemExit):
parse_args(input_args)
out, err = capsys.readouterr()
assert err_msg in err
| clara-pipeline-operator-sizing-tool-main | tests/test_cli.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from unittest.mock import MagicMock, patch
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from main import main # nopep8 # noqa: E402
@patch("main.run_pipeline")
@patch("main.topo_sort_pipeline")
@patch("main.check_images_and_tags")
@patch("main.run_clarac")
@patch("main.assert_installed")
@patch("main.set_up_logging")
@patch("main.parse_args")
def test_main(mock_parse, mock_set_logging, mock_assert_install, mock_run_clarac, mock_check, mock_sort, mock_run):
mock_parse.return_value = MagicMock(**{"verbose": 2, "pipeline_path": "some_path"})
mock_run_clarac.return_value = MagicMock(**{"operators": "operators"})
main()
mock_set_logging.assert_called_with(2)
assert mock_assert_install.call_count == 2
mock_run_clarac.assert_called_with("some_path")
mock_check.assert_called_with("operators")
mock_sort.assert_called_with("operators")
mock_run.assert_called_once()
| clara-pipeline-operator-sizing-tool-main | tests/test_main.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
sys.path.append("{}/{}".format(os.path.dirname(os.path.realpath(__file__)), "../src"))
from clarac_utils import OperatorConfig, PipelineConfig, ServiceConfig, run_clarac # nopep8 # noqa: E402
@pytest.mark.parametrize("og_variables, exp_variables",
[(None, {"a": 1, "b": 2}),
({"c": 3},
{"c": 3, "a": 1, "b": 2})])
def test_op_config_update_variables(og_variables, exp_variables):
new_variables = {"a": 1, "b": 2}
op = OperatorConfig("op1", "image_tag", None, og_variables, None, None)
op.update_variables(new_variables)
assert op.variables == exp_variables
@patch("clarac_utils.subproc_run")
def test_run_clarac_subproc_error(mock_subproc_run, tmp_path):
mock_subproc_run.return_value = MagicMock(**{"returncode": 1, "stderr": "some error"})
with pytest.raises(SystemExit):
run_clarac(tmp_path)
@patch("clarac_utils.NamedTemporaryFile")
@patch("clarac_utils.subproc_run")
def test_run_clarac_yaml_error(mock_subproc_run, mock_temp_file, tmp_path):
mock_subproc_run.return_value = MagicMock(**{"returncode": 0, "stdout": "some output"})
mock_file = tmp_path / "bad.yaml"
mock_file.touch()
mock_file.write_text("api-version: '0.4.0'\n name: null-pipeline")
with open(mock_file) as mock_file_obj:
mock_temp_file.return_value.__enter__.return_value = mock_file_obj
with pytest.raises(SystemExit):
run_clarac(tmp_path)
@pytest.mark.skip("Skipping due to pipeline setup for clarac is incomplete")
def test_run_clarac():
pipeline_file = Path(__file__).parent / "pipelines" / ("nullpipeline.yaml")
config = run_clarac(pipeline_file)
assert isinstance(config, PipelineConfig)
assert config.name == "null-pipeline"
assert len(config.operators) == 3
op = config.operators[0]
assert op.name == "null-reader"
assert op.image_n_tag == "null-pipeline/operator-py:0.8.1"
assert op.command is None
assert op.variables == {"CLARA_TRACE": 2}
assert op.inputs == [{"name": None, "path": "/input"}]
assert op.outputs == [{"name": None, "path": "/output"}]
assert op.models is None
assert op.services is None
op = config.operators[1]
assert op.name == "null-inference"
assert op.image_n_tag == "null-pipeline/operator-py:0.8.1"
assert op.command is None
assert op.variables == {"CLARA_TRACE": 2}
assert op.inputs == [{"from": "null-reader", "name": None, "path": "/input"}]
assert op.outputs == [{"name": None, "path": "/output"}]
assert op.models is None
assert op.services is None
op = config.operators[2]
assert op.name == "null-writer"
assert op.image_n_tag == "null-pipeline/operator-py:0.8.1"
assert op.command is None
assert op.variables == {"CLARA_TRACE": 2}
assert op.inputs == [{"from": "null-inference", "name": None, "path": "/input"}]
assert op.outputs == [{"name": None, "path": "/output"}]
assert op.models is None
assert op.services is None
@pytest.mark.skip("Skipping due to pipeline setup for clarac is incomplete")
def test_run_clarac_with_triton_models():
pipeline_file = Path(__file__).parent / "pipelines" / ("operator_with_model.yaml")
config = run_clarac(pipeline_file)
assert isinstance(config, PipelineConfig)
assert config.name == "null-pipeline"
assert len(config.operators) == 1
op = config.operators[0]
assert op.name == "null-reader"
assert op.image_n_tag == "null-pipeline/operator-py:0.8.1"
assert op.inputs == [{"name": None, "path": "/input"}]
assert op.outputs == [{"name": None, "path": "/output"}]
assert op.command == ["python", "register.py", "--agent", "renderserver"]
assert op.models == ["segmentation_ct_spleen_v1", "segmentation_ct_liver_v1"]
assert op.services is None
@pytest.mark.skip("Skipping due to pipeline setup for clarac is incomplete")
def test_run_clarac_with_pipeline_services():
pipeline_file = Path(__file__).parent / "pipelines" / ("operator_with_services.yaml")
config = run_clarac(pipeline_file)
assert isinstance(config, PipelineConfig)
assert config.name == "null-pipeline"
assert len(config.operators) == 1
op = config.operators[0]
assert op.name == "null-reader"
assert op.image_n_tag == "null-pipeline/operator-py:0.8.1"
assert op.inputs == [{"name": None, "path": "/input"}]
assert op.outputs == [{"name": None, "path": "/output"}]
assert op.command is None
assert op.models is None
assert len(op.services) == 1
op_service = op.services[0]
assert isinstance(op_service, ServiceConfig)
assert op_service.name == "trtis"
assert op_service.image_n_tag == "nvcr.io/nvidia/tritonserver:latest"
assert op_service.command == ["some", "command"]
assert op_service.http_connections == {"NVIDIA_CLARA_TRTISURI": 8000}
| clara-pipeline-operator-sizing-tool-main | tests/test_clarac_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import time
from contextlib import contextmanager
from enum import Enum, auto
from typing import List
import requests
from clarac_utils import OperatorConfig
from constants import (TRITON_HTTP_PORT, TRITON_IMAGE_TAG, TRITON_READY_TIMEOUT_SECONDS,
TRITON_WAIT_SLEEP_TIME_SECONDS, TRITON_WAIT_TIME_SECONDS)
from utils import subproc_run_wrapper
class RUN_MODE(Enum):
NO_INFERENCE_SERVER = auto()
MODEL_REPO = auto()
PIPELINE_SERVICES = auto()
def _extract_models_from_configs(op_configs: List[OperatorConfig]):
"""Helper method to obtain models from list of OperatorConfig.
Args:
op_configs: List of OperatorConfigs to extract information from
Returns:
List of string which represents the names of each model with no repeating models
"""
logging.debug("Abstracting model form pipeline definition")
result = list(set([model for op in op_configs if op.models for model in op.models]))
logging.debug(f"The models present are `{result}`")
return result
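# Example: two configs listing ["m1", "m2"] and ["m1"] yield ["m1", "m2"] here
# (duplicates are removed via the set; ordering of the result is not guaranteed).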
def check_models_directory(op_configs, models_dir) -> List[str]:
"""Checks if the model directory contains the models needed in the pipeline.
Args:
op_configs: List of OperatorConfigs to extract information from
models_dir: A directory that contains Triton models
Returns:
model_names: List of model names used by this pipeline
"""
logging.info("Checking model directory for dependent models ...")
required_models = _extract_models_from_configs(op_configs)
if required_models == []:
logging.debug("Pipeline did not specify any Triton models, skipping check for models_dir")
return []
else:
logging.debug("Examining model directory ...")
if models_dir is None:
sys.exit(f"Model directory must be provided since your pipeline uses: {required_models}")
# The directory can contain more models than what's needed
model_names = []
for model_name in required_models:
logging.debug(f"Checking for model `{model_name}` ...")
matching_config = list(models_dir.glob(f"{model_name}/config.pbtxt"))
if len(matching_config) == 0:
sys.exit(f"Model `{model_name}` is missing in the models directory")
elif len(matching_config) > 1:
logging.warning(
f"Found more than one matching config file for model `{model_name}`. Using the first occurrence.")
model_path = matching_config[0]
with open(model_path) as f:
name_in_file = f.readline().split(":")[1].strip()[1:-1]
if name_in_file != model_path.parent.name:
sys.exit(
f"Expected name in config {name_in_file} to be equal to directory name {model_path.parent.name}")
model_names.append(model_path.parent.name)
logging.info("All model directory checks are complete!")
return model_names
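# Illustrative sketch of the models_dir layout this check assumes (directory name
# matching the `name:` field on the first line of each config.pbtxt; model names
# below are examples only):
#
#   models_dir/
#       segmentation_ct_spleen_v1/
#           config.pbtxt        # first line: name: "segmentation_ct_spleen_v1"
#       segmentation_ct_liver_v1/
#           config.pbtxt        # first line: name: "segmentation_ct_liver_v1"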
def decide_method_to_run_triton(op_configs) -> RUN_MODE:
"""Decide how to run triton based on the given op_configs.
Args:
op_configs: List of OperatorConfig objects
Return:
RUN_MODE.MODEL_REPO, RUN_MODE.PIPELINE_SERVICES or RUN_MODE.NO_INFERENCE_SERVER
Raises:
SystemExit if both models and services are present in the op_config
"""
model_repo = False
services = False
for op in op_configs:
if op.models:
model_repo = True
if op.services:
services = True
if model_repo and services:
sys.exit("CPOST does not support model_repository and pipeline services at the same time")
if model_repo:
return RUN_MODE.MODEL_REPO
elif services:
return RUN_MODE.PIPELINE_SERVICES
return RUN_MODE.NO_INFERENCE_SERVER
def check_triton_status(triton_models_names=[], host="localhost", port=TRITON_HTTP_PORT):
"""Check status of Triton server via http.
Kwargs:
triton_models_names: list of triton model names to verify, default: []
host: ip address of triton, default: localhost
port: the port to query http status, default: "8000"
Returns:
None
Raises:
SystemExit if requests.get returned with a non-200 status
"""
logging.debug("Waiting and checking Triton status ...")
time.sleep(TRITON_WAIT_TIME_SECONDS)
start_time = time.perf_counter()
while time.perf_counter() - start_time < TRITON_READY_TIMEOUT_SECONDS:
time.sleep(TRITON_WAIT_SLEEP_TIME_SECONDS)
try:
ready = requests.get(f"http://{host}:{port}/api/status")
if ready.status_code != 200:
sys.exit(f"Triton is not working, status code = {ready.status_code} with message {ready.text}")
break
except requests.ConnectionError:
continue
else:
raise TimeoutError("Timeout when waiting for triton to be ready.")
# Verify that each model is ready
for model_name in triton_models_names:
ready = requests.get(
f"http://{host}:{port}/api/status/{model_name}", timeout=TRITON_READY_TIMEOUT_SECONDS)
if ready.status_code != 200:
sys.exit(f"Error: {ready.status_code} {ready.reason}, {ready.headers}")
logging.debug("Triton is ready to be used")
def inspect_ip_address(container_name):
"""Inspect and obtain the IP address for the given container.
Args:
container_name: docker name or docker container ID
Returns:
network_ip: the IP address of the container
"""
cmd = ["docker", "inspect", "--format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'", container_name]
output = subproc_run_wrapper(cmd)
network_ip = output[1:-1] # Strip away the quotes around the returned IP address
logging.debug(f"{container_name} can be communicated on address {network_ip}")
return network_ip
def start_triton(models_dir, command, image_tag=TRITON_IMAGE_TAG, triton_models_names=[]):
"""Starts triton container and wait for it to be ready.
Args:
models_dir: Absolute path of models_directory
command: list of commands to run for the container
Kwargs:
image_tag: The image and tag for the container, e.g. image:tag, default to TRITON_IMAGE_TAG
triton_models_names: List of triton model names to load, default = []
Returns:
triton_container_id, ip_address: Tuple of string
"""
# build triton command
loading_models = [f"--load-model={name}" for name in triton_models_names]
cmd = ["docker", "run", "--gpus=1", "--rm", "-d", "-p8000:8000", "-p8001:8001", "-p8002:8002",
"-v", f"{models_dir}:/models", image_tag] + command + loading_models
logging.debug(f"Spinning up Triton with {cmd}")
triton_container_id = subproc_run_wrapper(cmd)
ip_address = inspect_ip_address(triton_container_id)
check_triton_status(triton_models_names=triton_models_names, host=ip_address)
return triton_container_id, ip_address
@contextmanager
def run_triton_model_repo(execution_order, models_dir):
"""Run Triton in a context manager if pipeline requires Triton.
Args:
execution_order: List of OperatorConfigs to extract information from
models_dir: Absolute path of models_directory
Yields:
ip_address
"""
try:
triton_models_names = check_models_directory(execution_order, models_dir)
command = ["tritonserver", "--model-repository=/models", "--model-control-mode=explicit"]
triton_container_id, ip_address = start_triton(models_dir, command, triton_models_names=triton_models_names)
yield ip_address
finally:
logging.debug("Stopping Triton ...")
subproc_run_wrapper(["docker", "kill", triton_container_id])
logging.debug("Finished cleaning up Triton")
| clara-pipeline-operator-sizing-tool-main | src/triton_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from dataclasses import dataclass
from subprocess import run as subproc_run
from tempfile import NamedTemporaryFile
from typing import Dict, List
import yaml
@dataclass
class ServiceConfig:
name: str
image_n_tag: str
command: List[str]
http_connections: Dict
@dataclass
class OperatorConfig:
name: str
image_n_tag: str
command: List[str]
variables: Dict
inputs: List
outputs: List
models: List[str] = None
services: List[ServiceConfig] = None
def update_variables(self, var_dict):
"""Update the variable attribute with the given dictionary."""
if self.variables:
self.variables = {**var_dict, **self.variables}
else:
self.variables = {**var_dict}
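# Merge behaviour (illustrative): values already set on the operator win on key
# collisions, because self.variables is unpacked last.
#   op = OperatorConfig("op1", "image:tag", None, {"c": 3}, None, None)
#   op.update_variables({"a": 1, "b": 2})
#   op.variables  ->  {"a": 1, "b": 2, "c": 3}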
@dataclass
class PipelineConfig:
name: str
operators: List[OperatorConfig]
def run_clarac(source_file: str) -> PipelineConfig:
"""Run Clara Complier in a subprocess using the given pipeline definition and parse the results.
Args:
source_file: path to the pipeline definition file
Returns:
A PipelineConfig object
"""
def _extract_services(services):
"""Extract services section in pipeline definition into list of ServiceConfig."""
result = []
for service in services:
service_image_n_tag = service["container"]["image"] + ":" + service["container"]["tag"]
command = service["container"].get("command")
if command:
command = [c.replace("$(NVIDIA_CLARA_SERVICE_DATA_PATH)", "") for c in command]
op_service = ServiceConfig(
name=service["name"],
image_n_tag=service_image_n_tag,
command=command,
http_connections={con["name"]: con["port"] for con in service["connections"].get("http")})
result.append(op_service)
return result
logging.debug("Running Clara Complier to validate the pipeline definition ...")
with NamedTemporaryFile() as result_file:
cmd = ["clarac", "-p", source_file, "-o", result_file.name, "--resolve-imports"]
proc = subproc_run(cmd)
if proc.returncode != 0:
logging.error(proc.stderr)
sys.exit(proc.returncode)
else:
logging.debug(f"stdout from Clara Complier: {proc.stdout}")
logging.debug(f"Clara Complier returned with error code {proc.returncode}, loading result as python object")
try:
config = yaml.load(result_file, yaml.FullLoader)
except yaml.YAMLError as exc:
logging.error(f"Error in configuration file from Clara Complier: {exc}")
sys.exit(2)
logging.debug(f"The content loaded from Clara Complier is: {config}")
operators = []
# Get the objects of interest, construct a list, and return it
for op in config["operators"]:
# Get services and names of triton models used by this operator
op_models = [model_dict["name"] for model_dict in op.get("models")] if op.get("models") else None
op_services = _extract_services(op.get("services")) if op.get("services") else None
image_n_tag = op["container"]["image"] + ":" + op["container"]["tag"]
cmd = op["container"].get("command")
operator = OperatorConfig(name=op["name"], image_n_tag=image_n_tag, command=cmd, variables=op.get(
"variables"), inputs=op["input"], outputs=op.get("output"), models=op_models, services=op_services)
operators.append(operator)
return PipelineConfig(name=config["name"], operators=operators)
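# Minimal sketch (assumed shape) of the compiled pipeline document parsed above;
# only the keys accessed in run_clarac/_extract_services are shown, and the values
# are illustrative:
#
#   name: null-pipeline
#   operators:
#     - name: null-reader
#       container:
#         image: null-pipeline/operator-py
#         tag: "0.8.1"
#       variables:
#         CLARA_TRACE: 2
#       input:
#         - path: /input
#       output:
#         - path: /output
#       models:                     # optional
#         - name: segmentation_ct_spleen_v1
#       services:                   # optional
#         - name: trtis
#           container:
#             image: nvcr.io/nvidia/tritonserver
#             tag: latest
#             command: ["some", "command"]
#           connections:
#             http:
#               - name: NVIDIA_CLARA_TRTISURI
#                 port: 8000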
| clara-pipeline-operator-sizing-tool-main | src/clarac_utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from pathlib import Path
B_MB_FACTOR = 1e6
SYSFS_PATH = Path("/sys/fs/cgroup")
ON_POSIX = 'posix' in sys.builtin_module_names
NS_PER_S = 1e9
CLOCK_TICKS_PER_S = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
ONLINE_CPUS = os.sysconf(os.sysconf_names['SC_NPROCESSORS_ONLN'])
ID_WAITING_TIME_SECONDS = 15
METRIC_SAMPLING_PERIOD_SECONDS = 0.2 # i.e 200ms
TRITON_IMAGE_TAG = "nvcr.io/nvidia/tritonserver:20.07-v1-py3"
TRITON_READY_TIMEOUT_SECONDS = 30
TRITON_WAIT_TIME_SECONDS = 15
TRITON_WAIT_SLEEP_TIME_SECONDS = 1
TRITON_HTTP_ENV_VAR = "NVIDIA_TRITON_HTTPURI"
TRITON_HTTP_PORT = 8000
TRITON_GRPC_ENV_VAR = "NVIDIA_TRITON_GRPCURI"
TRITON_GRPC_PORT = 8001
LEGACY_TRTIS_HTTP_ENV_VAR = "NVIDIA_CLARA_TRTISURI"
LEGACY_TRITON_HTTP_ENV_VAR = "CLARA_TRITON_URI"
| clara-pipeline-operator-sizing-tool-main | src/constants.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
class PipelineDAG:
"""Class for the Pipeline DAG used for sorting."""
def __init__(self):
self.input_deg_graph = defaultdict(lambda: 0)
self.output_graph = defaultdict(list)  # dictionary containing the adjacency list
def add_input_edge(self, node: str, input_node: str):
"""Add the node by giving its input node.
Args:
node: Node to be added
input_node: One of its dependency nodes
Returns:
None
"""
self.output_graph[input_node].append(node)
# Update the input_degree_graph as we are adding each node
self.input_deg_graph[input_node] += 0
self.input_deg_graph[node] += 1
def topological_sort(self):
"""Topologically sort the given graph based on Kahn's algorithm.
Args:
None
Returns:
A list that is the topological order of the current graph
Raises:
RuntimeError if the graph contains a cycle
"""
visited_count = 0
topo_order = []
# Create a list of all nodes with in-degree 0
zero_indegree = [node for node, length in self.input_deg_graph.items() if length == 0]
# Pick zero-in-degree node one by one and check if any new zero-in-degree node shows up
while zero_indegree:
# Get the first zero in-degree node and add it to topo_order
cur_node = zero_indegree.pop(0)
topo_order.append(cur_node)
# Iterate through output nodes of cur_node and decrease their in-degree by 1
for i in self.output_graph[cur_node]:
self.input_deg_graph[i] -= 1
# If in-degree becomes zero, add it to zero_indegree
if self.input_deg_graph[i] == 0:
zero_indegree.append(i)
visited_count += 1
# Check for a cycle in the graph
if visited_count != len(self.output_graph.keys()):
raise RuntimeError("There exists a cycle in the given graph")
return topo_order
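# Illustrative usage (mirrors the unit tests): add_input_edge(node, input_node)
# records that `node` consumes the output of `input_node`.
#   dag = PipelineDAG()
#   dag.add_input_edge(2, 1)   # 2 depends on 1
#   dag.add_input_edge(3, 2)   # 3 depends on 2
#   dag.topological_sort()     # -> [1, 2, 3]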
def topo_sort_pipeline(operators):
"""Topologically sort the given operators.
Args:
operators: List of OperatorConfig objects
Returns:
A topologically ordered OperatorConfig objects
"""
logging.debug(f"Topolocally order the given input: {operators}")
if len(operators) == 1:
result = operators.copy()
else:
# Construct a dictionary from operators so that we can convert names back to OperatorConfigs later
op_dict = {op.name: op for op in operators}
dag = PipelineDAG()
for op in operators:
for input_path in op.inputs:
if input_path.get("from"):
dag.add_input_edge(op.name, input_path.get("from"))
sequence = dag.topological_sort()
result = [op_dict[op_name] for op_name in sequence]
logging.debug(f"Topologically order result is: {result}")
return result
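# A minimal usage sketch of the classes above (hypothetical operator names, not
# from any real pipeline); it is never called and only illustrates how Kahn's
# algorithm orders a DAG built via add_input_edge().
def _example_topology_sort():
    dag = PipelineDAG()
    # "reader" feeds "segmentation", which feeds "writer"
    dag.add_input_edge("segmentation", "reader")
    dag.add_input_edge("writer", "segmentation")
    order = dag.topological_sort()
    assert order == ["reader", "segmentation", "writer"]
    return order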
| clara-pipeline-operator-sizing-tool-main | src/topology_sort.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| clara-pipeline-operator-sizing-tool-main | src/__init__.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from dataclasses import fields as data_fields
from datetime import datetime
import psutil
from constants import B_MB_FACTOR, NS_PER_S, ONLINE_CPUS, SYSFS_PATH
@dataclass
class Metrics:
timestamp: float
cpu_percent: float
memory: float # in MB
METRICS_HEADER = [obj.name for obj in data_fields(Metrics)]
@dataclass
class RawMetrics:
timestamp: float
cpu: float
per_cpu: bytes
sys_cpu: float  # sum of system-wide CPU times, in seconds
memory: float # in bytes
class Container:
def __init__(self) -> None:
"""Initializes the Container object with id, metrics_path, raw_metrics, and metrics.
Args:
None
Returns:
None
"""
self.id = ""
self.metric_paths = () # Tuple[Path, Path, Path]
self.raw_metrics = [] # List[RawMetrics]
self.metrics = []
def construct_metrics_path(self):
"""Constructs metrics reading paths in a tuple based on self.id attribute.
Args:
None
Returns:
None
Raises:
RuntimeError if id is not set when this is called
"""
if self.id:
_cpu_path = SYSFS_PATH / "cpuacct" / "docker" / self.id / "cpuacct.usage"
_per_cpu_path = SYSFS_PATH / "cpuacct" / "docker" / self.id / "cpuacct.usage_percpu"
_mem_path = SYSFS_PATH / "memory" / "docker" / self.id / "memory.usage_in_bytes"
self.metric_paths = (_cpu_path, _per_cpu_path, _mem_path)
else:
raise RuntimeError("Container ID is not set when creating paths")
def metrics_path_exists(self) -> bool:
"""Checks if all the paths in the container.metrics_path attribute exist.
Args:
None
Returns:
A boolean value for whether all metrics_paths exist on the system.
"""
return self.metric_paths[0].exists() and self.metric_paths[1].exists() and self.metric_paths[2].exists()
def _read_raw_metrics(self) -> RawMetrics:
"""Reads raw metrics data based on the self.metric_path and timestamp it.
Args:
None
Returns:
A RawMetrics object
"""
timestamp = datetime.utcnow().timestamp()
# Rationale for raw_sys_cpu arithmetic: getSystemCPUUsage() in docker/daemon/stats_collector_unix.go
# in https://github.com/rancher/docker
raw_sys_cpu = sum(psutil.cpu_times()[:7]) # in seconds
# Note: Converting to float takes an extra 1000ns
raw_cpu = float(self.metric_paths[0].read_bytes())
# If this length always matches the system CPU count, per_cpu would be redundant and could be dropped
raw_per_cpu = self.metric_paths[1].read_bytes()
raw_mem = float(self.metric_paths[2].read_bytes())
return RawMetrics(timestamp, raw_cpu, raw_per_cpu, raw_sys_cpu, raw_mem)
def sample_metrics(self) -> None:
"""Samples raw metrics data and append to self.raw_metrics list.
FileNotFoundError and OSError errno 19 implies that the file no longer
exist and thus these are bypassed.
Args:
None
Returns:
None, or a Metrics object once at least two raw samples have been collected
Raises:
RuntimeError if self.metric_paths is not set when this is called
"""
if self.metric_paths:
try:
raw_metrics = self._read_raw_metrics()
self.raw_metrics.append(raw_metrics)
# process metrics starting at second item
if len(self.raw_metrics) >= 2:
metric = self._process_raw_data(self.raw_metrics[-2], self.raw_metrics[-1])
self.metrics.append(metric)
return metric
else:
return
except FileNotFoundError:
return
except OSError as err:
if err.errno == 19: # no such device error
return
else:
raise err
else:
raise RuntimeError("Metrics paths must constructed before sampling.")
@staticmethod
def _process_raw_data(prev, cur):
"""Process the given data and convert units.
Computation according to https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats
Args:
prev: the prior RawMetrics object
cur: the current RawMetrics object
Returns:
result: A Metrics object computed from the two raw samples
"""
ts_avg = (prev.timestamp + cur.timestamp) / 2.0
cpu_percent = 0.0
# Convert from nanoseconds to seconds
cpu_delta = (cur.cpu - prev.cpu) / NS_PER_S
# Below does not need division by CLOCK_TICKS_PER_S because psutil already does it
sys_cpu_delta = cur.sys_cpu - prev.sys_cpu
if cpu_delta > 0.0 and sys_cpu_delta > 0.0:
cpu_percent = (cpu_delta / sys_cpu_delta) * ONLINE_CPUS * 100.0
# Since we're averaging the cpu, we also need to average the memory to match the averaged timestamp
memory_avg = (prev.memory + cur.memory) / 2.0 / B_MB_FACTOR
return Metrics(ts_avg, cpu_percent=cpu_percent, memory=memory_avg)
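# A worked sketch of the arithmetic in _process_raw_data (hypothetical numbers,
# never called): if the container consumed 0.5 s of CPU while the host accumulated
# 1.0 s of CPU time, cpu_percent = (0.5 / 1.0) * ONLINE_CPUS * 100, i.e. 200.0 %
# on a 4-core machine. Memory is simply averaged and converted to MB.
def _example_process_raw_data():
    prev = RawMetrics(timestamp=0.0, cpu=0.0, per_cpu=b"", sys_cpu=0.0, memory=0.0)
    cur = RawMetrics(timestamp=0.2, cpu=0.5 * NS_PER_S, per_cpu=b"", sys_cpu=1.0, memory=0.0)
    return Container._process_raw_data(prev, cur)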
| clara-pipeline-operator-sizing-tool-main | src/container.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from enum import IntEnum
from pathlib import Path
class ContinueOptions(IntEnum):
"""Enum to organize options to prompt user, continue execution, or stop execution when operator fails."""
NONE = 0 # prompt user y/n
CONT = 1 # continue execution
STOP = 2 # stop execution
# methods for compatibility with argparse and nicer error messages
def __str__(self):
return self.name.lower()
def __repr__(self):
return str(self)
@staticmethod
def argparse(s):
try:
return ContinueOptions[s.upper()]
except KeyError: # To be used with `choices` in add_argument()
return s
class MyParser(argparse.ArgumentParser):
"""Custom parser class to override the error method."""
def error(self, message):
"""Overriding the default error method to print help message before exiting."""
sys.stderr.write('error: %s\n' % message)
self.print_help(sys.stderr)
self.exit(2)
def valid_file(path):
"""Helper method for parse_args to convert to Path and verify if the file path exists.
Args:
path: path to file from parse_args()
Returns:
The absolute path of the given file path if it exists
Raises:
argparse.ArgumentTypeError if the file given does not exist
"""
path = Path(path)
if path.exists() and path.is_file():
return path.absolute()
raise argparse.ArgumentTypeError(f"No such file or the given path is not a file: '{path}'")
def valid_dir(path):
"""Helper method for parse_args to convert to Path and verify if the directory exists.
Args:
path: path to directory from parse_args()
Returns:
The absolute path of the given directory if it exists
Raises:
argparse.ArgumentTypeError if the directory given does not exist or if not a directory
"""
path = Path(path)
if path.exists() and path.is_dir():
return path.absolute()
raise argparse.ArgumentTypeError(f"No such directory or the given path is not a directory: '{path}'")
def parse_args(args):
"""Create an argument parser and parse the command-line arguments.
Args:
args: A list of arguments to parse
Returns:
A parser object containing parsed arguments
"""
parser = MyParser(prog="cpost", description="Clara Pipeline Sizing Tool CLI")
parser.add_argument("pipeline_path", metavar="<pipeline_path>",
type=valid_file, help="pipeline definition file path")
parser.add_argument("input_dir", metavar="<input_dir>", type=valid_dir, help="input payload directory")
parser.add_argument("--metrics_dir", type=valid_dir,
help="metrics output directory, if not specified, write to stdout")
parser.add_argument("--models_dir", type=valid_dir,
help="directory for Triton models, required if pipeline uses Triton")
parser.add_argument(
"-v", "--verbose", action='store_true',
help="verbose output (DEBUG level). If not specified, default output is INFO level.")
parser.add_argument(
"--force", default=ContinueOptions.NONE, const=ContinueOptions.NONE, nargs='?', type=ContinueOptions.argparse,
choices=list(ContinueOptions),
help='force continue or stop when operator failure occurs. \
(default: %(default)s, which will prompt the user for each failure).')
return parser.parse_args(args)
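# A minimal usage sketch (hypothetical paths; they must exist on disk, since
# valid_file and valid_dir validate them at parse time, so this stays a comment):
#   args = parse_args(["pipeline.yaml", "./payload", "--metrics_dir", "./out", "-v"])
#   args.pipeline_path  # -> absolute Path to pipeline.yaml
#   args.force          # -> ContinueOptions.NONE unless --force cont/stop is given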
| clara-pipeline-operator-sizing-tool-main | src/cli.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import dataclasses
import logging
import math
import shutil
import sys
from pathlib import Path
from subprocess import PIPE, Popen
from subprocess import run as subproc_run
from typing import List
from clarac_utils import OperatorConfig
from constants import ON_POSIX, TRITON_IMAGE_TAG
def round_up_to_multiple(x, base):
"""Round up the given number to the nearest multiple of the given base number."""
return math.ceil(float(x) / float(base)) * base
def convert_percent_to_cores(x):
"Convert the given percentage to CPU cores."
return int(math.ceil(x / 100.0))
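# Sanity-check sketch for the two helpers above (plain arithmetic, never called):
def _example_rounding_helpers():
    assert round_up_to_multiple(900, 256) == 1024   # next multiple of 256 above 900
    assert round_up_to_multiple(512, 256) == 512    # already a multiple, unchanged
    assert convert_percent_to_cores(250.0) == 3     # 250 % CPU needs 3 whole cores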
def assert_installed(prog: str):
"""Check if the given program is installed, terminate if not.
Args:
prog: Name of the commandline program
Returns:
None. If program is not installed, sys.exit(1)
"""
logging.debug(f"Checking for dependency {prog} ...")
if not shutil.which(prog):
sys.stderr.write(f"error: {prog} not installed, please install {prog}\n")
sys.exit(1)
logging.debug(f"Dependency {prog} fulfilled")
def set_up_logging(verbose):
"""Setup logging for cpost to standard out.
Args:
verbose: Boolean value indicating whether log level will be debug or not
Returns:
None.
"""
if verbose: # pragma: no cover
level = logging.DEBUG
else: # pragma: no cover
level = logging.INFO
# logging config are default to StreamHandlers
logging.basicConfig(format='%(message)s', level=level) # pragma: no cover
def check_images_and_tags(operators: List[OperatorConfig]):
"""For the image and tag of each operator, examine local images and pull if not found locally.
Args:
operators: List of OperatorConfig objects
Returns:
None
Raises:
sys.exit if the docker pull command errored out
"""
uses_triton_model_repo = False
logging.info("Checking for container images and tags needed for the pipeline...")
def _check_image_exists_locally(image_and_tag):
logging.debug(f"Checking if `{image_and_tag}` are in local images...")
local_check_proc = subproc_run(
["docker", "images", image_and_tag, "--format", "{{.Repository}}:{{.Tag}}"],
capture_output=True)
result = local_check_proc.stdout.decode('UTF-8')
if image_and_tag in result:
logging.debug(f"`{image_and_tag}` found.")
return True
else:
return False
def _pull_image(image_and_tag):
logging.debug(f"`{image_and_tag}` not found, try pulling from registry ...")
pull_proc = subproc_run(["docker", "pull", image_and_tag], capture_output=True)
if pull_proc.returncode == 0:
logging.debug(f"Docker pull command for `{image_and_tag}` returned with code {pull_proc.returncode}")
logging.debug(f"stdout is: \n{pull_proc.stdout.decode('UTF-8').strip()}")
else:
logging.error(f"Docker pull command for `{image_and_tag}` returned with code {pull_proc.returncode}")
logging.error(f"stdout is: {pull_proc.stdout.decode('UTF-8')}")
logging.error(f"stderr is: {pull_proc.stderr.decode('UTF-8')}")
sys.exit("Please verify docker access and the pipeline definition")
for operator in operators:
if not _check_image_exists_locally(operator.image_n_tag):
_pull_image(operator.image_n_tag)
if operator.models:
uses_triton_model_repo = True
if operator.services:
for op_service in operator.services:
if not _check_image_exists_locally(op_service.image_n_tag):
_pull_image(op_service.image_n_tag)
if uses_triton_model_repo:
if not _check_image_exists_locally(TRITON_IMAGE_TAG):
_pull_image(TRITON_IMAGE_TAG)
logging.info("All container images are ready to be used.")
def subproc_run_wrapper(cmd, **kwargs):
sub_proc = subproc_run(cmd, capture_output=True, **kwargs)
if sub_proc.returncode == 0:
std_out = sub_proc.stdout.decode('UTF-8').strip()
logging.debug(f"Subprocess returned with stdout {std_out}")
return std_out
else:
logging.error(
f"Running {cmd} returned with {sub_proc.returncode} with error {sub_proc.stderr}")
return sys.exit(f"Failed to run subprocess with command {cmd}")
def prompt_yes_or_no(condition: str):
"""Prompt the user with a question and waits for the y/n input.
Args:
condition: Condition that needs user's input
Returns:
Boolean value corresponding to yes or no
"""
while "the answer is invalid":
reply = input(condition + ' (y/n): ').lower().strip()
if reply:
if reply[0] == 'y':
return True
if reply[0] == 'n':
return False
def write_to_csv(que, field_names, output_file):
"""Write data in que to the output file in csv format.
Args:
que: a multiprocessing.Queue containing the data to be written
field_names: Header for the csv file
output_file: String or Path of the output file location
Returns:
None
"""
output_file = Path(output_file)
if not output_file.parent.exists():
output_file.parent.mkdir(parents=True)
with open(output_file, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=field_names)
csv_writer.writeheader()
while True:
item = que.get()
if item is None:
continue
if item == 0:
que.close()
break
csv_writer.writerow(dataclasses.asdict(item))
f.flush()
logging.info(f"Results are stored in {output_file}")
| clara-pipeline-operator-sizing-tool-main | src/utils.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
sys.path.append('{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), '../src'))
from clarac_utils import run_clarac # nopep8 # noqa: E402
from pipeline_utils import run_pipeline # nopep8 # noqa: E402
from topology_sort import topo_sort_pipeline # nopep8 # noqa: E402
from utils import assert_installed, check_images_and_tags, set_up_logging # nopep8 # noqa: E402
from cli import parse_args # nopep8 # noqa: E402
def main():
parsed_args = parse_args(sys.argv[1:])
set_up_logging(parsed_args.verbose)
assert_installed("clarac")
assert_installed("docker")
logging.info("All software dependencies are fullfilled.")
pipeline_config = run_clarac(parsed_args.pipeline_path)
check_images_and_tags(pipeline_config.operators)
execution_order = topo_sort_pipeline(pipeline_config.operators)
run_pipeline(execution_order, parsed_args.input_dir, parsed_args.metrics_dir,
parsed_args.models_dir, parsed_args.force)
if __name__ == "__main__": # pragma: no cover
main()
| clara-pipeline-operator-sizing-tool-main | src/main.py |
# Copyright 2021 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import time
from contextlib import contextmanager
from dataclasses import astuple
from multiprocessing import Manager, Process, Queue
from pathlib import Path
from queue import Empty
from subprocess import PIPE, Popen
from subprocess import run as subproc_run
from tempfile import TemporaryDirectory
from clarac_utils import OperatorConfig
from constants import (ID_WAITING_TIME_SECONDS, LEGACY_TRITON_HTTP_ENV_VAR, LEGACY_TRTIS_HTTP_ENV_VAR,
METRIC_SAMPLING_PERIOD_SECONDS, ON_POSIX, TRITON_GRPC_ENV_VAR, TRITON_GRPC_PORT,
TRITON_HTTP_ENV_VAR, TRITON_HTTP_PORT)
from container import METRICS_HEADER, Container
from tabulate import tabulate
from triton_utils import (RUN_MODE, check_triton_status, decide_method_to_run_triton, inspect_ip_address,
run_triton_model_repo, start_triton)
from utils import convert_percent_to_cores, prompt_yes_or_no, round_up_to_multiple, subproc_run_wrapper, write_to_csv
from cli import ContinueOptions
def _enqueue_output(out, queue):
"""Reads the file content, add to queue, and close the file handler when done.
Args:
out: opened file handler or stdout
queue: multiprocessing.Queue object
Returns:
None
"""
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def start_operator(container_id, id_returned_event, cmd):
"""Runs the given docker command and assign docker ID to the given shared value.
Args:
container_id: A multiprocessing.Value object to allow sharing of values.
id_returned_event: A multiprocessing.Event object, set when container_id is assigned.
cmd: The full docker command to run an image.
Returns:
None
"""
cmd_proc = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=ON_POSIX)
logging.info("Running operator ...")
q = Queue()
checker = Process(target=_enqueue_output, args=(cmd_proc.stdout, q), daemon=True)
checker.start()
while cmd_proc.poll() is None:
try:
raw_id = q.get_nowait()
except Empty:
continue
else:
# Validate the result, expect length to be 64 + 1 from '\n'
if len(raw_id) == 65:
container_id.value = raw_id.decode('utf-8').strip()
logging.info(f"The container id is: {container_id.value}")
id_returned_event.set()
break
else:
sys.exit(f"The output of docker should be the 64 bit container ID, got {raw_id} instead.")
else:
if cmd_proc.returncode != 0:
checker.terminate()
checker.join()
# This means that cmd_proc has errored and terminated. Log the error and return
logging.warning(f"Operator failed to start with returncode {cmd_proc.returncode}")
sys.exit(f"The operator failed with stderr:\n{cmd_proc.stderr.read().decode('UTF-8')}")
checker.terminate()
checker.join()
if cmd_proc.returncode is None:
logging.debug("Operator is running...")
# We need to know if docker exited correctly
docker_wait_proc = subproc_run(["docker", "wait", container_id.value], capture_output=True)
returned_str = docker_wait_proc.stdout.decode('UTF-8').strip()
if returned_str == "0":
logging.debug(f"Operator finished successfully with exitcode {returned_str}")
else:
logging.error(f"Operator failed with exitcode is: {returned_str}")
try:
return_code = int(returned_str)
sys.exit(return_code)
except ValueError:
sys.exit(1)
else:
logging.debug(f"Docker run command returned with {cmd_proc.returncode}")
def sample_operator(container, que):
"""Samples and writes metrics for the given operator as long as its metrics paths exist.
Sampling frequency is determined by METRIC_SAMPLING_PERIOD_SECONDS.
Args:
container: Container object.
que: None or a multiprocessing.Queue object to store data that needs to be written to csv.
Returns:
None
"""
# Waits for the files to be created by docker
while not container.metrics_path_exists():
continue
# Samples until the files disappear
logging.debug("Starts sampling container ...")
before_sample = time.perf_counter()
while container.metrics_path_exists():
metric = container.sample_metrics()
if que:
que.put(metric)
after_sample = time.perf_counter()
sleep_time = METRIC_SAMPLING_PERIOD_SECONDS - (after_sample - before_sample)
sleep_time = sleep_time if sleep_time > 0 else 0
if sleep_time == 0:
logging.info(
f"Sampling taking longer than sampling period with time of {(after_sample - before_sample)} seconds")
# NOTE: Due to the inaccurate nature of time.sleep(), our sampling will not be extremely precise
time.sleep(sleep_time)
before_sample = time.perf_counter()
# Signal the end of que
if que:
que.put(0)
logging.debug("Finished sampling container.")
def build_operator_cmd(input_dir: Path, data_folder_name: str, op_config: OperatorConfig, triton_ip: str = None):
"""Constructs the docker command used to run operator.
Args:
input_dir: A Path object for the input payload data in local system
data_folder_name: Name of the data folder to store temporary data
op_config: An OperatorConfig object containing information about the operator
triton_ip: None, or Triton's IP address
Returns:
cmd: A list of string representing the docker command that can be used to run the operator
"""
logging.debug(f"Constructing commands for operator {op_config.name} ...")
op_output_dir = Path(data_folder_name) / op_config.name
op_output_dir.mkdir()
cmd = ["docker", "run", "-d", "--rm", "--env", "NVIDIA_CLARA_NOSYNCLOCK=1"]
# If models is present, then we supply Triton ports to this
if op_config.models:
cmd.extend(["--env", f"{TRITON_HTTP_ENV_VAR}={triton_ip}:{TRITON_HTTP_PORT}"])
cmd.extend(["--env", f"{LEGACY_TRITON_HTTP_ENV_VAR}={triton_ip}:{TRITON_HTTP_PORT}"])
cmd.extend(["--env", f"{LEGACY_TRTIS_HTTP_ENV_VAR}={triton_ip}:{TRITON_HTTP_PORT}"])
cmd.extend(["--env", f"{TRITON_GRPC_ENV_VAR}={triton_ip}:{TRITON_GRPC_PORT}"])
# Add operator specific environment variables
if op_config.variables:
for key, value in op_config.variables.items():
cmd.extend(["--env", f"{key}={value}"])
# Mount input and output volumes
def build_volume_mount(local, remote):
return ["-v", ":".join([local, remote])]
# Mount input volumes
if op_config.inputs:
for input_obj in op_config.inputs:
# If `from` is not present, we use the input payload directory
if input_obj.get("from") is None:
cmd.extend(build_volume_mount(str(input_dir), input_obj["path"]))
# If `from` is specified, we use the specified operator's output directory as the input for this operator
else:
op_input_dir = op_output_dir.parent / input_obj["from"]
# If `name` is specified, then find the subdirectory and use this as the input
if input_obj.get("name"):
cmd.extend(build_volume_mount(str((op_input_dir / input_obj["name"])), input_obj["path"]))
else:
cmd.extend(build_volume_mount(str(op_input_dir), input_obj["path"]))
# Mount output volumes
if op_config.outputs:
for output_obj in op_config.outputs:
# If `name` is specified, create a subdirectory with this name
if output_obj.get("name"):
sub_dir = Path(op_output_dir / output_obj["name"])
sub_dir.mkdir(parents=True)
cmd.extend(build_volume_mount(str(sub_dir), output_obj["path"]))
else:
cmd.extend(build_volume_mount(str(op_output_dir), output_obj["path"]))
# Add the image and tag, and command last
cmd.append(op_config.image_n_tag)
if op_config.command:
cmd.extend(op_config.command)
logging.debug(f"Docker command for operator {op_config.name} is: {cmd}")
return cmd
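# Illustrative shape of the command returned above for a hypothetical operator
# with one payload input and one named output (flags mirror the code path; the
# temporary directory name is whatever TemporaryDirectory() produced):
#   ["docker", "run", "-d", "--rm", "--env", "NVIDIA_CLARA_NOSYNCLOCK=1",
#    "--env", "MY_VAR=1",
#    "-v", "/payload:/input", "-v", "/tmp/<tmpdir>/my-operator/seg:/output",
#    "my-image:0.1"]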
def print_operator_metrics(metrics, metrics_header, op_name):
"""Logs the metrics to console in a table format.
Args:
metrics: list of Metrics object
metrics_header: Header of the metrics data
op_name: Name of the operator
Returns:
None
"""
logging.info("{:_^60}".format(f"Operator {op_name} Metrics Data")) # pragma: no cover
data = [astuple(metric) for metric in metrics] # pragma: no cover
logging.info(tabulate(data, metrics_header, tablefmt="pretty")) # pragma: no cover
def print_operator_summary(metrics, op_name):
"""Calculate and logs the metrics statistics in a readable format.
Args:
metrics: list of Metrics object
op_name: Name of the operator
Returns:
None
"""
logging.info("{:_^60}".format(f"Operator {op_name} Summary"))
# Calculate metrics for CPU and memory
cpu_data = [metric.cpu_percent for metric in metrics]
cpu_avg = round(sum(cpu_data)/len(metrics), 3)
cpu_max = round(max(cpu_data), 3)
memory_data = [metric.memory for metric in metrics]
memory_avg = round(sum(memory_data)/len(metrics), 3)
memory_max = round(max(memory_data), 3)
recommended_cpu = convert_percent_to_cores(cpu_max)
# Add 100MB of buffer memory and round to multiple of base 256
recommended_memory = round_up_to_multiple(memory_max + 100.0, 256)
# Log it onto console
data = [["CPU", f"{cpu_avg} %", f"{cpu_max} %", f"cpu: {recommended_cpu}"], [
"Memory", f"{memory_avg} MB", f"{memory_max} MB", f"memory: {recommended_memory}"]]
logging.info(
tabulate(
data, ["Metric", "Average", "Maximum", "Resource"],
tablefmt="pretty"))
return data
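# Worked sketch of the recommendation math above (hypothetical measurements):
# a peak of 250.0 % CPU becomes convert_percent_to_cores(250.0) = 3 cores, and a
# peak of 900.0 MB memory becomes round_up_to_multiple(900.0 + 100.0, 256) = 1024 MB.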
def print_pipeline_summary(pipeline_metrics_dict):
"""Display the pipeline summary table.
Args:
pipeline_metrics_dict: Dictionary mapping each operator name to its metrics summary
Returns:
None
"""
pipeline_data = []
for op_name, op_summary in pipeline_metrics_dict.items():
p_summary = [op_name] + ["\n".join([str(row1), str(row2)]) for row1, row2 in zip(op_summary[0], op_summary[1])]
pipeline_data.append(p_summary)
logging.info(
tabulate(
pipeline_data, ["Operator", "Metric", "Average", "Maximum", "Resource"],
tablefmt="grid", numalign="right"))
def run_operator(
op_config, docker_cmd, output_writers, metrics_output, continue_option,
pipeline_summary_dict):
"""Run the operator using the directories given.
Args:
op_config: an OperatorConfig object
docker_cmd: List of docker commands to run the operator
output_writers: List of writers or None
metrics_output: A Path object for the metrics directory or None
continue_option: A ContinueOptions Enum object
pipeline_summary_dict: Dictionary mapping each operator name to its metrics summary
Returns:
True when the operator failed and user wants to stop, otherwise None
"""
container = Container()
manager = Manager()
container_id = manager.Value('c_wchar_p', '')
id_returned_event = manager.Event()
if output_writers is not None:
write_que = Queue()
writer_process = Process(
target=write_to_csv,
args=(write_que, METRICS_HEADER, (metrics_output / f"{op_config.name}_final_result.csv")))
writer_process.start()
output_writers.append(writer_process)
else:
write_que = None
p_start = Process(target=start_operator, args=(container_id, id_returned_event, docker_cmd))
before_id = time.perf_counter() # timing
p_start.start()
if id_returned_event.wait(timeout=ID_WAITING_TIME_SECONDS):
# Event.wait() returns true if it has been set
after_id = time.perf_counter() # timing
container.id = container_id.value
container.construct_metrics_path()
sample_operator(container, write_que)
end = time.perf_counter() # timing
logging.debug(f"Time it takes to get container ID: {after_id-before_id} s")
logging.debug(f"Waiting and Sampling Time: {end-after_id} s")
p_start.join()
# print metrics to console if not written to csv
if output_writers is None:
print_operator_metrics(container.metrics, METRICS_HEADER, op_config.name)
operator_summary = print_operator_summary(container.metrics, op_config.name)
pipeline_summary_dict[op_config.name] = operator_summary
else:
logging.warning(f"Obtaining docker ID timed out. Operator {op_config.name} failed")
p_start.terminate()
p_start.join()
if output_writers is not None:
writer_process.terminate()
if p_start.exitcode != 0: # i.e. container_id timed out
logging.warning(f"Operator {op_config.name} failed with exitcode {p_start.exitcode}")
if pipeline_summary_dict.get(op_config.name):
new_key = f"{op_config.name}\n(Non-zero exitcode)"
pipeline_summary_dict[new_key] = pipeline_summary_dict.pop(op_config.name)
if continue_option == ContinueOptions.CONT:
return
if continue_option == ContinueOptions.STOP:
return True
if not prompt_yes_or_no(
"Would you like to continue execution at the risk of the rest of pipeline failing (y)? If (n), cpost will stop and cleanup."):
# When user says no, we exit the for-loop and return
return True
def run_pipeline(execution_order, input_data_dir, metrics_output, models_dir, continue_option):
"""Run the pipeline operators in the given execution_order using the directories given.
Args:
execution_order: List of OperatorConfig objects in the order of execution
input_data_dir: Path to the input payload directory
metrics_output: A Path object for the metrics directory or stdout
models_dir: A directory that contains Triton models
continue_option: A ContinueOptions Enum object
Returns:
None
"""
triton_mode = decide_method_to_run_triton(execution_order)
if triton_mode == RUN_MODE.NO_INFERENCE_SERVER:
return run_pipeline_alone(execution_order, input_data_dir, metrics_output, continue_option)
if triton_mode == RUN_MODE.MODEL_REPO:
with run_triton_model_repo(execution_order, models_dir) as triton_ip:
run_pipeline_alone(execution_order, input_data_dir, metrics_output, continue_option, triton_ip)
else: # PIPELINE_SERVICES
run_pipeline_with_services(execution_order, input_data_dir, metrics_output,
models_dir, continue_option)
@contextmanager
def get_output_writers(metrics_output):
"""Context manager for keeping a list of output writers and cleaning up.
The list keeps the output-writer multiprocessing.Process objects so they can be joined on cleanup.
Args:
metrics_output: a pathlib.Path object or None
Yields:
None if metrics_output is None. Empty list if metrics_output is Path
"""
try:
write_csv_flag = isinstance(metrics_output, Path)
if write_csv_flag:
output_writers = []
yield output_writers
else:
yield None
finally:
if write_csv_flag:
for writer in output_writers:
writer.join()
def run_pipeline_alone(execution_order, input_data_dir, metrics_output, continue_option, triton_ip=None):
"""Run the pipeline operators in the given execution_order using the directories given.
Args:
execution_order: List of OperatorConfig objects in the order of execution
input_data_dir: Path to the input payload directory
metrics_output: A Path object for the metrics directory or stdout
continue_option: A ContinueOptions Enum object
triton_ip: None, or Triton's IP address
Returns:
None
"""
with TemporaryDirectory() as data_folder_name:
with get_output_writers(metrics_output) as output_writers:
pipeline_summary_dict = {}
for op_config in execution_order:
logging.info("\n{:_^60}".format(f"Executing Operator {op_config.name}"))
docker_cmd = build_operator_cmd(input_data_dir, data_folder_name, op_config, triton_ip)
exit = run_operator(op_config, docker_cmd, output_writers,
metrics_output, continue_option, pipeline_summary_dict)
if exit:
break
print_pipeline_summary(pipeline_summary_dict)
def clean_up_containers(running_dict):
"""Kill the containers in the given dictionary and remove the item from the dictionary.
Args:
running_dict: Dictionary where key is image name and value is (container ID, ip_address)
Returns:
None
"""
for old_key, container_info in running_dict.items():
logging.debug(f"Tear down unused services {old_key}")
if container_info:
subproc_run_wrapper(["docker", "kill", container_info[0]])
running_dict.clear()
def start_pipeline_services(op_config, running_dict, models_dir):
"""Start the pipeline services for the given op_config.
Args:
op_config: An OperatorConfig object
running_dict: Dictionary for keep track of currently running services
models_dir: A directory that contains Triton models
Returns:
None
"""
for service in op_config.services:
logging.debug(f"Checking service with name {service.name}")
key = service.image_n_tag + " " + " ".join(service.command)
if running_dict.get(key):
# Add the connection variables
ip_address = running_dict[key][1]
http_connections_dict = {k: f"{ip_address}:{v}" for k, v in service.http_connections.items()}
op_config.update_variables(http_connections_dict)
logging.debug("Found running services that suit the needs")
else:
logging.debug("Didn't find matching service, starting new service")
if len(running_dict) != 0: # tear down current services before spin up another one
clean_up_containers(running_dict)
if "trtis" in service.name or "triton" in service.name:
triton_container_id, ip_address = start_triton(models_dir, service.command, service.image_n_tag)
running_dict[key] = (triton_container_id, ip_address)
http_connections_dict = {k: f"{ip_address}:{v}" for k, v in service.http_connections.items()}
op_config.update_variables(http_connections_dict)
else:
logging.warning("CPOST currently does not support services other than triton or trtis.")
logging.warning(f"Skipping `{service.name}`, operator may fail because of this.")
def run_pipeline_with_services(
execution_order, input_data_dir, metrics_output, models_dir, continue_option):
"""Run the pipeline operators in the given execution_order using the directories given.
Args:
execution_order: List of OperatorConfig objects in the order of execution
input_data_dir: Path to the input payload directory
metrics_output: A Path object for the metrics directory or stdout
models_dir: A directory that contains Triton models
continue_option: A ContinueOptions Enum object
Returns:
None
"""
with TemporaryDirectory() as data_folder_name:
with get_output_writers(metrics_output) as output_writers:
try:
running_services = {}
pipeline_summary_dict = {}
for op_config in execution_order:
if op_config.services:
start_pipeline_services(op_config, running_services, models_dir)
logging.info("\n{:_^60}".format(f"Executing Operator {op_config.name}"))
docker_cmd = build_operator_cmd(input_data_dir, data_folder_name, op_config)
exit = run_operator(op_config, docker_cmd, output_writers,
metrics_output, continue_option, pipeline_summary_dict)
if exit:
break
print_pipeline_summary(pipeline_summary_dict)
finally:
# Stop any currently running services
clean_up_containers(running_services)
| clara-pipeline-operator-sizing-tool-main | src/pipeline_utils.py |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# save it to a numpy array.
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
| mellotron-master | plotting_utils.py |
import tensorflow as tf
from text.symbols import symbols
def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=50000,
iters_per_checkpoint=500,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
ignore_layers=['speaker_embedding.weight'],
################################
# Data Parameters #
################################
training_files='filelists/ljs_audiopaths_text_sid_train_filelist.txt',
validation_files='filelists/ljs_audiopaths_text_sid_val_filelist.txt',
text_cleaners=['english_cleaners'],
p_arpabet=1.0,
cmudict_path="data/cmu_dictionary",
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
sampling_rate=22050,
filter_length=1024,
hop_length=256,
win_length=1024,
n_mel_channels=80,
mel_fmin=0.0,
mel_fmax=8000.0,
f0_min=80,
f0_max=880,
harm_thresh=0.25,
################################
# Model Parameters #
################################
n_symbols=len(symbols),
symbols_embedding_dim=512,
# Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
# Decoder parameters
n_frames_per_step=1, # currently only 1 is supported
decoder_rnn_dim=1024,
prenet_dim=256,
prenet_f0_n_layers=1,
prenet_f0_dim=1,
prenet_f0_kernel_size=1,
prenet_rms_dim=0,
prenet_rms_kernel_size=1,
max_decoder_steps=1000,
gate_threshold=0.5,
p_attention_dropout=0.1,
p_decoder_dropout=0.1,
p_teacher_forcing=1.0,
# Attention parameters
attention_rnn_dim=1024,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=31,
# Mel-post processing network parameters
postnet_embedding_dim=512,
postnet_kernel_size=5,
postnet_n_convolutions=5,
# Speaker embedding
n_speakers=123,
speaker_embedding_dim=128,
# Reference encoder
with_gst=True,
ref_enc_filters=[32, 32, 64, 64, 128, 128],
ref_enc_size=[3, 3],
ref_enc_strides=[2, 2],
ref_enc_pad=[1, 1],
ref_enc_gru_size=128,
# Style Token Layer
token_embedding_size=256,
token_num=10,
num_heads=8,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
learning_rate_min=1e-5,
learning_rate_anneal=50000,
weight_decay=1e-6,
grad_clip_thresh=1.0,
batch_size=32,
mask_padding=True, # set model's padded outputs to padded values
)
if hparams_string:
tf.compat.v1.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.compat.v1.logging.info('Final parsed hparams: %s', hparams.values())
return hparams
| mellotron-master | hparams.py |
import re
import numpy as np
import music21 as m21
import torch
import torch.nn.functional as F
from text import text_to_sequence, get_arpabet, cmudict
CMUDICT_PATH = "data/cmu_dictionary"
CMUDICT = cmudict.CMUDict(CMUDICT_PATH)
PHONEME2GRAPHEME = {
'AA': ['a', 'o', 'ah'],
'AE': ['a', 'e'],
'AH': ['u', 'e', 'a', 'h', 'o'],
'AO': ['o', 'u', 'au'],
'AW': ['ou', 'ow'],
'AX': ['a'],
'AXR': ['er'],
'AY': ['i'],
'EH': ['e', 'ae'],
'EY': ['a', 'ai', 'ei', 'e', 'y'],
'IH': ['i', 'e', 'y'],
'IX': ['e', 'i'],
'IY': ['ea', 'ey', 'y', 'i'],
'OW': ['oa', 'o'],
'OY': ['oy'],
'UH': ['oo'],
'UW': ['oo', 'u', 'o'],
'UX': ['u'],
'B': ['b'],
'CH': ['ch', 'tch'],
'D': ['d', 'e', 'de'],
'DH': ['th'],
'DX': ['tt'],
'EL': ['le'],
'EM': ['m'],
'EN': ['on'],
'ER': ['i', 'er'],
'F': ['f'],
'G': ['g'],
'HH': ['h'],
'JH': ['j'],
'K': ['k', 'c', 'ch'],
'KS': ['x'],
'L': ['ll', 'l'],
'M': ['m'],
'N': ['n', 'gn'],
'NG': ['ng'],
'NX': ['nn'],
'P': ['p'],
'Q': ['-'],
'R': ['wr', 'r'],
'S': ['s', 'ce'],
'SH': ['sh'],
'T': ['t'],
'TH': ['th'],
'V': ['v', 'f', 'e'],
'W': ['w'],
'WH': ['wh'],
'Y': ['y', 'j'],
'Z': ['z', 's'],
'ZH': ['s']
}
########################
# CONSONANT DURATION #
########################
PHONEMEDURATION = {
'B': 0.05,
'CH': 0.1,
'D': 0.075,
'DH': 0.05,
'DX': 0.05,
'EL': 0.05,
'EM': 0.05,
'EN': 0.05,
'F': 0.1,
'G': 0.05,
'HH': 0.05,
'JH': 0.05,
'K': 0.05,
'L': 0.05,
'M': 0.15,
'N': 0.15,
'NG': 0.15,
'NX': 0.05,
'P': 0.05,
'Q': 0.075,
'R': 0.05,
'S': 0.1,
'SH': 0.05,
'T': 0.075,
'TH': 0.1,
'V': 0.05,
'Y': 0.05,
'W': 0.05,
'WH': 0.05,
'Z': 0.05,
'ZH': 0.05
}
def add_space_between_events(events, connect=False):
new_events = []
for i in range(1, len(events)):
token_a, freq_a, start_time_a, end_time_a = events[i-1][-1]
token_b, freq_b, start_time_b, end_time_b = events[i][0]
if token_a in (' ', '') and len(events[i-1]) == 1:
new_events.append(events[i-1])
elif token_a not in (' ', '') and token_b not in (' ', ''):
new_events.append(events[i-1])
if connect:
new_events.append([[' ', 0, end_time_a, start_time_b]])
else:
new_events.append([[' ', 0, end_time_a, end_time_a]])
else:
new_events.append(events[i-1])
if new_events[-1][0][0] != ' ':
new_events.append([[' ', 0, end_time_a, end_time_a]])
new_events.append(events[-1])
return new_events
def adjust_words(events):
new_events = []
for event in events:
if len(event) == 1 and event[0][0] == ' ':
new_events.append(event)
else:
for e in event:
if e[0][0].isupper():
new_events.append([e])
else:
new_events[-1].extend([e])
return new_events
def adjust_extensions(events, phoneme_durations):
if len(events) == 1:
return events
idx_last_vowel = None
n_consonants_after_last_vowel = 0
target_ids = np.arange(len(events))
for i in range(len(events)):
token = re.sub('[0-9{}]', '', events[i][0])
if idx_last_vowel is None and token not in phoneme_durations:
idx_last_vowel = i
n_consonants_after_last_vowel = 0
else:
if token == '_' and not n_consonants_after_last_vowel:
events[i][0] = events[idx_last_vowel][0]
elif token == '_' and n_consonants_after_last_vowel:
events[i][0] = events[idx_last_vowel][0]
start = idx_last_vowel + 1
target_ids[start:start+n_consonants_after_last_vowel] += 1
target_ids[i] -= n_consonants_after_last_vowel
elif token in phoneme_durations:
n_consonants_after_last_vowel += 1
else:
n_consonants_after_last_vowel = 0
idx_last_vowel = i
new_events = [0] * len(events)
for i in range(len(events)):
new_events[target_ids[i]] = events[i]
# adjust time of consonants that were repositioned
for i in range(1, len(new_events)):
if new_events[i][2] < new_events[i-1][2]:
new_events[i][2] = new_events[i-1][2]
new_events[i][3] = new_events[i-1][3]
return new_events
def adjust_consonant_lengths(events, phoneme_durations):
t_init = events[0][2]
idx_last_vowel = None
for i in range(len(events)):
task = re.sub('[0-9{}]', '', events[i][0])
if task in phoneme_durations:
duration = phoneme_durations[task]
if idx_last_vowel is None: # consonant comes before any vowel
events[i][2] = t_init
events[i][3] = t_init + duration
else: # consonant comes after a vowel, must offset
events[idx_last_vowel][3] -= duration
for k in range(idx_last_vowel+1, i):
events[k][2] -= duration
events[k][3] -= duration
events[i][2] = events[i-1][3]
events[i][3] = events[i-1][3] + duration
else:
events[i][2] = t_init
events[i][3] = events[i][3]
t_init = events[i][3]
idx_last_vowel = i
t_init = events[i][3]
return events
def adjust_consonants(events, phoneme_durations):
if len(events) == 1:
return events
start = 0
split_ids = []
t_init = events[0][2]
# get each substring group
for i in range(1, len(events)):
if events[i][2] != t_init:
split_ids.append((start, i))
start = i
t_init = events[i][2]
split_ids.append((start, len(events)))
for (start, end) in split_ids:
events[start:end] = adjust_consonant_lengths(
events[start:end], phoneme_durations)
return events
def adjust_event(event, hop_length=256, sampling_rate=22050):
tokens, freq, start_time, end_time = event
if tokens == ' ':
return [event] if freq == 0 else [['_', freq, start_time, end_time]]
return [[token, freq, start_time, end_time] for token in tokens]
def musicxml2score(filepath, bpm=60):
track = {}
beat_length_seconds = 60/bpm
data = m21.converter.parse(filepath)
for i in range(len(data.parts)):
part = data.parts[i].flat
events = []
for k in range(len(part.notesAndRests)):
event = part.notesAndRests[k]
if isinstance(event, m21.note.Note):
freq = event.pitch.frequency
token = event.lyrics[0].text if len(event.lyrics) > 0 else ' '
start_time = event.offset * beat_length_seconds
end_time = start_time + event.duration.quarterLength * beat_length_seconds
event = [token, freq, start_time, end_time]
elif isinstance(event, m21.note.Rest):
freq = 0
token = ' '
start_time = event.offset * beat_length_seconds
end_time = start_time + event.duration.quarterLength * beat_length_seconds
event = [token, freq, start_time, end_time]
if token == '_':
raise Exception("Unexpected token {}".format(token))
if len(events) == 0:
events.append(event)
else:
if token == ' ':
if freq == 0:
if events[-1][1] == 0:
events[-1][3] = end_time
else:
events.append(event)
elif freq == events[-1][1]: # is event duration extension ?
events[-1][-1] = end_time
else: # must be different note on same syllable
events.append(event)
else:
events.append(event)
track[part.partName] = events
return track
def track2events(track):
events = []
for e in track:
events.extend(adjust_event(e))
group_ids = [i for i in range(len(events))
if events[i][0] in [' '] or events[i][0].isupper()]
events_grouped = []
for i in range(1, len(group_ids)):
start, end = group_ids[i-1], group_ids[i]
events_grouped.append(events[start:end])
if events[-1][0] != ' ':
events_grouped.append(events[group_ids[-1]:])
return events_grouped
def events2eventsarpabet(event):
if event[0][0] == ' ':
return event
# get word and word arpabet
word = ''.join([e[0] for e in event if e[0] not in('_', ' ')])
word_arpabet = get_arpabet(word, CMUDICT)
if word_arpabet[0] != '{':
return event
word_arpabet = word_arpabet.split()
# align tokens to arpabet
i, k = 0, 0
new_events = []
while i < len(event) and k < len(word_arpabet):
# single token
token_a, freq_a, start_time_a, end_time_a = event[i]
if token_a == ' ':
new_events.append([token_a, freq_a, start_time_a, end_time_a])
i += 1
continue
if token_a == '_':
new_events.append([token_a, freq_a, start_time_a, end_time_a])
i += 1
continue
# two tokens
if i < len(event) - 1:
j = i + 1
token_b, freq_b, start_time_b, end_time_b = event[j]
between_events = []
while j < len(event) and event[j][0] == '_':
between_events.append([token_b, freq_b, start_time_b, end_time_b])
j += 1
if j < len(event):
token_b, freq_b, start_time_b, end_time_b = event[j]
token_compound_2 = (token_a + token_b).lower()
# single arpabet
arpabet = re.sub('[0-9{}]', '', word_arpabet[k])
if k < len(word_arpabet) - 1:
arpabet_compound_2 = ''.join(word_arpabet[k:k+2])
arpabet_compound_2 = re.sub('[0-9{}]', '', arpabet_compound_2)
if i < len(event) - 1 and token_compound_2 in PHONEME2GRAPHEME[arpabet]:
new_events.append([word_arpabet[k], freq_a, start_time_a, end_time_a])
if len(between_events):
new_events.extend(between_events)
if start_time_a != start_time_b:
new_events.append([word_arpabet[k], freq_b, start_time_b, end_time_b])
i += 2 + len(between_events)
k += 1
elif token_a.lower() in PHONEME2GRAPHEME[arpabet]:
new_events.append([word_arpabet[k], freq_a, start_time_a, end_time_a])
i += 1
k += 1
elif arpabet_compound_2 in PHONEME2GRAPHEME and token_a.lower() in PHONEME2GRAPHEME[arpabet_compound_2]:
new_events.append([word_arpabet[k], freq_a, start_time_a, end_time_a])
new_events.append([word_arpabet[k+1], freq_a, start_time_a, end_time_a])
i += 1
k += 2
else:
k += 1
# add extensions and pauses at end of words
while i < len(event):
token_a, freq_a, start_time_a, end_time_a = event[i]
if token_a in (' ', '_'):
new_events.append([token_a, freq_a, start_time_a, end_time_a])
i += 1
return new_events
def event2alignment(events, hop_length=256, sampling_rate=22050):
frame_length = float(hop_length) / float(sampling_rate)
n_frames = int(events[-1][-1][-1] / frame_length)
n_tokens = np.sum([len(e) for e in events])
alignment = np.zeros((n_tokens, n_frames))
cur_event = -1
for event in events:
for i in range(len(event)):
if len(event) == 1 or cur_event == -1 or event[i][0] != event[i-1][0]:
cur_event += 1
token, freq, start_time, end_time = event[i]
alignment[cur_event, int(start_time/frame_length):int(end_time/frame_length)] = 1
return alignment[:cur_event+1]
def event2f0(events, hop_length=256, sampling_rate=22050):
frame_length = float(hop_length) / float(sampling_rate)
n_frames = int(events[-1][-1][-1] / frame_length)
f0s = np.zeros((1, n_frames))
for event in events:
for i in range(len(event)):
token, freq, start_time, end_time = event[i]
f0s[0, int(start_time/frame_length):int(end_time/frame_length)] = freq
return f0s
def event2text(events, convert_stress, cmudict=None):
text_clean = ''
for event in events:
for i in range(len(event)):
if i > 0 and event[i][0] == event[i-1][0]:
continue
if event[i][0] == ' ' and len(event) > 1:
if text_clean[-1] != "}":
text_clean = text_clean[:-1] + '} {'
else:
text_clean += ' {'
else:
if event[i][0][-1] in ('}', ' '):
text_clean += event[i][0]
else:
text_clean += event[i][0] + ' '
if convert_stress:
text_clean = re.sub('[0-9]', '1', text_clean)
text_encoded = text_to_sequence(text_clean, [], cmudict)
return text_encoded, text_clean
def remove_excess_frames(alignment, f0s):
excess_frames = np.sum(alignment.sum(0) == 0)
alignment = alignment[:, :-excess_frames] if excess_frames > 0 else alignment
f0s = f0s[:, :-excess_frames] if excess_frames > 0 else f0s
return alignment, f0s
def get_data_from_musicxml(filepath, bpm, phoneme_durations=None,
convert_stress=False):
if phoneme_durations is None:
phoneme_durations = PHONEMEDURATION
score = musicxml2score(filepath, bpm)
data = {}
for k, v in score.items():
# ignore empty tracks
if len(v) == 1 and v[0][0] == ' ':
continue
events = track2events(v)
events = adjust_words(events)
events_arpabet = [events2eventsarpabet(e) for e in events]
# make adjustments
events_arpabet = [adjust_extensions(e, phoneme_durations)
for e in events_arpabet]
events_arpabet = [adjust_consonants(e, phoneme_durations)
for e in events_arpabet]
events_arpabet = add_space_between_events(events_arpabet)
# convert data to alignment, f0 and text encoded
alignment = event2alignment(events_arpabet)
f0s = event2f0(events_arpabet)
alignment, f0s = remove_excess_frames(alignment, f0s)
text_encoded, text_clean = event2text(events_arpabet, convert_stress)
# convert data to torch
alignment = torch.from_numpy(alignment).permute(1, 0)[:, None].float()
f0s = torch.from_numpy(f0s)[None].float()
text_encoded = torch.LongTensor(text_encoded)[None]
data[k] = {'rhythm': alignment,
'pitch_contour': f0s,
'text_encoded': text_encoded}
return data
if __name__ == "__main__":
import argparse
# Get defaults so it can work with no Sacred
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--filepath", required=True)
args = parser.parse_args()
get_data_from_musicxml(args.filepath, 60)
| mellotron-master | mellotron_utils.py |
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
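# A minimal usage sketch (parameters mirror this repo's STFT defaults; only the
# output length is asserted, which follows from n = n_fft + hop_length * (n_frames - 1)):
#   env = window_sumsquare('hann', n_frames=100, hop_length=256,
#                          win_length=1024, n_fft=1024)
#   env.shape  # -> (1024 + 256 * 99,) == (26368,)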
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
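# Sanity sketch: decompression inverts compression for values at or above clip_val,
# since exp(log(clamp(x) * C)) / C == x in that range.
#   x = torch.tensor([0.1, 1.0, 5.0])
#   torch.allclose(dynamic_range_decompression(dynamic_range_compression(x)), x)  # -> True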
| mellotron-master | audio_processing.py |
import random
import torch
from tensorboardX import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir):
super(Tacotron2Logger, self).__init__(logdir)
def log_training(self, reduced_loss, grad_norm, learning_rate, duration,
iteration):
self.add_scalar("training.loss", reduced_loss, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
def log_validation(self, reduced_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
_, mel_outputs, gate_outputs, alignments = y_pred
mel_targets, gate_targets = y
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
| mellotron-master | logger.py |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from loss_scaler import DynamicLossScaler, LossScaler
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
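# A minimal sketch of the recursive conversion helpers above: nested tuples and
# lists are walked element-wise, so mixed structures keep their shape.
#   batch = (torch.randn(2, 3), [torch.randn(4)])
#   half_batch = fp32_to_fp16(batch)     # every FloatTensor becomes a HalfTensor
#   restored = fp16_to_fp32(half_batch)  # and back to float32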
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
class FP16_Optimizer(object):
"""
FP16_Optimizer is designed to wrap an existing PyTorch optimizer,
and enable an fp16 model to be trained using a master copy of fp32 weights.
Args:
optimizer (torch.optim.optimizer): Existing optimizer containing initialized fp16 parameters. Internally, FP16_Optimizer replaces the passed optimizer's fp16 parameters with new fp32 parameters copied from the original ones. FP16_Optimizer also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy after each step.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale fp16 gradients computed by the model. Scaled gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so static_loss_scale should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any static_loss_scale option.
"""
def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):
        if not torch.cuda.is_available():
raise SystemError('Cannot use fp16 without CUDA')
self.fp16_param_groups = []
self.fp32_param_groups = []
self.fp32_flattened_groups = []
for i, param_group in enumerate(optimizer.param_groups):
print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
for param in param_group['params']:
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
elif param.type() == 'torch.cuda.FloatTensor':
print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
fp32_flattened_this_group = None
if len(fp16_params_this_group) > 0:
fp32_flattened_this_group = _flatten_dense_tensors(
[param.detach().data.clone().float() for param in fp16_params_this_group])
fp32_flattened_this_group = Variable(fp32_flattened_this_group, requires_grad = True)
fp32_flattened_this_group.grad = fp32_flattened_this_group.new(
*fp32_flattened_this_group.size())
# python's lovely list concatenation via +
if fp32_flattened_this_group is not None:
param_group['params'] = [fp32_flattened_this_group] + fp32_params_this_group
else:
param_group['params'] = fp32_params_this_group
self.fp16_param_groups.append(fp16_params_this_group)
self.fp32_param_groups.append(fp32_params_this_group)
self.fp32_flattened_groups.append(fp32_flattened_this_group)
# print("self.fp32_flattened_groups = ", self.fp32_flattened_groups)
# print("self.fp16_param_groups = ", self.fp16_param_groups)
self.optimizer = optimizer.__class__(optimizer.param_groups)
# self.optimizer.load_state_dict(optimizer.state_dict())
self.param_groups = self.optimizer.param_groups
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
def zero_grad(self):
"""
Zero fp32 and fp16 parameter grads.
"""
self.optimizer.zero_grad()
for fp16_group in self.fp16_param_groups:
for param in fp16_group:
if param.grad is not None:
param.grad.detach_() # This does appear in torch.optim.optimizer.zero_grad(),
                                         # it detaches the grad from any lingering autograd graph before zeroing it in place.
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_param_groups:
for param in group:
params.append(param)
for group in self.fp32_param_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _copy_grads_fp16_to_fp32(self):
for fp32_group, fp16_group in zip(self.fp32_flattened_groups, self.fp16_param_groups):
if len(fp16_group) > 0:
# This might incur one more deep copy than is necessary.
fp32_group.grad.data.copy_(
_flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))
def _downscale_fp32(self):
if self.loss_scale != 1.0:
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
param.grad.data.mul_(1./self.loss_scale)
def clip_fp32_grads(self, clip=-1):
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
if clip > 0:
return torch.nn.utils.clip_grad_norm(fp32_params, clip)
def _copy_params_fp32_to_fp16(self):
for fp16_group, fp32_group in zip(self.fp16_param_groups, self.fp32_flattened_groups):
if len(fp16_group) > 0:
for fp16_param, fp32_data in zip(fp16_group,
_unflatten_dense_tensors(fp32_group.data, fp16_group)):
fp16_param.data.copy_(fp32_data)
def state_dict(self):
"""
Returns a dict containing the current state of this FP16_Optimizer instance.
This dict contains attributes of FP16_Optimizer, as well as the state_dict
of the contained Pytorch optimizer.
Untested.
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict.
Untested.
"""
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, step should be called after fp16_optimizer_obj.backward(loss).
step updates the fp32 master copy of parameters using the optimizer supplied to
FP16_Optimizer's constructor, then copies the updated fp32 params into the fp16 params
        originally referenced by FP16_Optimizer's constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, step may be called without a prior call to self.backward(loss).
However, the user should take care that any loss.backward() call within the closure
has been replaced by fp16_optimizer_obj.backward(loss).
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to FP16_Optimizer's constructor. closure should call zero_grad on the FP16_Optimizer object, compute the loss, call .backward(loss), and return the loss.
Closure example::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. note::
The only changes that need to be made compared to
`ordinary optimizer closures`_ are that "optimizer" itself should be an instance of
FP16_Optimizer, and that the call to loss.backward should be replaced by
optimizer.backward(loss).
.. warning::
Currently, calling step with a closure is not compatible with dynamic loss scaling.
.. _`ordinary optimizer closures`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):
raise TypeError("Using step with a closure is currently not "
"compatible with dynamic loss scaling.")
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print("OVERFLOW! Skipping step. Attempted loss scale: {}".format(scale))
return
if closure is not None:
self._step_with_closure(closure)
else:
self.optimizer.step()
self._copy_params_fp32_to_fp16()
return
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
"""
We expect that the fp16 params are initially fresh on entering self.step(),
so _copy_params_fp32_to_fp16() is unnecessary the first time wrapped_closure()
is called within self.optimizer.step().
"""
self.first_closure_call_this_step = False
else:
"""
If self.optimizer.step() internally calls wrapped_closure more than once,
it may update the fp32 params after each call. However, self.optimizer
doesn't know about the fp16 params at all. If the fp32 params get updated,
we can't rely on self.optimizer to refresh the fp16 params. We need
to handle that manually:
"""
self._copy_params_fp32_to_fp16()
"""
Our API expects the user to give us ownership of the backward() call by
replacing all calls to loss.backward() with optimizer.backward(loss).
This requirement holds whether or not the call to backward() is made within
a closure.
If the user is properly calling optimizer.backward(loss) within "closure,"
calling closure() here will give the fp32 master params fresh gradients
for the optimizer to play with,
so all wrapped_closure needs to do is call closure() and return the loss.
"""
temp_loss = closure()
return temp_loss
self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
def backward(self, loss, update_fp32_grads=True):
"""
fp16_optimizer_obj.backward performs the following conceptual operations:
fp32_loss = loss.float() (see first Note below)
scaled_loss = fp32_loss*loss_scale
scaled_loss.backward(), which accumulates scaled gradients into the .grad attributes of the
fp16 model's leaves.
fp16 grads are then copied to the stored fp32 params' .grad attributes (see second Note).
Finally, fp32 grads are divided by loss_scale.
In this way, after fp16_optimizer_obj.backward, the fp32 parameters have fresh gradients,
and fp16_optimizer_obj.step may be called.
.. note::
Converting the loss to fp32 before applying the loss scale provides some
additional safety against overflow if the user has supplied an fp16 value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
fp16_optimizer_obj.backward.
.. note::
The gradients found in an fp16 model's leaves after a call to
fp16_optimizer_obj.backward should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may silently change over time).
If the user wants to inspect gradients after a call to fp16_optimizer_obj.backward,
he/she should query the .grad attribute of FP16_Optimizer's stored fp32 parameters.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_fp32_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay this copy, which is useful to eliminate redundant fp16->fp32 grad copies if fp16_optimizer_obj.backward is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling fp16_optimizer_obj.update_fp32_grads before calling fp16_optimizer_obj.step.
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_fp32_grads=False)
optimizer.backward(loss2, update_fp32_grads=False)
optimizer.update_fp32_grads()
"""
self.loss_scaler.backward(loss.float())
if update_fp32_grads:
self.update_fp32_grads()
def update_fp32_grads(self):
"""
Copy the .grad attribute from stored references to fp16 parameters to
the .grad attribute of the master fp32 parameters that are directly
updated by the optimizer. :attr:`update_fp32_grads` only needs to be called if
fp16_optimizer_obj.backward was called with update_fp32_grads=False.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._copy_grads_fp16_to_fp32()
self._downscale_fp32()
@property
def loss_scale(self):
return self.loss_scaler.loss_scale
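
# Editor's usage sketch (not part of the original file). It follows the pattern described in
# the FP16_Optimizer docstring above: a half-precision module wrapped in FP16_Optimizer, with
# loss.backward() replaced by optimizer.backward(loss). The layer sizes and loss scale are
# hypothetical, and a CUDA device is required.
def _fp16_optimizer_demo():
    model = FP16_Module(torch.nn.Linear(16, 4)).cuda()
    optimizer = FP16_Optimizer(torch.optim.SGD(model.parameters(), lr=1e-3),
                               static_loss_scale=128.0)
    inputs = torch.randn(8, 16).cuda()
    targets = torch.randn(8, 4).cuda()
    for _ in range(3):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        optimizer.backward(loss)        # scales the loss, backprops, copies fp16 grads to fp32
        optimizer.clip_fp32_grads(1.0)  # optional clipping on the fp32 master gradients
        optimizer.step()                # fp32 update, then copy back into the fp16 params
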
| mellotron-master | fp16_optimizer.py |
from math import sqrt
import numpy as np
from numpy import finfo
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from layers import ConvNorm, LinearNorm
from utils import to_gpu, get_mask_from_lengths
from modules import GST
drop_rate = 0.5
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
return model
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask, attention_weights=None):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
if attention_weights is None:
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
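
# Editor's shape sketch (not part of the original file). The dimensions below are
# hypothetical; the sketch just illustrates the tensor shapes documented in the
# Attention docstrings above.
def _attention_shape_demo():
    attn = Attention(attention_rnn_dim=1024, embedding_dim=512, attention_dim=128,
                     attention_location_n_filters=32, attention_location_kernel_size=31)
    query = torch.zeros(2, 1024)              # decoder state: (B, attention_rnn_dim)
    memory = torch.zeros(2, 37, 512)          # encoder outputs: (B, T_in, embedding_dim)
    processed = attn.memory_layer(memory)     # (B, T_in, attention_dim)
    weights_cat = torch.zeros(2, 2, 37)       # previous + cumulative weights: (B, 2, T_in)
    context, weights = attn(query, memory, processed, weights_cat, mask=None)
    # context: (B, embedding_dim) = (2, 512); weights: (B, T_in) = (2, 37)
    return context, weights
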
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=drop_rate, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), drop_rate, self.training)
x = F.dropout(self.convolutions[-1](x), drop_rate, self.training)
return x
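
# Editor's shape sketch (not part of the original file). The hparams values are hypothetical,
# chosen to match the sizes quoted in the Postnet docstring; batch size and frame count are
# arbitrary.
def _postnet_shape_demo():
    from types import SimpleNamespace
    hp = SimpleNamespace(n_mel_channels=80, postnet_embedding_dim=512,
                         postnet_kernel_size=5, postnet_n_convolutions=5)
    postnet = Postnet(hp)
    mel = torch.rand(2, 80, 100)   # (B, n_mel_channels, T_out)
    residual = postnet(mel)        # (2, 80, 100): residual added to the decoder mel output downstream
    return residual
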
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
if x.size()[0] > 1:
print("here")
x_embedded = []
for b_ind in range(x.size()[0]): # TODO: Speed up
curr_x = x[b_ind:b_ind+1, :, :input_lengths[b_ind]].clone()
for conv in self.convolutions:
curr_x = F.dropout(F.relu(conv(curr_x)), drop_rate, self.training)
x_embedded.append(curr_x[0].transpose(0, 1))
x = torch.nn.utils.rnn.pad_sequence(x_embedded, batch_first=True)
else:
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), drop_rate, self.training)
x = x.transpose(1, 2)
# pytorch tensor are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), drop_rate, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim + hparams.token_embedding_size + hparams.speaker_embedding_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.p_attention_dropout = hparams.p_attention_dropout
self.p_decoder_dropout = hparams.p_decoder_dropout
self.p_teacher_forcing = hparams.p_teacher_forcing
self.prenet_f0 = ConvNorm(
1, hparams.prenet_f0_dim,
kernel_size=hparams.prenet_f0_kernel_size,
padding=max(0, int(hparams.prenet_f0_kernel_size/2)),
bias=False, stride=1, dilation=1)
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.prenet_f0_dim + self.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, self.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
hparams.attention_rnn_dim + self.encoder_embedding_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + self.encoder_embedding_dim,
hparams.n_mel_channels * hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + self.encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def get_end_f0(self, f0s):
B = f0s.size(0)
dummy = Variable(f0s.data.new(B, 1, f0s.size(1)).zero_())
return dummy
def initialize_decoder_states(self, memory, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
        gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs)
if len(gate_outputs.size()) > 1:
gate_outputs = gate_outputs.transpose(0, 1)
else:
gate_outputs = gate_outputs[None]
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input, attention_weights=None):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
self.attention_cell = F.dropout(
self.attention_cell, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask, attention_weights)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
self.decoder_cell = F.dropout(
self.decoder_cell, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, decoder_inputs, memory_lengths, f0s):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
# audio features
f0_dummy = self.get_end_f0(f0s)
f0s = torch.cat((f0s, f0_dummy), dim=2)
f0s = F.relu(self.prenet_f0(f0s))
f0s = f0s.permute(2, 0, 1)
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
if len(mel_outputs) == 0 or np.random.uniform(0.0, 1.0) <= self.p_teacher_forcing:
decoder_input = torch.cat((decoder_inputs[len(mel_outputs)],
f0s[len(mel_outputs)]), dim=1)
else:
decoder_input = torch.cat((self.prenet(mel_outputs[-1]),
f0s[len(mel_outputs)]), dim=1)
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory, f0s):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
f0_dummy = self.get_end_f0(f0s)
f0s = torch.cat((f0s, f0_dummy), dim=2)
f0s = F.relu(self.prenet_f0(f0s))
f0s = f0s.permute(2, 0, 1)
mel_outputs, gate_outputs, alignments = [], [], []
while True:
if len(mel_outputs) < len(f0s):
f0 = f0s[len(mel_outputs)]
else:
f0 = f0s[-1] * 0
decoder_input = torch.cat((self.prenet(decoder_input), f0), dim=1)
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
if torch.sigmoid(gate_output.data) > self.gate_threshold:
break
elif len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference_noattention(self, memory, f0s, attention_map):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
f0_dummy = self.get_end_f0(f0s)
f0s = torch.cat((f0s, f0_dummy), dim=2)
f0s = F.relu(self.prenet_f0(f0s))
f0s = f0s.permute(2, 0, 1)
mel_outputs, gate_outputs, alignments = [], [], []
for i in range(len(attention_map)):
f0 = f0s[i]
attention = attention_map[i]
decoder_input = torch.cat((self.prenet(decoder_input), f0), dim=1)
mel_output, gate_output, alignment = self.decode(decoder_input, attention)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
if hparams.with_gst:
self.gst = GST(hparams)
self.speaker_embedding = nn.Embedding(
hparams.n_speakers, hparams.speaker_embedding_dim)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, speaker_ids, f0_padded = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
speaker_ids = to_gpu(speaker_ids.data).long()
f0_padded = to_gpu(f0_padded).float()
return ((text_padded, input_lengths, mel_padded, max_len,
output_lengths, speaker_ids, f0_padded),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
inputs, input_lengths, targets, max_len, \
output_lengths, speaker_ids, f0s = inputs
input_lengths, output_lengths = input_lengths.data, output_lengths.data
embedded_inputs = self.embedding(inputs).transpose(1, 2)
embedded_text = self.encoder(embedded_inputs, input_lengths)
embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
embedded_gst = self.gst(targets, output_lengths)
embedded_gst = embedded_gst.repeat(1, embedded_text.size(1), 1)
embedded_speakers = embedded_speakers.repeat(1, embedded_text.size(1), 1)
encoder_outputs = torch.cat(
(embedded_text, embedded_gst, embedded_speakers), dim=2)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, targets, memory_lengths=input_lengths, f0s=f0s)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def inference(self, inputs):
text, style_input, speaker_ids, f0s = inputs
embedded_inputs = self.embedding(text).transpose(1, 2)
embedded_text = self.encoder.inference(embedded_inputs)
embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
if hasattr(self, 'gst'):
if isinstance(style_input, int):
query = torch.zeros(1, 1, self.gst.encoder.ref_enc_gru_size).cuda()
                gst_tokens = torch.tanh(self.gst.stl.embed)
                key = gst_tokens[style_input].unsqueeze(0).expand(1, -1, -1)
embedded_gst = self.gst.stl.attention(query, key)
else:
embedded_gst = self.gst(style_input)
embedded_speakers = embedded_speakers.repeat(1, embedded_text.size(1), 1)
if hasattr(self, 'gst'):
embedded_gst = embedded_gst.repeat(1, embedded_text.size(1), 1)
encoder_outputs = torch.cat(
(embedded_text, embedded_gst, embedded_speakers), dim=2)
else:
encoder_outputs = torch.cat(
(embedded_text, embedded_speakers), dim=2)
mel_outputs, gate_outputs, alignments = self.decoder.inference(
encoder_outputs, f0s)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
def inference_noattention(self, inputs):
text, style_input, speaker_ids, f0s, attention_map = inputs
embedded_inputs = self.embedding(text).transpose(1, 2)
embedded_text = self.encoder.inference(embedded_inputs)
embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
if hasattr(self, 'gst'):
if isinstance(style_input, int):
query = torch.zeros(1, 1, self.gst.encoder.ref_enc_gru_size).cuda()
                gst_tokens = torch.tanh(self.gst.stl.embed)
                key = gst_tokens[style_input].unsqueeze(0).expand(1, -1, -1)
embedded_gst = self.gst.stl.attention(query, key)
else:
embedded_gst = self.gst(style_input)
embedded_speakers = embedded_speakers.repeat(1, embedded_text.size(1), 1)
if hasattr(self, 'gst'):
embedded_gst = embedded_gst.repeat(1, embedded_text.size(1), 1)
encoder_outputs = torch.cat(
(embedded_text, embedded_gst, embedded_speakers), dim=2)
else:
encoder_outputs = torch.cat(
(embedded_text, embedded_speakers), dim=2)
mel_outputs, gate_outputs, alignments = self.decoder.inference_noattention(
encoder_outputs, f0s, attention_map)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
| mellotron-master | model.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
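
# Editor's usage sketch (not part of the original file). A short, CPU-only check that
# transform() followed by inverse() approximately reproduces the input waveform; the
# filter/hop/window sizes and waveform length are arbitrary.
def _stft_roundtrip_demo():
    stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
    audio = torch.randn(1, 4000)                    # fake (batch, samples) waveform
    magnitude, phase = stft.transform(audio)        # each (1, filter_length/2 + 1, n_frames)
    reconstruction = stft.inverse(magnitude, phase) # (1, 1, samples), approximately audio
    return reconstruction
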
| mellotron-master | stft.py |
import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1).float() for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run is using multiprocess with 1
GPU/process, that the model is on the correct device, and that torch.set_device has been
used to set the device.
Parameters are broadcasted to the other processes on initialization of DistributedDataParallel,
and will be allreduced at the finish of the backward pass.
'''
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
#fallback for PyTorch 0.3
if not hasattr(dist, '_backend'):
self.warn_on_half = True
else:
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
'''
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
'''
def apply_gradient_allreduce(module):
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
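
# Editor's wiring sketch (not part of the original file). It mirrors how train.py uses this
# module: one process per GPU, distributed init first, then wrap the model so its gradients
# are all-reduced (and averaged) across processes after every backward pass. All arguments
# are placeholders supplied by a multi-process launcher such as multiproc.py.
def _apply_allreduce_demo(model, rank, n_gpus, dist_backend='nccl',
                          dist_url='tcp://localhost:54321', group_name='group_name'):
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=dist_backend, init_method=dist_url,
                            world_size=n_gpus, rank=rank, group_name=group_name)
    model = model.cuda()
    return apply_gradient_allreduce(model)
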
| mellotron-master | distributed.py |
import random
import os
import re
import numpy as np
import torch
import torch.utils.data
import librosa
import layers
from utils import load_wav_to_torch, load_filepaths_and_text
from text import text_to_sequence, cmudict
from yin import compute_yin
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio, text and speaker ids
2) normalizes text and converts them to sequences of one-hot vectors
3) computes mel-spectrograms and f0s from audio files.
"""
def __init__(self, audiopaths_and_text, hparams, speaker_ids=None):
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
self.text_cleaners = hparams.text_cleaners
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.stft = layers.TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.f0_min = hparams.f0_min
self.f0_max = hparams.f0_max
self.harm_thresh = hparams.harm_thresh
self.p_arpabet = hparams.p_arpabet
self.cmudict = None
if hparams.cmudict_path is not None:
self.cmudict = cmudict.CMUDict(hparams.cmudict_path)
self.speaker_ids = speaker_ids
if speaker_ids is None:
self.speaker_ids = self.create_speaker_lookup_table(
self.audiopaths_and_text)
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def create_speaker_lookup_table(self, audiopaths_and_text):
speaker_ids = np.sort(np.unique([x[2] for x in audiopaths_and_text]))
d = {int(speaker_ids[i]): i for i in range(len(speaker_ids))}
return d
def get_f0(self, audio, sampling_rate=22050, frame_length=1024,
hop_length=256, f0_min=100, f0_max=300, harm_thresh=0.1):
f0, harmonic_rates, argmins, times = compute_yin(
audio, sampling_rate, frame_length, hop_length, f0_min, f0_max,
harm_thresh)
pad = int((frame_length / hop_length) / 2)
f0 = [0.0] * pad + f0 + [0.0] * pad
f0 = np.array(f0, dtype=np.float32)
return f0
def get_data(self, audiopath_and_text):
audiopath, text, speaker = audiopath_and_text
text = self.get_text(text)
mel, f0 = self.get_mel_and_f0(audiopath)
speaker_id = self.get_speaker_id(speaker)
return (text, mel, speaker_id, f0)
def get_speaker_id(self, speaker_id):
return torch.IntTensor([self.speaker_ids[int(speaker_id)]])
def get_mel_and_f0(self, filepath):
audio, sampling_rate = load_wav_to_torch(filepath)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
f0 = self.get_f0(audio.cpu().numpy(), self.sampling_rate,
self.filter_length, self.hop_length, self.f0_min,
self.f0_max, self.harm_thresh)
f0 = torch.from_numpy(f0)[None]
f0 = f0[:, :melspec.size(1)]
return melspec, f0
def get_text(self, text):
text_norm = torch.IntTensor(
text_to_sequence(text, self.text_cleaners, self.cmudict, self.p_arpabet))
return text_norm
def __getitem__(self, index):
return self.get_data(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded, gate padded and speaker ids
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
speaker_ids = torch.LongTensor(len(batch))
f0_padded = torch.FloatTensor(len(batch), 1, max_target_len)
f0_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
speaker_ids[i] = batch[ids_sorted_decreasing[i]][2]
f0 = batch[ids_sorted_decreasing[i]][3]
f0_padded[i, :, :f0.size(1)] = f0
model_inputs = (text_padded, input_lengths, mel_padded, gate_padded,
output_lengths, speaker_ids, f0_padded)
return model_inputs
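
# Editor's sketch (not part of the original file). It pads a small fake batch in memory,
# without touching the filesystem, to show what the collate function returns. The 80 mel
# channels, token ids, and frame counts are hypothetical.
def _collate_demo():
    collate = TextMelCollate(n_frames_per_step=1)
    batch = [
        # (text ids, mel (n_mels, T), speaker id, f0 (1, T)) as produced by TextMelLoader
        (torch.IntTensor([5, 3, 9]), torch.rand(80, 120), torch.IntTensor([0]), torch.rand(1, 120)),
        (torch.IntTensor([7, 1]), torch.rand(80, 90), torch.IntTensor([1]), torch.rand(1, 90)),
    ]
    (text_padded, input_lengths, mel_padded, gate_padded,
     output_lengths, speaker_ids, f0_padded) = collate(batch)
    # text_padded: (2, 3), zero-padded; mel_padded/f0_padded: padded to 120 frames;
    # gate_padded flags each utterance's final (and padded) frames with 1.
    return text_padded, input_lengths, mel_padded, gate_padded, output_lengths, speaker_ids, f0_padded
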
| mellotron-master | data_utils.py |
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
| mellotron-master | loss_function.py |
import numpy as np
from scipy.io.wavfile import read
import torch
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
| mellotron-master | utils.py |
import os
import time
import argparse
import math
from numpy import finfo
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import load_model
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams,
speaker_ids=trainset.speaker_ids)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn, train_sampler
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, reduced_val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string) directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn, train_sampler = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
if train_sampler is not None:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
start = time.perf_counter()
if iteration > 0 and iteration % hparams.learning_rate_anneal == 0:
learning_rate = max(
hparams.learning_rate_min, learning_rate * 0.5)
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
| mellotron-master | train.py |
# adapted from https://github.com/patriceguyot/Yin
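# [1] in the docstrings below refers to: A. de Cheveigne and H. Kawahara,
#     "YIN, a fundamental frequency estimator for speech and music",
#     Journal of the Acoustical Society of America, 2002.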
import numpy as np
def differenceFunction(x, N, tau_max):
"""
Compute difference function of data x. This corresponds to equation (6) in [1]
This solution is implemented directly with Numpy fft.
:param x: audio data
:param N: length of data
:param tau_max: integration window size
:return: difference function
:rtype: list
"""
x = np.array(x, np.float64)
w = x.size
tau_max = min(tau_max, w)
x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
size = w + tau_max
p2 = (size // 32).bit_length()
nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)
fc = np.fft.rfft(x, size_pad)
conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]
return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - 2 * conv
def cumulativeMeanNormalizedDifferenceFunction(df, N):
"""
Compute cumulative mean normalized difference function (CMND).
This corresponds to equation (8) in [1]
:param df: Difference function
:param N: length of data
:return: cumulative mean normalized difference function
:rtype: list
"""
cmndf = df[1:] * range(1, N) / np.cumsum(df[1:]).astype(float) #scipy method
return np.insert(cmndf, 0, 1)
def getPitch(cmdf, tau_min, tau_max, harmo_th=0.1):
"""
Return fundamental period of a frame based on CMND function.
:param cmdf: Cumulative Mean Normalized Difference function
:param tau_min: minimum period for speech
:param tau_max: maximum period for speech
:param harmo_th: harmonicity threshold to determine if it is necessary to compute pitch frequency
    :return: fundamental period if there are values under the threshold, 0 otherwise
:rtype: float
"""
tau = tau_min
while tau < tau_max:
if cmdf[tau] < harmo_th:
while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:
tau += 1
return tau
tau += 1
return 0 # if unvoiced
def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500,
harmo_thresh=0.1):
"""
Compute the Yin Algorithm. Return fundamental frequency and harmonic rate.
:param sig: Audio signal (list of float)
:param sr: sampling rate (int)
:param w_len: size of the analysis window (samples)
    :param w_step: size of the lag between two consecutive windows (samples)
:param f0_min: Minimum fundamental frequency that can be detected (hertz)
:param f0_max: Maximum fundamental frequency that can be detected (hertz)
    :param harmo_thresh: detection threshold; the algorithm returns the first minimum of the CMND function below this threshold.
:returns:
* pitches: list of fundamental frequencies,
* harmonic_rates: list of harmonic rate values for each fundamental frequency value (= confidence value)
* argmins: minimums of the Cumulative Mean Normalized DifferenceFunction
* times: list of time of each estimation
:rtype: tuple
"""
tau_min = int(sr / f0_max)
tau_max = int(sr / f0_min)
timeScale = range(0, len(sig) - w_len, w_step) # time values for each analysis window
times = [t/float(sr) for t in timeScale]
frames = [sig[t:t + w_len] for t in timeScale]
pitches = [0.0] * len(timeScale)
harmonic_rates = [0.0] * len(timeScale)
argmins = [0.0] * len(timeScale)
for i, frame in enumerate(frames):
# Compute YIN
df = differenceFunction(frame, w_len, tau_max)
cmdf = cumulativeMeanNormalizedDifferenceFunction(df, tau_max)
p = getPitch(cmdf, tau_min, tau_max, harmo_thresh)
# Get results
if np.argmin(cmdf) > tau_min:
argmins[i] = float(sr / np.argmin(cmdf))
if p != 0: # A pitch was found
pitches[i] = float(sr / p)
harmonic_rates[i] = cmdf[p]
else: # No pitch, but we compute a value of the harmonic rate
harmonic_rates[i] = min(cmdf)
return pitches, harmonic_rates, argmins, times
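# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): runs compute_yin on a
# synthetic 220 Hz sine wave. The window sizes and threshold below are
# illustrative example values, not values mandated by Mellotron.
if __name__ == '__main__':
    sr = 22050
    t = np.arange(sr) / float(sr)                # one second of audio
    sig = 0.8 * np.sin(2 * np.pi * 220.0 * t)    # 220 Hz test tone
    pitches, harmonic_rates, argmins, times = compute_yin(
        sig, sr, w_len=1024, w_step=256, f0_min=100, f0_max=500,
        harmo_thresh=0.15)
    voiced = [p for p in pitches if p > 0]
    # Most frames should report a pitch close to 220 Hz.
    print('median f0: %.1f Hz over %d voiced frames' % (np.median(voiced), len(voiced)))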
| mellotron-master | yin.py |
# adapted from https://github.com/KinglittleQ/GST-Tacotron/blob/master/GST.py
# MIT License
#
# Copyright (c) 2018 MagicGirl Sakura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class ReferenceEncoder(nn.Module):
'''
inputs --- [N, Ty/r, n_mels*r] mels
outputs --- [N, ref_enc_gru_size]
'''
def __init__(self, hp):
super().__init__()
K = len(hp.ref_enc_filters)
filters = [1] + hp.ref_enc_filters
convs = [nn.Conv2d(in_channels=filters[i],
out_channels=filters[i + 1],
kernel_size=(3, 3),
stride=(2, 2),
padding=(1, 1)) for i in range(K)]
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(
[nn.BatchNorm2d(num_features=hp.ref_enc_filters[i])
for i in range(K)])
out_channels = self.calculate_channels(hp.n_mel_channels, 3, 2, 1, K)
self.gru = nn.GRU(input_size=hp.ref_enc_filters[-1] * out_channels,
hidden_size=hp.ref_enc_gru_size,
batch_first=True)
self.n_mel_channels = hp.n_mel_channels
self.ref_enc_gru_size = hp.ref_enc_gru_size
def forward(self, inputs, input_lengths=None):
out = inputs.view(inputs.size(0), 1, -1, self.n_mel_channels)
for conv, bn in zip(self.convs, self.bns):
out = conv(out)
out = bn(out)
out = F.relu(out)
out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
N, T = out.size(0), out.size(1)
out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
if input_lengths is not None:
input_lengths = torch.ceil(input_lengths.float() / 2 ** len(self.convs))
input_lengths = input_lengths.cpu().numpy().astype(int)
out = nn.utils.rnn.pack_padded_sequence(
out, input_lengths, batch_first=True, enforce_sorted=False)
self.gru.flatten_parameters()
_, out = self.gru(out)
return out.squeeze(0)
def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
for _ in range(n_convs):
L = (L - kernel_size + 2 * pad) // stride + 1
return L
class STL(nn.Module):
'''
    inputs --- [N, ref_enc_gru_size] (output of ReferenceEncoder)
'''
def __init__(self, hp):
super().__init__()
self.embed = nn.Parameter(torch.FloatTensor(hp.token_num, hp.token_embedding_size // hp.num_heads))
d_q = hp.ref_enc_gru_size
d_k = hp.token_embedding_size // hp.num_heads
self.attention = MultiHeadAttention(
query_dim=d_q, key_dim=d_k, num_units=hp.token_embedding_size,
num_heads=hp.num_heads)
init.normal_(self.embed, mean=0, std=0.5)
def forward(self, inputs):
N = inputs.size(0)
query = inputs.unsqueeze(1)
keys = torch.tanh(self.embed).unsqueeze(0).expand(N, -1, -1) # [N, token_num, token_embedding_size // num_heads]
style_embed = self.attention(query, keys)
return style_embed
class MultiHeadAttention(nn.Module):
'''
input:
query --- [N, T_q, query_dim]
key --- [N, T_k, key_dim]
output:
out --- [N, T_q, num_units]
'''
def __init__(self, query_dim, key_dim, num_units, num_heads):
super().__init__()
self.num_units = num_units
self.num_heads = num_heads
self.key_dim = key_dim
self.W_query = nn.Linear(in_features=query_dim, out_features=num_units, bias=False)
self.W_key = nn.Linear(in_features=key_dim, out_features=num_units, bias=False)
self.W_value = nn.Linear(in_features=key_dim, out_features=num_units, bias=False)
def forward(self, query, key):
querys = self.W_query(query) # [N, T_q, num_units]
keys = self.W_key(key) # [N, T_k, num_units]
values = self.W_value(key)
split_size = self.num_units // self.num_heads
querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0) # [h, N, T_q, num_units/h]
keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0) # [h, N, T_k, num_units/h]
values = torch.stack(torch.split(values, split_size, dim=2), dim=0) # [h, N, T_k, num_units/h]
# score = softmax(QK^T / (d_k ** 0.5))
scores = torch.matmul(querys, keys.transpose(2, 3)) # [h, N, T_q, T_k]
scores = scores / (self.key_dim ** 0.5)
scores = F.softmax(scores, dim=3)
# out = score * V
out = torch.matmul(scores, values) # [h, N, T_q, num_units/h]
out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0) # [N, T_q, num_units]
return out
class GST(nn.Module):
def __init__(self, hp):
super().__init__()
self.encoder = ReferenceEncoder(hp)
self.stl = STL(hp)
def forward(self, inputs, input_lengths=None):
enc_out = self.encoder(inputs, input_lengths=input_lengths)
style_embed = self.stl(enc_out)
return style_embed
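# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): runs the GST module on
# random mel frames. The hyperparameter values below are illustrative guesses
# in the spirit of common GST configurations, not the values shipped with the
# repository.
if __name__ == '__main__':
    from types import SimpleNamespace
    hp = SimpleNamespace(
        ref_enc_filters=[32, 32, 64, 64, 128, 128],
        ref_enc_gru_size=128,
        n_mel_channels=80,
        token_num=10,
        token_embedding_size=256,
        num_heads=8)
    gst = GST(hp)
    mels = torch.randn(2, 120, hp.n_mel_channels)   # [N, T, n_mels]
    lengths = torch.tensor([120, 96])
    style = gst(mels, input_lengths=lengths)
    print(style.shape)                              # expected: torch.Size([2, 1, 256])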
| mellotron-master | modules.py |
import torch
from librosa.filters import mel as librosa_mel_fn
from audio_processing import dynamic_range_compression, dynamic_range_decompression
from stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class ConvNorm2D(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm2D, self).__init__()
self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
groups=1, bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y, ref_level_db = 20, magnitude_power=1.5):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
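# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): exercises the small
# wrapper layers on random tensors. It assumes the repository's stft and
# audio_processing modules plus librosa are importable, since this file imports
# them at load time; the example itself only touches LinearNorm and ConvNorm,
# and the shapes are illustrative only.
if __name__ == '__main__':
    lin = LinearNorm(80, 256, w_init_gain='tanh')
    conv = ConvNorm(80, 512, kernel_size=5, w_init_gain='relu')
    frames = torch.randn(4, 100, 80)               # [B, T, n_mels]
    print(lin(frames).shape)                       # torch.Size([4, 100, 256])
    print(conv(frames.transpose(1, 2)).shape)      # torch.Size([4, 512, 100]); odd kernels keep length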
| mellotron-master | layers.py |
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
| mellotron-master | multiproc.py |
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
cpu_sum = float(x.float().sum())
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
        has_overflow = loss_scaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
| mellotron-master | loss_scaler.py |
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
      parts = line.split('  ')  # word and pronunciation are separated by two spaces in cmudict
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
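# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): builds a CMUDict from an
# in-memory snippet instead of the real cmudict file. The entries follow the
# cmudict convention of two spaces between word and pronunciation.
if __name__ == '__main__':
  import io
  snippet = io.StringIO(
    "HELLO  HH AH0 L OW1\n"
    "WORLD  W ER1 L D\n"
    "READ  R EH1 D\n"
    "READ(1)  R IY1 D\n")
  d = CMUDict(snippet)
  print(len(d))            # 3 distinct words
  print(d.lookup('read'))  # both pronunciations: ['R EH1 D', 'R IY1 D']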
| mellotron-master | text/cmudict.py |
""" from https://github.com/keithito/tacotron """
import re
import random
from text import cleaners
from text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
_words_re = re.compile(r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)")
def get_arpabet(word, dictionary):
word_arpabet = dictionary.lookup(word)
if word_arpabet is not None:
return "{" + word_arpabet[0] + "}"
else:
return word
def text_to_sequence(text, cleaner_names, dictionary=None, p_arpabet=1.0):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
dictionary: arpabet class with arpabet dictionary
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
clean_text = _clean_text(text, cleaner_names)
if dictionary is not None:
words = _words_re.findall(text)
clean_text = [
get_arpabet(word[0], dictionary)
if ((word[0] != '') and random.random() < p_arpabet) else word[1]
for word in words]
for i in range(len(clean_text)):
t = clean_text[i]
if t.startswith("{"):
sequence += _arpabet_to_sequence(t[1:-1])
else:
sequence += _symbols_to_sequence(t)
#sequence += space
else:
sequence += _symbols_to_sequence(clean_text)
break
sequence += text_to_sequence(m.group(1), cleaner_names, dictionary, p_arpabet)
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
  return s in _symbol_to_id and s != '_' and s != '~'
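# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). With the repository root
# on PYTHONPATH and the unidecode/inflect dependencies installed, the round
# trip is expected to behave roughly as below; the embedded ARPAbet chunk is
# passed through unchanged:
#
#   >>> from text import text_to_sequence, sequence_to_text
#   >>> seq = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.',
#   ...                        ['english_cleaners'])
#   >>> sequence_to_text(seq)
#   'turn left on {HH AW1 S S T AH0 N} street.'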
| mellotron-master | text/__init__.py |
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
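# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a quick look at what the
# normalization does (requires the inflect package). The exact wording comes
# from inflect, so treat the expected output as approximate.
if __name__ == '__main__':
  print(normalize_numbers('I owe you $3.50 and 16 apples, delivered on the 2nd.'))
  # roughly: "I owe you three dollars, fifty cents and sixteen apples, delivered on the second."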
| mellotron-master | text/numbers.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify the character sets defined below. See TRAINING_DATA.md for details. '''
from text import cmudict
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_numbers = '0123456789'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as
# uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = list(_punctuation + _math + _special + _accented + _numbers + _letters) + _arpabet
| mellotron-master | text/symbols.py |
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
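# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). Because this module uses
# a relative import, it is meant to be used through the package; with unidecode
# and inflect installed, the English pipeline behaves roughly like this:
#
#   >>> from text.cleaners import english_cleaners
#   >>> english_cleaners('Dr. Strange paid $20 on Jan 5th, 2019 at CAFE Noir.')
#   'doctor strange paid twenty dollars on jan fifth, twenty nineteen at cafe noir.'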
| mellotron-master | text/cleaners.py |
#!/usr/bin/env python2
# Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import os
import sys
parser = argparse.ArgumentParser(
description='Generate a U-Boot boot script')
parser.add_argument('--debug', action='store_true',
help='turn on debugging prints')
parser.add_argument('--type', choices=['disk', 'net'], default='disk',
help='type of boot-script to generate; defaults to disk')
parser.add_argument('-o', dest='outfile', default='boot.scr',
help='output filename; defaults to boot.scr')
parser.add_argument('--zimage', default='zImage',
help='zImage filename to load; defaults to "zImage"')
parser.add_argument('--initrd',
help='initrd filename to load; defaults to no initrd')
parser.add_argument('--partuuid',
help='root partition (not filesystem) UUID. Not applicable if an initrd ' +
'is used. Defaults to auto-detection based on rootpart U-Boot ' +
'environment variable')
parser.add_argument('--fsuuid',
help='root filesystem UUID. Not applicable if an initrd ' +
'is used. Defaults to auto-detection based on rootpart U-Boot ' +
'environment variable')
parser.add_argument('--ro', action='store_const', const='ro', dest='rorw',
help='initially mount root filesystem read-only')
parser.add_argument('--rw', action='store_const', const='rw', dest='rorw',
help='initially mount root filesystem read-write (default)')
parser.add_argument('--no-con-ttyS0', action='store_true',
help='Disable console on ttyS0 UART')
parser.add_argument('--no-con-tty1', action='store_true',
help='Disable console on tty1 VT')
parser.add_argument('--earlyprintk', action='store_true',
help='Enable early printk')
# loglevel=8 ignore_loglevel
parser.add_argument('--noisy', action='store_true',
help='Enable noisy kernel log output, ignoring log level')
parser.add_argument('--cmdline',
help='Extra command-line options')
parser.add_argument('--dtb-dir', dest='dtbdir', default=None,
help='Search directory for dtbs on target; defaults to boot directory')
args = parser.parse_args()
if args.debug: print args
if args.initrd:
if args.partuuid or args.fsuuid:
        raise Exception('--initrd used; --partuuid and --fsuuid should not be used')
if args.type == 'net':
if not args.initrd and not (args.partuuid or args.fsuuid):
        raise Exception('--type net used without --initrd; --partuuid or --fsuuid should be provided')
if args.partuuid and args.fsuuid:
raise Exception('--partuuid and --fsuuid should not be used together')
if not args.rorw:
args.rorw = 'rw'
outfile_tmp = args.outfile + '.tmp'
f = file(outfile_tmp, 'wt')
if args.initrd:
root = 'root=/dev/ram'
elif args.partuuid:
root = 'root=PARTUUID=' + args.partuuid
elif args.fsuuid:
root = 'root=UUID=' + args.fsuuid
else:
f.write('part uuid ${devtype} ${devnum}:${rootpart} uuid\n')
root = 'root=PARTUUID=${uuid}'
if args.type == 'net':
f.write('dhcp\n')
load='tftpboot'
prefix=''
else:
load='load ${devtype} ${devnum}:${rootpart}'
prefix='/boot/'
if not args.dtbdir:
args.dtbdir = prefix
elif not args.dtbdir.endswith('/'):
args.dtbdir += '/'
f.write(load + ' ${kernel_addr_r} ' + prefix + args.zimage + '\n')
if args.initrd:
f.write(load + ' ${ramdisk_addr_r} ' + prefix + args.initrd + '\n')
f.write('ramdisk=${ramdisk_addr_r}:0x${filesize}\n')
f.write('setenv initrd_high 0xffffffff\n')
ramdisk = '${ramdisk}'
else:
ramdisk = '-'
f.write('''\
if test -n "${fdtfile}"; then
setenv _fdt ${fdtfile};
else
setenv _fdt ${soc}-${board}${boardver}.dtb;
fi
''')
f.write(load + ' ${fdt_addr_r} ' + args.dtbdir + '${_fdt}\n')
f.write('setenv _fdt\n')
bootargs = ''
if not args.no_con_ttyS0:
bootargs += 'console=ttyS0,115200n8 '
if not args.no_con_tty1:
bootargs += 'console=tty1 '
if args.noisy:
bootargs += 'ignore_loglevel '
if args.earlyprintk:
bootargs += 'earlyprintk '
bootargs += args.rorw + ' rootwait ' + root + ' '
if args.cmdline:
bootargs += args.cmdline + ' '
bootargs += '${extra_bootargs}'
f.write('setenv bootargs ' + bootargs + '\n')
f.write('bootz ${kernel_addr_r} ' + ramdisk + ' ${fdt_addr_r}\n')
f.close()
cmd = 'mkimage -A arm -O linux -T script -C none -a 0 -e 0 -n "Tegra Boot Script" -d ' + outfile_tmp + ' ' + args.outfile
print '+ ' + cmd
ret = os.system(cmd)
if ret:
sys.exit(1)
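# ----------------------------------------------------------------------------
# Hedged usage note (not part of the original source): a typical disk-boot
# invocation might look like the line below; the file names are illustrative.
# mkimage (from the u-boot-tools package) must be installed for the final
# wrapping step.
#
#   ./gen-uboot-script.py --type disk --zimage zImage --dtb-dir /boot/dtbs \
#       --cmdline "quiet" -o boot.scr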
| tegra-uboot-scripts-master | gen-uboot-script.py |
#!/usr/bin/env python2
# Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import re
import struct
import sys
def unpack_u32_le(data, offset):
return struct.unpack('<I', data[offset:(offset+4)])[0]
def unpack_u64_le(data, offset):
return struct.unpack('<Q', data[offset:(offset+8)])[0]
def hexdump(d, indices):
s = ''
for i in indices:
s += "%02x" % ord(d[i])
return s
def find_uuid_mbr(dev, pnum, pdata):
if pnum < 1 or pnum > 0xff:
raise Exception('Invalid partition number %d' % pnum)
disk_uuid = unpack_u32_le(pdata, 0x1b8)
return '%08x-%02x' % (disk_uuid, pnum)
def find_uuid_efi(dev, pnum, pdata):
gpt_ver = unpack_u32_le(pdata, 512 + 8)
if gpt_ver != 0x00010000:
raise Exception('Unexpected GPT version 0x%08x' % gpt_ver)
num_parts = unpack_u32_le(pdata, 512 + 80)
if pnum < 1 or pnum > num_parts:
raise Exception('Invalid partition number %d' % pnum)
ptable_lba = unpack_u64_le(pdata, 512 + 72)
ptable_ent_size = unpack_u32_le(pdata, 512 + 84)
ptable_ent_offset = (ptable_lba * 512) + ((pnum - 1) * ptable_ent_size)
f = file(dev, 'rb')
f.seek(ptable_ent_offset )
type_uuid = f.read(16)
part_uuid = f.read(16)
f.close()
if type_uuid == '\x00' * 16:
raise Exception('Invalid partition number %d' % pnum)
s = hexdump(part_uuid, (3, 2, 1, 0))
s += '-'
s += hexdump(part_uuid, (5, 4))
s += '-'
s += hexdump(part_uuid, (7, 6))
s += '-'
s += hexdump(part_uuid, (8, 9))
s += '-'
s += hexdump(part_uuid, (10, 11, 12, 13, 14, 15))
return s
def find_uuid(dev, pnum):
f = file(dev, 'rb')
pdata = f.read(2 * 512)
f.close()
if pdata[511] != chr(0xaa):
raise Exception('MBR not present')
is_efi = pdata[512:520] == "EFI PART"
if is_efi:
return find_uuid_efi(dev, pnum, pdata)
else:
return find_uuid_mbr(dev, pnum, pdata)
parser = argparse.ArgumentParser(
description="Determine a partition's (not filesystem's) UUID")
parser.add_argument('--debug', action='store_true',
help='Turn on debugging prints')
parser.add_argument('device', metavar='DEVICE', type=str,
help='The partitioned device')
parser.add_argument('pnum', metavar='PART-NUM', type=int,
help='The partition number')
args = parser.parse_args()
if args.debug: print args
print find_uuid(args.device, args.pnum)
| tegra-uboot-scripts-master | part-uuid.py |
from yum.plugins import PluginYumExit, TYPE_CORE, TYPE_INTERACTIVE
from yum.packages import YumInstalledPackage
from yum.constants import *
from rpmUtils.miscutils import compareEVR
import sys
import os
import re
sys.path.insert(0,'/usr/share/yum-cli/')
import yum
from yum.Errors import *
from utils import YumUtilBase
from yum import _
import logging
import rpmUtils
requires_api_version = '2.3'
plugin_type = (TYPE_CORE)
KERNEL_PKG_NAME = 'kernel'
MODULE_PKG_BASENAME = 'kmod-nvidia'
MODULE_PKG_PATTERN = re.compile(MODULE_PKG_BASENAME + '-(branch-[0-9][0-9][0-9]|latest)$')
DRIVER_PKG_BASENAME = 'nvidia-driver'
DRIVER_PKG_PATTERN = re.compile(DRIVER_PKG_BASENAME + '-(branch-[0-9][0-9][0-9]|latest)$')
DEPEND_ON_KMOD_PATTERNS = [DRIVER_PKG_PATTERN]
def msg(conduit, message):
conduit.info(1, 'NVIDIA: ' + message)
def init_hook(conduit):
"""This is just here to make sure the plugin was loaded correctly.
Eventually this should just go away."""
# conduit.info(2, '#### NVIDIA ####')
def addErase(conduit, tsInfo, package):
"""additional sanity check that we only try to addErase() installed packages,
i.e. RPMInstalledPackage instances. If we add others here, e.g. just
YumAvailablePackages, the transaction fails later with a cryptic error message"""
if isinstance(package, YumInstalledPackage):
tsInfo.addErase(package)
else:
conduit.error(2, 'NVIDIA: tried erasing non-installed package ' + str(package) + '/' + str(type(package)))
raise AttributeError
def get_module_package(conduit, driverPackage, kernelPackage):
"""Return the corresponding kernel module package, given an installed driver package
and a kernel package."""
tsInfo = conduit.getTsInfo()
modName = get_module_pkg_name(driverPackage)
modRelease = get_module_pkg_release(kernelPackage, driverPackage)
# We search the DB first so we can be sure to get a YumInstalledPackage
# instance in case the module package is already installed and a
    # YumAvailablePackage instance in case it isn't.
db = conduit.getRpmDB()
pkgs = db.searchNevra(modName, driverPackage.epoch, kernelPackage.version, \
modRelease, driverPackage.arch)
if pkgs:
# Assume len(pkgs) == 1, but don't assert
return pkgs[0]
try:
return conduit._base.getPackageObject((modName, driverPackage.arch,
driverPackage.epoch, kernelPackage.version,
modRelease))
except:
pass
tsInfo.deselect('kernel')
return None
def install_modules_for_kernels(conduit, driverPackage, kernelPackages):
"""Install kernel module packages for all given kernel packages"""
tsInfo = conduit.getTsInfo()
db = conduit.getRpmDB()
newestKernel = get_most_recent_kernel(conduit, kernelPackages)
modPo = get_module_package(conduit, driverPackage, newestKernel)
if modPo is None:
modName = get_module_pkg_name(driverPackage)
msg(conduit, 'No kernel module package ' + modName + ' for ' + \
str(newestKernel) + ' and ' + str(driverPackage) + ' found. ' + \
'Ignoring the new kernel')
return False
if db.contains(po = modPo):
return True
tsInfo.addTrueInstall(modPo)
return True
def installing_kernels(conduit, kernelPackages, driverPackage):
"""When installing new kernels, we need to also install the driver module packages
for each of them."""
tsInfo = conduit.getTsInfo()
db = conduit.getRpmDB()
# Remove the kernel module package for all other kernels
newestKernel = get_most_recent_kernel(conduit, kernelPackages)
allKernels = list(kernelPackages)
allKernels.extend(db.returnPackages(patterns=[KERNEL_PKG_NAME]))
# Will install the kernel module package for the newest one of the kernel packages
success = install_modules_for_kernels(conduit, driverPackage, kernelPackages)
if not success:
return
for k in allKernels:
if k != newestKernel:
modPo = get_module_package(conduit, driverPackage, k)
if db.contains(po = modPo):
addErase(conduit, tsInfo, modPo)
def erasing_kernels(conduit, kernelPackages, driverPackage):
"""When erasing kernel modules, we want to remove their driver kernel module
packages, provided they are installed at all."""
db = conduit.getRpmDB()
tsInfo = conduit.getTsInfo()
currentlyInstalledKernels = db.searchNames([KERNEL_PKG_NAME])
# This is the list of kernels we will have installed after the given ones were removed.
remainingKernels = list(set(currentlyInstalledKernels) - set(kernelPackages))
assert(len(remainingKernels) > 0)
newestRemainingKernel = sorted(remainingKernels, cmp = compare_po, reverse = True)[0]
newestModPo = get_module_package(conduit, driverPackage, newestRemainingKernel)
# Remove kernel module packages for all the kernels we remove
for k in kernelPackages:
modPo = get_module_package(conduit, driverPackage, k)
if newestModPo != modPo and db.contains(po = modPo):
addErase(conduit, tsInfo, modPo)
# Install the kernel module package for the now most recent kernel
if not db.contains(po = newestModPo):
tsInfo.addTrueInstall(newestModPo)
def erasing_driver(conduit, driverPackage):
"""When removing the driver package, we automatically remove all the installed
kernel module packages."""
db = conduit.getRpmDB()
tsInfo = conduit.getTsInfo()
modPackages = db.returnPackages(patterns=[MODULE_PKG_BASENAME + '*'])
for modPo in modPackages:
addErase(conduit, tsInfo, modPo)
def installing_driver(conduit, driverPackage, installingKernels):
"""We call this when installing the DRIVER_PKG_BASENAME package. If that happens,
we need to install kernel module packages for all the installed kernels,
as well as the kernels we additionally install in the current transaction"""
db = conduit.getRpmDB()
tsInfo = conduit.getTsInfo()
install_modules_for_kernels(conduit, driverPackage, [])
def postresolve_hook(conduit):
db = conduit.getRpmDB()
tsInfo = conduit.getTsInfo()
erasePkgs = tsInfo.getMembersWithState(output_states=[TS_ERASE, TS_UPDATED])
installPkgs = tsInfo.getMembersWithState(output_states=[TS_INSTALL, TS_TRUEINSTALL,
TS_UPDATE])
# Append a '*' to all the package names in our list
installedDriverPackage = db.returnPackages(patterns=[DRIVER_PKG_BASENAME + '*'])
# The above query for the rpm database returns all packages starting with
# the DRIVER_PKG_BASENAME, but all the subpackages of the nvidia-driver
# package start with 'nvidia-driver', so filter the list out for the correct
# package names.
for k in list(installedDriverPackage):
if not is_driver_po(k):
installedDriverPackage.remove(k)
installingDriverPackage = None
erasingDriverPackage = None
installingKernels = []
erasingKernels = []
for pkg in installPkgs:
if match_list(DEPEND_ON_KMOD_PATTERNS, pkg.name):
installingDriverPackage = pkg.po
break
for pkg in erasePkgs:
if match_list(DEPEND_ON_KMOD_PATTERNS, pkg.name):
erasingDriverPackage = pkg.po
break
for pkg in erasePkgs:
if pkg.po.name == KERNEL_PKG_NAME:
erasingKernels.append(pkg.po)
for pkg in installPkgs:
if pkg.po.name == KERNEL_PKG_NAME:
installingKernels.append(pkg.po)
# Since this is a postresolve hook, yum might've already added a kernel module
# package, to satisfy the dependency the nvidia-driver package has. However,
# we will handle that ourselves so remove all of them here.
for member in tsInfo.getMembers():
if MODULE_PKG_PATTERN.match(member.name):
tsInfo.deselect(member.name)
if installingDriverPackage:
installing_driver(conduit, installingDriverPackage, list(installingKernels))
if erasingDriverPackage:
erasing_driver(conduit, erasingDriverPackage)
if installedDriverPackage:
if installingKernels and not installingDriverPackage:
installing_kernels(conduit, installingKernels, installedDriverPackage[0])
if erasingKernels:
erasing_kernels(conduit, erasingKernels, installedDriverPackage[0])
def preresolve_hook(conduit):
tsInfo = conduit.getTsInfo()
moduleUpgrades = filter(lambda m: MODULE_PKG_PATTERN.match(m.name), tsInfo.getMembers())
# Not interesting for us
if not moduleUpgrades:
return
# Stop yum from automatically updating our packages, since we do it ourselves.
# This is technically not necessary, but we need to implement all the
# kmod package update handling ourselves anyway.
# This should really be the only one
po = moduleUpgrades[0]
tsInfo.deselect(po.name)
def match_list(patternList, pkg):
for p in patternList:
if p.match(pkg):
return True;
return False
def is_driver_po(po):
return DRIVER_PKG_PATTERN.match(po.name) and 'dkms' not in po.name
def get_module_pkg_name(driverPackage):
return driverPackage.name.replace(DRIVER_PKG_BASENAME, MODULE_PKG_BASENAME)
def get_module_pkg_release(kernelPackage, driverPackage):
"""In our scheme, we build up the kmod package release field from the
kernel release field as well as the driver version."""
start = kernelPackage.release[:kernelPackage.release.rfind('.')]
end = kernelPackage.release[kernelPackage.release.rfind('.'):]
return start + '.r' + driverPackage.version + end
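# Illustrative note (not from the original source): for a hypothetical kernel
# release "957.21.3.el7" and driver version "418.67", the function above yields
# "957.21.3.r418.67.el7" -- the driver version is spliced in just before the
# dist tag.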
def compare_po(po1, po2):
return compareEVR((po1.epoch, po1.version, po1.release),
(po2.epoch, po2.version, po2.release))
def get_most_recent_kernel(conduit, additional=[]):
db = conduit.getRpmDB()
kernels = list(additional)
kernels.extend(db.returnPackages(patterns=[KERNEL_PKG_NAME]))
return sorted(kernels, cmp = compare_po, reverse = True)[0]
| yum-packaging-nvidia-plugin-main | nvidia-yum.py |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
from functools import cmp_to_key
from dnf.cli.option_parser import OptionParser
import dnf
import dnf.cli
import dnf.sack
import libdnf.transaction
DRIVER_PKG_NAME = 'nvidia-driver'
KERNEL_PKG_NAME = 'kernel'
KERNEL_PKG_REAL = 'kernel-core'
KMOD_PKG_PREFIX = 'kmod-nvidia'
def is_kmod_pkg(pkg):
return pkg.name.startswith(KMOD_PKG_PREFIX) and 'dkms' not in pkg.name
def remove_release_dist(release):
return release[0:release.rfind('.')]
def evr_key(po, sack):
func = cmp_to_key(sack.evr_cmp)
return func(str(po.epoch) + ':' + str(po.version) + '-' + str(po.release))
def ver_cmp_pkgs(sack, po1, po2):
return sack.evr_cmp(str(po1.epoch) + ':' + str(po1.version) + '-' + str(po1.release),
str(po2.epoch) + ':' + str(po2.version) + '-' + str(po2.release));
def revive_msg(var, msg, val = ''):
if var is not None:
print(msg)
return val
class NvidiaPlugin(dnf.Plugin):
name = 'nvidia'
def __init__(self, base, cli):
super(NvidiaPlugin, self).__init__(base, cli)
self.base = base
self.cli = cli
def sack(self, debug = None):
# run as command
if debug == True:
base = self.base()
base.read_all_repos()
base.fill_sack()
sack = base.sack
# run as plugin
else:
sack = self.base.sack
# check installed
installed_drivers = sack.query().installed().filter(name = DRIVER_PKG_NAME)
installed_kernel = list(sack.query().installed().filter(name = KERNEL_PKG_NAME))
installed_modules = list(sack.query().installed().filter(name__substr = KMOD_PKG_PREFIX))
# driver not installed
if not installed_drivers and debug is None:
return
# container/chroot
if not installed_kernel and debug is None:
return
# The most recent installed kernel package
installed_kernels = sorted(installed_kernel, reverse = True, key = lambda p: evr_key(p, sack))
if len(installed_kernels) > 0:
installed_kernel = installed_kernels[0]
available_kernels = sack.query().available().filter(name = KERNEL_PKG_NAME)
available_k_cores = sack.query().available().filter(name = KERNEL_PKG_REAL)
available_drivers = sack.query().available().filter(name = DRIVER_PKG_NAME)
dkms_kmod_modules = sack.query().available().filter(name__substr = "dkms")
available_modules = sack.query().available().filter(name__substr = KMOD_PKG_PREFIX).difference(dkms_kmod_modules)
# Print debugging if running from CLI
if installed_kernel:
revive_msg(debug, '\ninstalled kernel: ' + str(installed_kernel))
if installed_modules:
string_modules = ' '.join([str(elem) for elem in installed_modules])
revive_msg(debug, '\ninstalled kmod(s): ' + str(string_modules))
if available_kernels:
string_kernels = ' '.join([str(elem) for elem in available_kernels])
revive_msg(debug, '\navailable ' + KERNEL_PKG_NAME + '(s): ' + str(string_kernels))
if available_k_cores:
string_cores = ' '.join([str(elem) for elem in available_k_cores])
revive_msg(debug, '\navailable ' + KERNEL_PKG_REAL + '(s): ' + str(string_cores))
if available_drivers:
string_drivers = ' '.join([str(elem) for elem in available_drivers])
revive_msg(debug, '\navailable driver(s): ' + str(string_drivers))
if available_modules:
string_all_modules = ' '.join([str(elem) for elem in available_modules])
revive_msg(debug, '\navailable kmod(s): ' + str(string_all_modules))
# DKMS stream enabled
if installed_modules and 'dkms' in string_modules:
return
# Installed driver
try:
driver = installed_drivers[0]
except:
return
# Exclude all available kernels which are newer than the most recent installed
# kernel AND do NOT have a kmod package
for kernelpkg in available_kernels:
if ver_cmp_pkgs(sack, kernelpkg, installed_kernel) != 1:
continue
# Matching kernel-core package
try:
k_corepkg = [i for i in available_k_cores if i.version == kernelpkg.version and i.release == kernelpkg.release][0]
except:
print('Unable to find matching ' + KERNEL_PKG_REAL + ' package')
# Iterate through drivers in stream
for a_driver in available_drivers:
# Get package name
kmod_pkg_name = KMOD_PKG_PREFIX + '-' + str(a_driver.version) + '-' + \
str(kernelpkg.version) + '-' + str(remove_release_dist(kernelpkg.release))
# Append object
if 'kmod_pkg' in locals():
kmod_pkg = sack.query().available().filter(name = kmod_pkg_name, version = a_driver.version).union(kmod_pkg)
else:
kmod_pkg = sack.query().available().filter(name = kmod_pkg_name, version = a_driver.version)
# kmod for kernel and driver combination not available
if not kmod_pkg:
# Exclude kernel packages
try:
sack.add_excludes([kernelpkg])
sack.add_excludes([k_corepkg])
print('NOTE: Skipping kernel installation since no kernel module package ' + str(kmod_pkg_name) + \
' for kernel version ' + str(kernelpkg.version) + '-' + str(kernelpkg.release) + \
' and NVIDIA driver ' + str(driver.version) + ' could be found')
except Exception as error:
print('WARNING: kernel exclude error', error)
def resolved(self):
transaction = self.base.transaction
# XXX This is a workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1658517
sack = dnf.sack._rpmdb_sack(self.base)
for pkg in transaction.remove_set:
if pkg.name == DRIVER_PKG_NAME:
# We are removing a driver package, through an
# actual remove or an upgrade. Remove all
# kmod packages belonging to it as well.
installed_kmods = sack.query().installed().filter(version = pkg.version)
# The above query only selects by version since we don't know
# the exact name of the kmod package. Look here for them by prefix
# and remove them if they match the version of the driver
# we're removing right now.
for kmod in installed_kmods:
if is_kmod_pkg(kmod):
transaction.add_erase(kmod)
@dnf.plugin.register_command
class NvidiaPluginCommand(dnf.cli.Command):
aliases = ('nvidia-plugin',)
summary = 'Helper plugin for DNF to manage precompiled NVIDIA driver streams'
def run(self):
nvPlugin = NvidiaPlugin(dnf.Base, dnf.cli.Cli)
nvPlugin.sack(True)
print("---")
| yum-packaging-nvidia-plugin-main | nvidia-dnf.py |
# Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
debug = False
socs = {}
boards = {}
configs = {}
def _load_something(path, filetype, name, saveindict):
fn = os.path.join(path, name + '.' + filetype)
d = {}
execfile(fn, globals(), d)
saveindict[name] = d[filetype]
def _load_soc(path, socname):
if socs.has_key(socname):
return
if debug: print 'load soc', socname
_load_something(path, 'soc', socname, socs)
def _load_board(path, boardname):
if boards.has_key(boardname):
return
if debug: print 'load board', boardname
_load_something(path, 'board', boardname, boards)
_load_soc(path, boards[boardname]['soc'])
def _load_config(path, configname):
if configs.has_key(configname):
return
if debug: print 'load config', configname
_load_something(path, 'config', configname, configs)
_load_board(path, configs[configname]['board'])
def load_configs(path):
fns = os.listdir(path)
for fn in fns:
if not fn.endswith('.config'):
continue
cfgname = fn[:-7]
_load_config(path, cfgname)
if __name__ == '__main__':
debug = True
load_configs('configs')
print
print socs
print
print boards
print
print configs
| tegra-uboot-flasher-scripts-master | tegraboardconfigs.py |
# ali_funcs.py 3/27/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Alibaba (ali) Cloud Service Provider specific functions
#
# HELPTEXT: "Alibaba Cloud Service Provider"
#
import json
import time
import subprocess
from cspbaseclass import CSPBaseClass
from cspbaseclass import Which
from cspbaseclass import error, trace, trace_do, debug, debug_stop
##############################################################################
# some Alibaba defaults values that will vary based on users
#
# default_key_name: User will need to create their own security key and
# specify its name here.
# region: The Alibaba region that they wish to run in. Note that
# GPU instances might not be available at all sites
# user: Login user name for instance. May be hardcoded by ISP
# based on the image_name being selected.
##############################################################################
default_key_name = "my-security-key-name"
default_region = "my-region-name"
default_user = "my-user-name"
##############################################################################
# What image and instance type to bring up.
#
# default_image_name: Name of OS template that instance will be created with
# default_instance_type: The default name that defines the memory and cpu sizes
# and the gpu types for the instance. Changes over time.
# default_choices: Available instance types that the user can select from.
# This will probably be different per region, and will
# continue to change over time. Used as a pre-check in
# command parser to verify choice before sending to csp
##############################################################################
default_image_name_international = "NVIDIA GPU Cloud Virtual Machine Image 18.03.0"
default_image_name_china = "NVIDIA GPU Cloud VM Image 18.03.0"
# Note different names for chinese marketplace versus international marketplace
default_image_name = default_image_name_international
if (False): # non GPU choices for script testing..
default_instance_type = "ecs.sn1.medium"
default_choices = ['ecs.sn1.small', 'ecs.sn1.large', 'ecs.sn1.xlarge', # compute optimized
'ecs.sn2.small', 'ecs.sn2.large', 'ecs.sn2.xlarge'] # general purpose
else: # GPU instances - normal usage
default_instance_type = "ecs.gn5-c4g1.xlarge"
default_choices = ['ecs.gn5-c4g1.xlarge', 'ecs.gn5-c8g1.2xlarge', # gn5 are nvidia P100
'ecs.gn5-c4g1.2xlarge', 'ecs.gn5-c8g1.4xlarge',
'ecs.gn5-c28g1.7xlarge', 'ecs.gn5-c8g1.8xlarge',
'ecs.gn5-c28g1.14xlarge', 'ecs.gn5-c8g1.14xlarge']
TIMEOUT_1 = (60 * 4) # create, start, terminate
TIMEOUT_2 = (60 * 4) # stop, ping
##############################################################################
# CSPClass
#
# Cloud Service Provider primitive access functions
##############################################################################
class CSPClass(CSPBaseClass):
''' Cloud Service Provider Class for alibaba'''
##############################################################################
# CSPSetupOK
#
# checks to see that user has ability to create and manipulate VM's on this
# CSP. Want to check that up front, instead of later when actually talking
# to the CSP.
#
# does things like verifying that the CLI is installed on the machine, and
# whatever else is quick and easy to check
#
# Should also check to see that can actually connect with the CSP provider
# (network connection) and that network is reliable.
#
def CSPSetupOK(self):
''' quick check to verify alibaba command line interface is installed '''
fullpath = Which("aliyuncli") # does cli application exist?
if (fullpath == None):
return 1 # error, cli app not found
else:
# TODO: verify network connection to CSP
# TODO: check login setup correctly
return 0
##############################################################################
# ArgOptions
#
# alibaba specific argument parser. This extends or overrides default argument
# parsing that is set up in ncsp.py/add_common_options() function
#
# All arguments set up here and in the common code are saved/restored from
# the csp specific args file. See my_class.ArgSave/RestoreFromFile(parser)
# in the base class for implementation.
#
def ArgOptions(self, parser):
''' Alibaba specific option parser '''
# set up Alibaba specific fields of the parser
region_list = self.GetRegionsCached()
parser.add_argument('--RegionId', dest='region',
default=default_region, required=False,
choices=region_list, # query, keeps changing
help='region in which to create the VM')
parser.add_argument('--instance_type', dest='instance_type', # 'size' on azure, use 'instance-type' as common name
default=default_instance_type, required=False,
choices=default_choices, # should query list if can (region dependent?)
help='VM instance (type) to create')
parser.add_argument('--auto-ngc-login', dest='auto_ngc_login', action='store_true',
default=False, required=False,
help='Enable NGC auto login using the Azure Key Vault')
parser.add_argument('--keyvault', dest='keyvault',
default=None, required=False,
help='Azure Key Vault name that contains the NGC API Key')
parser.add_argument('--apikey', dest='apikey',
default=None, required=False,
help='NGC API Key to store in the vault')
parser.add_argument('--bandwidth_out', dest='bandwidth_out',
default=10, required=False, type=int,
help='Internet Max Bandwidth Out (1 to 200 Mbps)')
parser.add_argument('--charge_type', dest='charge_type',
default='PostPaid', required=False,
choices=['PostPaid', 'PrePaid'],
help='Instance Charge Type')
parser.add_argument('--image_owner_alias', dest='image_owner_alias',
default='marketplace', required=False,
choices=['system', 'self', 'others', 'marketplace'],
help='Image owner')
parser.set_defaults(image_name=default_image_name);
parser.set_defaults(key_name=default_key_name)
parser.set_defaults(user=default_user)
# ping-ability makes starting/stopping more traceable, but this
# feature is disabled by default, and explicitly needs to be
# enabled in the Network Security Group -- see ICMP option
parser.set_defaults(pingable=1) # alibaba instances we created support pings
###########################################################################
# ArgSanity
#
# alibaba class specific argument checks, Called after the argument parser has run
# on the user options as a hook to verify that arguments are correct
#
# 'parser' is the structure returned from argparse.ArgumentParser()
#
# Returns 0 success
# 1 something is wrong, stop
#
def ArgSanity(self, parser, args):
''' Alibaba Arg sanity checking '''
rc = 0
if args.bandwidth_out < 1 or args.bandwidth_out > 200:
error("bandwidth must be between 1 and 200")
rc = 1
return(rc) # 0 good, 1 stop
###########################################################################
# overrides common method in base class
def DoCmdNoError(self, cmd):
''' ali specific Blocking command -- returns command output, doesn't report error'''
debug(1, cmd)
self.Log(cmd)
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output, errval = child.communicate() # returns data from stdout, stderr
debug(3, output) # full output for trace
# print "cmd: %s " % cmd
# print "child.returncode: %d " % child.returncode
# print "errval: %s " % errval
# print "output:\n%s " % output
# ali error output is in json format -- kind of...
# {
# "Message": "The specified InstanceId does not exist.",
# "Code": "InvalidInstanceId.NotFound"
# }
# Detail of Server Exception:
#
# HTTP Status: 404 Error:InvalidInstanceId.NotFound The specified InstanceId does not exist. RequestID: C66FB5EA-FA09-41B2-AD69-9A68BCCE0B4A
if child.returncode != 0 and errval == "":
pos = output.find('}')
if (pos == -1):
return(child.returncode, "", errval)
jsonbuf = output[:pos+1] # only the stuff before the first '}'
decoded_output = json.loads(jsonbuf)
errval = decoded_output['Message']
return (child.returncode, output, errval) # pass back retcode, stdout, stderr
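# Illustrative walk-through (hypothetical failure): if the command fails and
# stdout begins with the JSON object shown above, followed by the plain-text
# "Detail of Server Exception" lines, then output[:pos+1] keeps only the text
# up to the first '}', json.loads() sees {"Message": ..., "Code": ...}, and
# errval is reported as "The specified InstanceId does not exist."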
###########################################################################
# GetRunStatus
#
# Returns the running status of the instance as a string, like 'running'
# 'terminated', 'pending' etc.. This will be somewhat the same across
# all CSPs, but since it comes from them you should not depend upon
# an exact value out of CSP specific code
#
# Returns: string describing state
#
def GetRunStatus(self, args):
''' Returns running-state of instance from describe-instance-status '''
cmd = "aliyuncli ecs DescribeInstanceAttribute"
cmd += " --InstanceId %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
run_state = "Terminated" # assume: doesn't exist any longer
if (retcode == 0): # did actually grab a real live status ?
decoded_output = json.loads(output)
id = decoded_output['InstanceId']
if (id.__len__() > 0 and id == args.vm_id):
run_state = decoded_output['Status']
# return the value, should be something like "running" or "pending" or ""
self.Inform(run_state)
return(run_state);
##############################################################################
# From image file name, Find the ID of the AMI instance that will be loaded
#
# Get the image ID of the "NVIDIA GPU Cloud Virtual Machine Image" that we created.
# Note that currently (10/2017) the ID of this image changes whenever we update
# the image. This query here does a name-to-id lookup. The name should remain constant.
def GetImageId(self, args):
# if already have the ID, can skip this step. Note "None" as string from args file
if (args.image_id != "" and args.image_id != None and args.image_id != "None"):
return 0
# query name, to return id
cmd = "aliyuncli ecs DescribeImages"
cmd += " --RegionId %s" % args.region
cmd += " --ImageName \"%s\"" % args.image_name
cmd += " --ImageOwnerAlias %s" % args.image_owner_alias
retcode, output, errval = self.DoCmd(cmd)
if (retcode != 0):
error(errval)
return 1
# decode the JSON output
decoded_output = json.loads(output)
trace(2, json.dumps(decoded_output, indent=4, sort_keys=True))
args.image_id = decoded_output['Images']['Image'][0]['ImageId']
return 0
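# Illustrative (hypothetical) fragment of the DescribeImages output parsed above:
# {
#   "Images": { "Image": [ { "ImageId": "m-rj9xxxxxxxxxxxxxxxxx",
#                            "ImageName": "NVIDIA GPU Cloud Virtual Machine Image 18.03.0" } ] }
# }
# args.image_id would then be set to "m-rj9xxxxxxxxxxxxxxxxx".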
###########################################################################
# GetIPSetupCorrectly
#
# Called after the instance is created in order to get if needed, and
# verify that a public IP address has been returned for the VM
#
# Some CSP's, like azure and alibaba return the IP address from another
# function that sets up the public IP. This needs to be called in the
# CreateVM function in that case.
#
# Other CSP's like aws, return the IP address for you "free" of charge
# as part of the instance information for the VM. This might be returned
# only after the VM creation has been completed.
#
# This function is generically called after the VM has been found to be
# running, to either simply verify that we have a valid IP address in
# the first case above, or to ask the CSP for it and then verify it
# in the second case.
#
# public IP value will be in args.vm_id
#
# This function can do other cross-checks to validate other setups like
# checking if the SSH key-name returned from the CSP is the same as we
# sent it. Checks like this are optional, but highly desirable.
#
# Returns: 0 success
# 1 fails, invalid IP or can't get it
#
def GetIPSetupCorrectly(self, args):
''' called after 'running' status to get IP. Does nothing for Alibaba '''
if (args.vm_ip == ""): # this ip value should have been set in Create
error("No IP for VM: \"%s\"" % args.vm_name)
return(1)
# TODO: see if new IP (which we query for RIGHT NOW is different than
# the vm_ip that was gathered before. Alibaba is NOT supposed to
# change the IP address once it's created for the life of
# the VM.. but that's an ass-u-m(e)-tion because it was seen
# to move more than once.
#
return 0
##############################################################################
# CSP specific Network Security Group Functions
#
# ShowSecurityGroups Displays NSG (network security groups) in region
# ExistingSecurityGroup Does NSG exist?
# CreateSecurityGroup Creates a NSG from a name, and adds rules
# DeleteSecurityGroup Deletes a NSG
##############################################################################
##############################################################################
# ShowSecurityGroups
#
# This function shows basic information about your account's security groups
# for your region.
#
# Intended to be informative only, as each CSP will probably supply different
# type of information.
#
# Returns: 0 one or more Network Security Groups found in region
# 1 error, or no NSG's defined in region
#
def ShowSecurityGroups(self, args):
''' Displays all current security groups '''
cmd = 'aliyuncli ecs DescribeSecurityGroups'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --PageSize 50" # default is 10, max is 50
cmd += " --output json"
cmd += " --filter SecurityGroups.SecurityGroup[].SecurityGroupName"
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems describing security groups")
return 1
print output # see function below for example of output
return(0)
##############################################################################
# ExistingSecurityGroup
#
# Given a name of a security group in args.nsg_name, this function sees
# if it currently exists on the CSP
#
# This entire application is written assuming that once a security group is
# created, it doesn't need to really change much for the lifetime of the
# universe. Therefore we don't delete them unless specifically asked for
#
# The purpose of this function is to decide if we need to create a Network
# Security Group, or to return the id of that existing group in args.nsg_id
#
# Returns: 0 if security group args.nsg_name currently exists and is valid
# 1 need to create a group
#
def ExistingSecurityGroup(self, args):
''' Does the security group name currently exist ? get it if it does '''
trace(2, "\"%s\"" % (args.nsg_name))
if (args.nsg_name == "" or args.nsg_name == None or args.nsg_name == "None"):
error("NetworkSecurityGroup name is \"%s\"" % args.nsg_name)
return 1
# can it be found by name? -- get list of all names first
cmd = 'aliyuncli ecs DescribeSecurityGroups'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --PageSize 50" # default is 10, max is 50
cmd += " --output json"
cmd += " --filter SecurityGroups.SecurityGroup[].SecurityGroupName"
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems describing security groups")
return 1
# returns a Json object like:
# [
# "NexposeSG",
# "NewtonSG",
# "sg-rj93y8iuj33uosositpw"
# ]
#
# Use json converter to make it into a list
# [u'NexposeSG', u'NewtonSG', u'sg-rj93y8iuj33uosositpw']
decoded_output = json.loads(output) # convert json format to python structure
# does the list contain our requested security group name?
if (args.nsg_name in decoded_output):
# yes it does, now go back and find the index into the list of names
# then go back and pull the json record for that idx and filter it
# for the SecurityGroupId id.
idx = 0
for item in decoded_output:
if (unicode(args.nsg_name) == item):
# print "List contains SG name \"%s\" at index %d" % (args.nsg_name, idx)
cmd = 'aliyuncli ecs DescribeSecurityGroups'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --PageSize 50" # default is 10, max is 50
cmd += " --output json"
cmd += " --filter SecurityGroups.SecurityGroup["
cmd += str(idx) # index to string
cmd += "].SecurityGroupId"
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems describing security groups")
return False
trace(3, output)
# existing Security group ID is saved in the args structure
#
# just to make it more of a pain because it's not hard enough
# it's necessary to remove the surrounding quote characters from
# the group id here
args.nsg_id = (output.replace('"', '')).strip() # remove surrounding quotes
# use strip() to remove newline
trace(2, "args.nsg_id: \"%s\"" % args.nsg_id)
return 0
idx = idx + 1
# returns 1 if did not find security group
trace(2, "Did not find security group: \"%s\"" % args.nsg_name)
return 1
##############################################################################
# CreateSecurityGroup
#
# Creates a full network security group by the name of args.nsg_name, saves the
# value in args.nsg_id
#
# Any additional rules required for the security group to set up ssh, ssl and
# ping are added to the group here before it is returned.
#
# If the CSP has object-taging feature, the new security group should be
# tagged with a unique name so it can be identified later.
#
# IMPORTANT: if you can create a rule to make the VM pingable (a good thing
# for initial development), be sure to call following in ArgOptions
# so that the ping feature will be used when needed by this app
#
# "parser.set_defaults(pingable=1)"
#
def CreateSecurityGroup(self, args):
''' creates security group. saves it in args.nsg_id '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
# create security group
cmd = 'aliyuncli ecs CreateSecurityGroup'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --SecurityGroupName \"%s\"" % args.nsg_name # "NvidiaSG"
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems creating security group")
return 1
# decode the JSON output
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
security_group_id = decoded_output['SecurityGroupId']
# new Security group ID is saved in the args structure
args.nsg_id = security_group_id
# A new security group will not have any rules in it.
# The following commands will open inbound ports 22 (for SSH),
# 443 (for HTTPS), and 5000 (for DIGITS6):
cmd = 'aliyuncli ecs AuthorizeSecurityGroup'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % security_group_id # "sg-rj999tz2kpxehy7obsjn"
cmd += ' --IpProtocol tcp --PortRange 22/22 --SourceCidrIp 0.0.0.0/0'
cmd += ' --Policy accept --Description SSH'
self.DoCmd(cmd)
cmd = 'aliyuncli ecs AuthorizeSecurityGroup'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % security_group_id # "sg-rj999tz2kpxehy7obsjn"
cmd += ' --IpProtocol tcp --PortRange 443/443 --SourceCidrIp 0.0.0.0/0'
cmd += ' --Policy accept --Description HTTPS'
self.DoCmd(cmd)
cmd = 'aliyuncli ecs AuthorizeSecurityGroup'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % security_group_id # "sg-rj999tz2kpxehy7obsjn"
cmd += ' --IpProtocol tcp --PortRange 5000/5000 --SourceCidrIp 0.0.0.0/0'
cmd += ' --Policy accept --Description DIGITS6'
self.DoCmd(cmd)
cmd = 'aliyuncli ecs AuthorizeSecurityGroup'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % security_group_id # "sg-rj999tz2kpxehy7obsjn"
cmd += ' --IpProtocol icmp --PortRange -1/-1' # Is value Ok? (-1/8 for Alibaba?)
cmd += ' --SourceCidrIp 0.0.0.0/0'
cmd += ' --Policy accept --Description \"Support for ping\"'
self.DoCmd(cmd)
# The following command will open all outbound ports:
cmd = 'aliyuncli ecs AuthorizeSecurityGroupEgress'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % security_group_id # "sg-rj999tz2kpxehy7obsjn"
cmd += ' --IpProtocol all --PortRange -1/-1 --DestCidrIp 0.0.0.0/0'
cmd += ' --Policy accept --Description \"All open!\"'
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems setting up security group rules")
return 1
return 0 # happy return
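# Note: the four AuthorizeSecurityGroup calls above could also be driven from a
# small table; an illustrative, untested sketch using the same DoCmd() helper:
#
#   ingress_rules = [("tcp", "22/22", "SSH"),
#                    ("tcp", "443/443", "HTTPS"),
#                    ("tcp", "5000/5000", "DIGITS6"),
#                    ("icmp", "-1/-1", "ping")]
#   for proto, ports, desc in ingress_rules:
#       cmd = 'aliyuncli ecs AuthorizeSecurityGroup'
#       cmd += ' --RegionId %s --SecurityGroupId %s' % (args.region, security_group_id)
#       cmd += ' --IpProtocol %s --PortRange %s --SourceCidrIp 0.0.0.0/0' % (proto, ports)
#       cmd += ' --Policy accept --Description %s' % desc
#       self.DoCmd(cmd)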
##############################################################################
# DeleteSecurityGroup
#
# Deletes the security group specified at args.nsg_id, and clears that value
#
# If group Rules attached to the NSG need to be individually deleted, that
# must also be done here if not done automatically by the CSP
#
def DeleteSecurityGroup(self, args):
''' deletes the security group '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
for retrycnt in range(0, 5): # deleting right after deleteVM errors
self.Inform("DeleteNSG")
cmd = 'aliyuncli ecs DeleteSecurityGroup'
cmd += ' --RegionId %s' % args.region # us-west-1
cmd += ' --SecurityGroupId %s' % args.nsg_id # "sg-rj999tz2kpxehy7obsjn"
cmd += " 2> /dev/null" # don't show errors
retcode, output, errval = self.DoCmdNoError(cmd) # call the Alibaba command, ignore error
if (retcode == 0): # check for error code
args.nsg_id = "" # clear out the id
break
trace(3-retrycnt, "Problems deleting security group \"%s\" retry:%d" % (args.nsg_name, retrycnt))
time.sleep(retrycnt)
return retcode
##############################################################################
# CSP specific VM functions
#
# CreateVM Creates a complete fully running VM
# StartVM Starts a VM if it was stopped, returns running
# StopVM Stops the VM if it is currently running
# RestartVM Resets VM, may not quite be same as Stop/Start
# DeleteVM Removes from the CSP a running or stopped VM
##############################################################################
##############################################################################
# CreateVM
#
# Creates a new VM, and returns when it is fully running.
#
# Note that due to the simple way that this code saves its persistent
# data (the id, user name, ... ), only 1 instance can be created
# at a time. Nothing preventing multiple VM's other than way to save/reference
# the id values. The CSPClass.Delete function removes the saved references
#
# The "args" option specify the CSP specific name, disk size, instance type,
# or any other parameter required to fully define the VM that is to be created
#
# Before creating the VM, effort is made to verify that all the supplied
# parameters, such as the SSH key name are valid.
#
# Network Security Group (NSG) is created if needed.
#
# Returns: 0 successful, VM fully created, up and ssh-able
# 1 failure, VM not created for one of many possible reasons
#
# Man page: https://www.alibabacloud.com/help/doc-detail/25499.htm?spm=a3c0i.o51771en.b99.190.3eb7831cDsO1p3
#
def CreateVM(self, args):
''' Creates a new VM. 'args' holds parameters '''
if (args.vm_id != "None" and args.vm_id != None):
error("Instance \"%s\" already exists, run 'deleteVM' first, or 'clean' if stale arg list" % args.vm_id)
return 1
args.vm_ip = "" # make sure IP address is clear
# ssh key file, builds path from options, checks existence
retcode = self.CheckSSHKeyFilePath(args, ".pem")
if (retcode != 0):
return(retcode)
# security group, create if needed, does nothing if already exists
# should move this step outside this VM create so that better reflects
# real VM timing?
retcode = self.CreateNSG(args) # sets args.nsg_id
if (retcode != 0):
return(retcode)
trace(2, "nsg_id: \"%s\" %s" % (args.nsg_name, args.nsg_id))
# look up image-name, return region specific image id
# TODO: saw this 'aliyuncli ecs describe-images' fail with network error
# check if connection to Alibaba is working before calling this
self.Inform("GetImageId")
if (self.GetImageId(args) != 0):
return 1
trace(2, "image_id: \"%s\" %s" % (args.image_name, args.image_id))
# with security group and image id, we can now create the instance
self.Inform("CreateInstance")
cmd = 'aliyuncli ecs CreateInstance'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --ImageId %s" % args.image_id # m-rj9gjqbdwtwlhtgqjeov"
cmd += " --SecurityGroupId %s" % args.nsg_id # sg-rj999tz2kpxehy7obsjn"
cmd += " --InstanceType %s" % args.instance_type # ecs.gn5-c4g1.xlarge
cmd += " --InstanceName %s" % args.vm_name # Name to create VM: "newton-gn5-1gpu"
cmd += " --InternetMaxBandwidthOut %d" % args.bandwidth_out # 10
cmd += " --InstanceChargeType %s" % args.charge_type # PostPaid
cmd += " --KeyPairName %s" % args.key_name # baseos-alibaba-siliconvalley
retcode, output, errval = self.DoCmd(cmd) # call the Alibaba command
if (retcode != 0): # check for return code
error ("Problems creating VM \"%s\"" % args.vm_name)
return 1
# decode the JSON output
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
args.vm_id = decoded_output['InstanceId']
# with Alibaba, Instances created via CLI are not automatically given a public IP address.
# To assign a public IP address to the instance you just created
# note -- this may not work immediately after creating VM. try a few times
args.vm_ip = ""
for retrycnt in range(0, 4):
self.Inform("AllocatePublicIpAddress")
cmd = 'aliyuncli ecs AllocatePublicIpAddress'
cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --InstanceId %s" % args.vm_id # i-rj9a0iw25hryafj0fm4v
cmd += " 2> /dev/null" # don't show errors (the timeout)
retcode, output, errval = self.DoCmdNoError(cmd) # call the Alibaba command, no errors
if (retcode == 0): # check for error code
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
args.vm_ip = decoded_output['IpAddress']
break # got IP we think -- done now
trace(3-retrycnt, "Problems allocating IP address for %s, retry:%d" % (args.vm_id, retrycnt))
time.sleep(retrycnt)
if (args.vm_ip == ""):
error ("Unable to allocate IP address for \"%s\"" % args.vm_name)
return 1
# print "args.vm_ip: %s" % args.vm_ip
# save vm ID and other fields setup here so don't use them if error later
# do this again later when we are fully started
self.ArgSaveToFile(args)
# unlike aws or azure, alibaba does not automatically start an instance
# when it is created. Start it here to be consistent
retcode = self.StartVM(args)
return retcode
##############################################################################
# StartVM
#
# Starts a Stopped VM, returns it in a fully running state, where we have
# the correct IP address if it changed, and can ssh into the VM
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be started, or invalid ID supplied
#
def StartVM(self, args):
''' Starts the VM '''
rc = 1 # assume error
if (self.CheckID(args) == False):
return 1
# get run status and check current state
status = self.GetRunStatus(args)
if (status == "Running"):
return 0 # already running, simply return
elif (status == "Stopping" ):
buf = "%s is in %s state, can't start running now" % (args.vm_name, status)
error(buf)
elif (status == "Stopped" or status == "null"):
rc = 0 # ok to start VM
else:
buf = "id %s is in \"%s\" state, not sure can start running" % (args.vm_id, status)
error(buf)
if (rc != 0):
return rc # unexpected status
self.Inform("StartVM")
# start the VM
cmd = "aliyuncli ecs StartInstance"
cmd += " --InstanceId %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
retcode = self.WaitTillRunning(args, "Running", TIMEOUT_1)
return retcode # 0: success, 1: failure
##############################################################################
# StopVM
#
# Stops a running VM. No persistent resources are deallocated, as it's expected
# that the VM will be started again.
#
# Note that most CSP's will continue to charge the customer for the allocated
# resources, even in a Stopped state.
#
# Returns: 0 VM fully stopped
# 1 unable to stop VM. May be invalid ID or connection to CSP
#
def StopVM(self, args):
''' Stop the VM '''
if (self.CheckID(args) == False):
return 1
retcode = self.CheckRunStatus(args, "Running")
if (retcode != 0):
error ("Not running")
return retcode
self.Inform("StopVM")
cmd = "aliyuncli ecs StopInstance"
cmd += " --InstanceId %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
status = self.GetRunStatus(args)
# The instance becomes "Stopping" after a successful API request,
# and the instance becomes "Stopped" after it is stopped successfully.
if (status != "Stopping"):
buf = "Asked VM to stop, but status = \"%s\"" % (status)
error(buf)
retcode = 1
else:
retcode = self.WaitForRunStatus(args, "Stopped", TIMEOUT_2)
return retcode # 0 success, 1 failure
##############################################################################
# RestartVM
#
# This function restarts a currently running VM
#
# Returns with the VM in a fully running state, where we have it's public IP
# address and can ssh into it
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be reset, or invalid ID supplied
#
def RestartVM(self, args): # also known as 'RebootInstance' on Alibaba
''' Restarts the VM '''
if (self.CheckID(args) == False):
return 1
retcode = self.CheckRunStatus(args, "Running")
if (retcode != 0):
error ("Not running")
return retcode
self.Inform("RestartVM")
cmd = "aliyuncli ecs RebootInstance"
cmd += " --InstanceId %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
# currently running, with Alibaba, status never becomes "un-running"
# during a restart -- so we check when it FAILS to ping to know if
# restart actually occurred. Then we simply wait till it's back up
# again - pingable and ssh-able to know it's running
if (retcode == 0):
if (args.pingable == 1):
retcode = self.WaitForPing(args, False, TIMEOUT_2)
else:
time.sleep(5) # let VM go down enough so SSH stops (we hope)
retcode = 0 # fake success, since ping isn't supported
if (retcode != 0):
error("never went un-pingable. Did VM restart?")
else:
retcode = self.WaitTillRunning(args, "Running", TIMEOUT_1)
return retcode # 0: success, 1: failure
##############################################################################
# DeleteVM
#
# Deletes a VM and releases all of its resources other than the Network Security
# Group.
#
# Returns: 0 success, VM and all it's resource are gone
# 1 problems..
#
def DeleteVM(self, args):
''' delete the vm and all the pieces '''
if (self.CheckID(args) == False):
return 1
# with alibaba, can only release an instance that is in the Stopped state
self.StopVM(args)
# command to Delete the Instance.
cmd = "aliyuncli ecs DeleteInstance"
cmd += " --InstanceId %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
# Is error handled ok? What if problems deleting? -- instance left around?
if (retcode == 0):
self.Clean(args) # remove file with the persistent id, ip address, ..
self.m_args_fname = "" # clear name, so won't write back args when done
return retcode
##############################################################################
# CSP specific utility functions
#
# ShowRunning Shows all the account's running VM's
# GetRegions Returns proper list of regions
##############################################################################
##############################################################################
# ShowRunning
#
# CSP specific information function to print out the name, type, description
# and start time of all the running instances in the region
#
# Returns: 0 1 or more running instances were found in CSP's args.region
# 1 no running instances found
#
def ShowRunning(self, args):
''' Shows list of running instances of account, independent of the current zone '''
lines_printed = 0
cmd = "aliyuncli ecs DescribeInstances"
# cmd += " --RegionId %s" % args.region # us-west-1
cmd += " --PageSize 50" # default is 10, max is 50
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
decoded_output = json.loads(output)
# output looks like (with zero instances)
# {
# "TotalCount": 0,
# "PageNumber": 1,
# "RequestId": "67D9A0C9-9393-49E9-B097-68DC739B2A85",
# "PageSize": 10,
# "Instances": {
# "Instance": []
# }
# }
count = decoded_output["TotalCount"]
if (count == 0):
print("# %s: No running instances found" % self.m_class_name )
return 1
for idx in range(0, count):
instance = decoded_output["Instances"]["Instance"][idx]
if (instance["Status"] == "Running"):
if (lines_printed == 0):
print("# %s:" % self.m_class_name )
print(" %-36s %-16s %10s \"%s\"" %
(instance["InstanceId"],
# TODO - add in the region here !
instance["InstanceType"],
instance["CreationTime"][0:10],
instance["InstanceName"]))
lines_printed += 1
return 0 # 0 have a list of running instances, 1 fail or empty list
##############################################################################
# GetRegions
#
# Returns a list of regions where VMs can be created by this CSP.
#
# These are basically the names of the CSP's data centers... Each data center
# may offer different resources. Don't care about that here. Just need the
# name.
#
# Used in a choice-list in the arg parser when user gives a non-default
# region name to catch invalid names before any real processing is done
#
# Returns: list of names
def GetRegions(self):
''' Returns a list of region names for the CSP '''
mylist = []
cmd = "aliyuncli ecs DescribeRegions"
retcode, output, errval = self.DoCmd(cmd)
if ( retcode == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output["Regions"]["Region"]) # number of regions
for idx in range(0, items):
name = decoded_output["Regions"]["Region"][idx]["RegionId"]
mylist.append(str(name))
return mylist
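# Illustrative (hypothetical) shape of the DescribeRegions output parsed above:
# {
#   "Regions": { "Region": [ { "RegionId": "us-west-1", ... },
#                            { "RegionId": "cn-hangzhou", ... } ] }
# }
# which would make GetRegions() return ['us-west-1', 'cn-hangzhou'].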
| ngc-examples-master | ncsp/ali_funcs.py |
# gcp_funcs.py 3/27/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Google Cloud Service Provider specific functions
#
# HELPTEXT: "Google Cloud Service Provider"
#
# See: https://cloud.google.com/sdk/docs/scripting-gcloud
#
import json
import time
import sys
from cspbaseclass import CSPBaseClass
from cspbaseclass import Which
from cspbaseclass import error, trace, trace_do, debug, debug_stop
import os
##############################################################################
# some gcloud default values that will vary based on users
#
# default_key_name: User will need to create their own security key and
# specify it's name here.
# region: The gcp region that they wish to run in. Note that
# GPU instances might not be avaliable at all sites
# user: Login user name for instance. May be hardcoded by ISP
# based on the image_name being selected.
##############################################################################
default_key_name = "my-security-key-name"
default_region = "my-region-name"
default_user = "my-user-name"
default_project = "my-project"
default_service_account = "my-service-account"
##############################################################################
# What image and instance type to bring up.
#
# default_image_name: Name of OS template that instance will be created with
# default_instance_type: The default name that defines the memory and cpu sizes
# and the gpu types for the instance. Changes over time.
# default_choices: Available instance types that the user can select from.
# This will probably be different per region, and will
# continue to change over time. Used as a pre-check in
# command parser to verify choice before sending to csp
##############################################################################
default_image_project = "nvidia-ngc-public"
default_image_name = "nvidia-gpu-cloud-image"
default_instance_type = "n1-standard-1"
default_instance_type_choices = ['n1-standard-1', 'n1-standard-8', 'n1-standard-16', 'n1-standard-32', 'n1-standard-64']
default_maintenance_policy = "TERMINATE"
default_scopes = ["https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append"]
default_boot_disk_size = 32
default_boot_disk_type = "pd-standard"
default_boot_disk_type_choices = ["pd-standard"]
default_min_cpu_platform = "Automatic"
default_min_cpu_platform_choices= ["Automatic", "Intel Broadwell", "Intel Skylake"]
default_subnet = "default"
default_accelerator_type = "nvidia-tesla-p100"
default_accelerator_type_choices= ["nvidia-tesla-p100"]
default_accelerator_count = 0
default_accelerator_count_choices=[0,1,2,4] # up to 4x P100s
TIMEOUT_1 = (60 * 2) # create, start, terminate
TIMEOUT_2 = (60 * 1) # stop, ping
##############################################################################
# CSPClass
#
# Cloud Service Provider primitive access functions
##############################################################################
class CSPClass(CSPBaseClass):
''' Cloud Service Provider Class for gcp '''
##############################################################################
# CSPSetupOK
#
# checks to see that user has ability to create and manipulate VM's on this
# CSP. Want to check that up front, instead of later when actually talking
# to the CSP.
#
# does things like verifying that the CLI is installed on the machine, and
# whatever else is quick and easy to check
#
# Should also check to see that can actually connect with the CSP provider
# (network connection) and that network is reliable.
#
def CSPSetupOK(self):
''' quick check to verify Google gcloud command line interface is installed '''
fullpath = Which("gcloud") # does cli application exist?
if (fullpath == None):
return 1 # error, not found
else:
# TODO: verify network connection to CSP
# TODO: check login setup correctly
return 0
##############################################################################
# ArgOptions
#
# gcp specific argument parser. This extends or overrides default argument
# parsing that is set up in ncsp.py/add_common_options() function
#
# All arguments set up here and in the common code are saved/restored from
# the csp specific args file. See my_class.ArgSave/RestoreFromFile(parser)
# in the base class for implementation.
#
def ArgOptions(self, parser):
''' gcp specific option parser '''
region_list = self.GetRegionsCached()
parser.add_argument('--region', dest='region',
default=default_region, required=False,
choices=region_list, # regions change, this is queried output
help='region in which to create the VM')
parser.add_argument('--project', dest='project',
default=default_project, required=False,
help='is the project in which to create the VM')
parser.add_argument('--image_project', dest='image_project',
default=default_image_project, required=False,
help='is the image project to which the image belongs')
parser.add_argument('--service_account', dest='service_account',
default=default_service_account, required=False,
help='service account')
parser.add_argument('--maintenance_policy', dest='maintenance_policy',
default=default_maintenance_policy, required=False,
help='maintenance_policy')
parser.add_argument('--subnet', dest='subnet',
default=default_subnet, required=False,
help='subnet')
parser.add_argument('--scopes', dest='scopes',
default=default_scopes, required=False,
help='scopes')
parser.add_argument('--boot_disk_size', dest='boot_disk_size',
default=default_boot_disk_size, required=False, type=int,
help='disk boot size')
parser.add_argument('--boot_disk_type', dest='boot_disk_type',
default=default_boot_disk_type, required=False,
choices=default_boot_disk_type_choices,
help='disk boot type')
parser.add_argument('--min_cpu_platform', dest='min_cpu_platform',
default=default_min_cpu_platform, required=False,
choices=default_min_cpu_platform_choices,
help='min_cpu_platform')
parser.add_argument('--accelerator_type', dest='accelerator_type',
default=default_accelerator_type, required=False,
choices=default_accelerator_type_choices,
help='GPU accelerator type')
parser.add_argument('--accelerator_count', dest='accelerator_count',
default=default_accelerator_count, required=False, type=int,
choices=default_accelerator_count_choices,
help='Number of GPU accelerators to attach to instance')
parser.add_argument('--instance_type', dest='instance_type', # 'size' on azure, use 'instance-type' as common name
default=default_instance_type, required=False,
choices=default_instance_type_choices,
help='VM instance (type) to create')
parser.add_argument('--vpcid', dest='vpcid',
default=None, required=False,
help='gcp VPC id')
# these override the common/default values from add_common_options
# with this csp's specific values
parser.set_defaults(image_name=default_image_name);
parser.set_defaults(key_name=default_key_name)
parser.set_defaults(user=default_user);
# ping-ability makes starting/stopping more traceable, but this
# feature is disabled by default, and explicitly needs to be
# enabled in the Network Security Group -- see ICMP option
parser.set_defaults(pingable=1) # gcloud instances we created support pings (alibaba not)
###########################################################################
# ArgSanity
#
# CSP class specific argument checks, Called after the argument parser has run
# on the user options as a hook to verify that arguments are correct
#
# 'parser' is the structure returned from argparse.ArgumentParser()
#
# Returns 0 success
# 1 something is wrong, stop
#
def ArgSanity(self, parser, args):
''' gcp Parser Argument sanity checking '''
# print args
return 0 # do nothing for now
###########################################################################
# GetRunStatus
#
# Returns the running status of the instance as a string, like 'running'
# 'terminated', 'pending' etc.. This will be somewhat the same across
# all CSPs, but since it comes from them you should not depend upon
# an exact value out of CSP specific code
#
# Returns: string describing state
#
def GetRunStatus(self, args):
''' Returns running-state of instance from describe-instance-status '''
if (self.CheckID(args) == False):
return 1
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances describe"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note gclould takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems describing VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
run_state = decoded_output['status']
# returns something like "RUNNING" or "STOPPED"
self.Inform(run_state)
return(run_state);
###########################################################################
# GetIPSetupCorrectly
#
# Called after the instance is created in order to get if needed, and
# verify that a public IP address has been returned for the VM
#
# Some CSP's, like azure and alibaba return the IP address from another
# function that sets up the public IP. This needs to be called in the
# CreateVM function in that case.
#
# Other CSP's like aws, return the IP address for you "free" of charge
# as part of the instance information for the VM. This might be returned
# only after the VM creation has been completed.
#
# This function is generically called after the VM has been found to be
# running, to either simply verify that we have a valid IP address in
# the first case above, or to ask the CSP for it and then verify it
# in the second case.
#
# public IP value will be in args.vm_id
#
# This function can do other cross-checks to validate other setups like
# checking if the SSH key-name returned from the CSP is the same as we
# sent it. Checks like this are optional, but highly desirable.
#
# Returns: 0 success
# 1 fails, invalid IP or can't get it
#
def GetIPSetupCorrectly(self, args):
''' called after 'running' status to get IP address if not already known '''
# With google, it looks like the IP address gets changed when restarting
# from 'stop'. -- SO we must clear it in our stop command !
#
# If we don't have IP run "describe" and get it.
# If we have it, simply return it
if (args.vm_ip != ""): # this ip value should have been set in Create
# print "GetIPSetupCorrectly: already have ip:%s" % args.vm_ip
return 0 # so we don't need to get it
# don't have IP value, hopefully VM is in running state and will
# have a IP that we can get
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances describe"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems describing VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# ip value that was all that was really needed
args.vm_ip = decoded_output['networkInterfaces'][0]['accessConfigs'][0]['natIP']
# sanity -- is VM id returned same as what we got from Create?
# got the value for free, might as well check it
vm_id = decoded_output['id']
if (vm_id != args.vm_id):
error ("Sanity - Returned vm_id:%s != vm_id value from create: %s" % (vm_id, args.vm_id))
return 1
# check status -- we should be RUNNING
status = decoded_output['status']
if (status != "RUNNING"):
error ("Shouldn't we be RUNNING? -- current status is \"%s\"" % status)
return(0)
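# Illustrative (hypothetical) fragment of the 'instances describe' JSON that the
# natIP lookup above relies on:
# {
#   "id": "1234567890123456789",
#   "status": "RUNNING",
#   "networkInterfaces": [ { "accessConfigs": [ { "natIP": "35.203.x.y" } ] } ]
# }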
##############################################################################
# CSP specific Network Security Group Functions
#
# ShowSecurityGroups Displays NSG (network security groups) in region
# ExistingSecurityGroup Does NSG exist?
# CreateSecurityGroup Creates a NSG from a name, and adds rules
# DeleteSecurityGroup Deletes a NSG
##############################################################################
##############################################################################
# ShowSecurityGroups
#
# This function shows basic information about your account's security groups
# for your region.
#
# Intended to be informative only, as each CSP will probably supply different
# type of information.
#
# Returns: 0 one or more Network Security Groups found in region
# 1 error, or no NSG's defined in region
#
def ShowSecurityGroups(self, args):
''' Displays all current security groups '''
error ("gcp (google cloud) does not use network security groups")
return 1 # no NSG's found
# 1 or more NSG's found
##############################################################################
# ExistingSecurityGroup
#
# Given a name of a security group in args.nsg_name, this function sees
# if it currently exists on the CSP
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def ExistingSecurityGroup(self, args):
''' Does the security group name currently exist ? get it if it does'''
trace(2, "\"%s\"" % (args.nsg_name))
error ("gcp (google cloud) does not use network security groups")
return 0
##############################################################################
# CreateSecurityGroup
#
# Creates a full network security group by the name of args.nsg_name, saves the
# value in args.nsg_id
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def CreateSecurityGroup(self, args):
''' creates security group. saves it in args.nsg_id '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
error ("gcp (google cloud) does not use network security groups")
return 1
##############################################################################
# DeleteSecurityGroup
#
# Deletes the security group specified at args.nsg_id, and clears that value
#
# Google cloud does not use security groups
#
# Returns: 0 do nothing
#
def DeleteSecurityGroup(self, args):
''' deletes the security group '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
error ("gcp (google cloud) does not use network security groups")
return 1
##############################################################################
# CSP specific VM functions
#
# CreateVM Creates a complete fully running VM
# StartVM Starts a VM if it was stopped, returns running
# StopVM Stops the VM if it is currently running
# RestartVM Resets VM, may not quite be same as Stop/Start
# DeleteVM Removes from the CSP a running or stopped VM
##############################################################################
##############################################################################
# CreateVM
#
# Creates a new VM, and returns when it is fully running.
#
# Note that due to the simple way that this code saves its persistent
# data (the id, user name, ... ), only 1 instance can be created
# at a time. Nothing preventing multiple VM's other than way to save/reference
# the id values. The CSPClass.Delete function removes the saved references
#
# The "args" option specify the CSP specific name, disk size, instance type,
# or any other parameter required to fully define the VM that is to be created
#
# Before creating the VM, effort is made to verify that all the supplied
# parameters, such as the SSH key name are valid.
#
# Network Security Group (NSG) is created if needed.
#
# Returns: 0 successful, VM fully created, up and ssh-able
# 1 failure, VM not created for one of many possible reasons
#
def CreateVM(self, args):
''' Creates a new VM. 'args' holds parameters '''
if (args.vm_id != "None" and args.vm_id != None):
error("Instance \"%s\" already exists, run 'deleteVM' first, or 'clean' if stale arg list" % args.vm_id)
return 1
# make sure our persistent IP address is clear
args.vm_ip = ""
# public ssh key file, builds path from options, checks existence
# this sets args.key_file to "keyfile.pub" (better known as "id_rsa.pub")
retcode = self.CheckSSHKeyFilePath(args, ".pub")
if (retcode != 0):
return(retcode)
keyfile_pub = args.key_file
# print "keyfile_pub:%s" % keyfile_pub
# however other than in the createVM, the private Key file
# is required for all the local ssh'ing that we will be doing
retcode = self.CheckSSHKeyFilePath(args, "")
if (retcode != 0):
return(retcode)
# ssh key file, builds path from options, checks existence
# metadata consists of user name, and the "ssh key" file
#
# Note that where we pass azure the name of our public ssh key,
# with Google the entire public key string is passed in the metadata
#
# Example:
# metadata = "ssh-keys=newtonl:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDbzMfRh2nXbcwqqVjGvMgOqD3FyJHk4hGdXofLfBAsfQtZQbUg208yWqPEdFgPVyw8zwhd2WAEnaRSK6TmNOok5qgCydpjxbqoCNIfdhfOSFl+T6veiibzQ2UyWolxNPaQ4IPE4FdQsNDM37lsQNCFyZfBaqfbTSmDi5W8Odoqf7E2tfXcLD4gsFpexM4bgK43aaOCp/ekCiJi+Y13MJTw5VmLIdLgJZ/40oMRpK6nZcipbkHkVQEV9mLpTKDLG/xvb7gRzFiXbp4qgF9dWQKqIkfL4UNpcKTjYXqmdt2okoeDGVhQ0AnVM1pHKIyVulV5c17jz7wyj+0UaizAFvSh [email protected]"
#
# Note: The first few characters of the id_rsa.pub file is "ssh-rsa AAAAB3..."
# don't need to explicitly pass in "ssh-rsa" here. Don't over complicate it
#
with open(keyfile_pub, "r") as f:
ssh_rsa_data = f.read();
metadata="ssh-keys=%s:%s" % (args.user, ssh_rsa_data)
# with Google, don't need to create a network security group.
# mostly inherit defaults from the main script
# neat thing with Google, is that we can specify GPU's at VM init time
# with other CSPs, number/type of GPU's is a function of the "instance_type"
accelerator_count = 0 # used for delay before ping below
if ( args.accelerator_type != None and args.accelerator_type != ""
and args.accelerator_type != "None" and args.accelerator_count > 0):
accelerator = "%s,count=%d" %(args.accelerator_type, args.accelerator_count)
accelerator_count = args.accelerator_count
# if adding GPUs, add additional info to the VM name
#
# Google GPU 'accelerator' types are of form: nvidia-tesla-p100 - too long for VM name which is
# limited to 61 chars - so keep only what's after the last '-' as the short name
#
# Remember with google, names must all be lowercase numbers/letters
if (args.vm_name.find("gpu") == -1): # haven't added "gpu" yet
type = args.accelerator_type[args.accelerator_type.rfind("-")+1:]
args.vm_name += "-%dx%sgpu" %(args.accelerator_count, type)
else:
accelerator = None # don't assign gpus
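# Illustrative example (hypothetical values): accelerator_type "nvidia-tesla-p100"
# with accelerator_count 2 strips the type down to "p100" and appends
# "-2xp100gpu", so a vm_name of "myvm" becomes "myvm-2xp100gpu".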
# Create the VM
# NOTE: with gcp, it's not necessary to assign it Network Security Groups
# when creating the VM's -- Called "network firewall rules", they are
# added later after the VM is created.
self.Inform("CreateVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " --project \"%s\" " % args.project # "my-project"
cmd += "instances create \"%s\"" % args.vm_name # "pbradstr-Fri-2018Mar02-181931"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # reduces noize output
cmd += " --machine-type \"%s\"" % args.instance_type # "n1-standard-1"
cmd += " --subnet \"%s\"" % args.subnet # default
cmd += " --metadata \"%s\"" % metadata
cmd += " --maintenance-policy \"%s\"" % args.maintenance_policy # "TERMINATE"
cmd += " --service-account \"%s\"" % args.service_account # "[email protected]"
# cmd += " --scopes %s" % args.scopes # https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \
if ( accelerator != None ): # optional if we want GPUs
cmd += " --accelerator type=%s" % accelerator # nvidia-tesla-p100,count=1"
cmd += " --min-cpu-platform \"%s\"" % args.min_cpu_platform # "Automatic"
cmd += " --image \"%s\"" % args.image_name # "nvidia-gpu-cloud-image-20180227"
cmd += " --image-project \"%s\"" % args.image_project # "nvidia-ngc-public"
cmd += " --boot-disk-size %d" % args.boot_disk_size # 32, in GB
cmd += " --boot-disk-type \"%s\"" % args.boot_disk_type # "pd-standard"
cmd += " --boot-disk-device-name \"%s\"" % args.vm_name # assume same as VM name
# To break big command into individual options per line for debugging
# echo $V | sed -e $'s/ --/\\\n --/g'
# execute the command
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems creating VM \"%s\"" % args.vm_name)
return rc
# Get the returend information, pull out the vmID and (if possible)
# the public IP address of the VM
#
# NOTE: with gcp, IP address is assigned in output from 'create' command
# don't need to poll for it (we waited for command to complete instead)
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# FYI: reason why [0] is user here is that json output format could
# possibly supply more than one instance of data. Since our request
# is specific to one instance, the [0] grouping is kind of redundant
args.vm_id = decoded_output[0]['id'] # may not actually need the ID, all vm_name based
args.vm_ip = decoded_output[0]['networkInterfaces'][0]['accessConfigs'][0]['natIP']
# save vm ID and other fields setup here so don't use them if error later
# actually don't care if it's fully running, (that would be nice) but
# need to save the VM id here since we need to delete it in any case
self.ArgSaveToFile(args)
# Google has a habit of reusing the IP addresses, way more than any other
# csp that I've tested. But since this is an old IP with a new VM, if that
# IP exists in the known_hosts file, it's going to cause problems when
# we try to ssh into it (as will happen right away with "WaitTillRunning"
# Blow away value in known-hosts now. Note that it's also removed when
# the VM is deleted... but done here on create if forgot or removed some
# other way. (TODO: This step needed on other CSPs ? )
self.DeleteIPFromSSHKnownHostsFile(args)
# quick sanity check -- verify the name returned from the create command
# is the same as we were given
returned_name = decoded_output[0]["name"]
# print("name:%s" % returned_name)
if (decoded_output[0]["name"] != args.vm_name):
error ("sanity check: vm name returned \"%s\" != vm_name \"%s\" given to create command" % (returned_name, args.vm_name))
json.dumps(decoded_output, indent=4, sort_keys=True)
return 1
# Seeing an error here on gcloud only where
#
# 1) VM is up in gcloud web page, and can ssh into it there from the web page
# 2) the first ping in WaitTillRunning succeeds
# 3) the ssh in WaitTillRunning fails with a timeout
# 4) any further ping or ssh fails
# 5) see #1
#
# A delay before the first ping seems to workaround the problem
# 5 seconds is not enough, got 30% error rates. 10 seconds seems
# to work at least with"n1-standard-1" instances and no gpus
#
# Adding and additional 10 seconds per GPU. Emperical value
#
delay = 10 + (accelerator_count * 10)
debug (0, "WORKAROUND: external network connect - sleep for %d seconds before ping" % (delay))
time.sleep(delay) # wait a few seconds before ANY command to vm
# Another sanity check -- gcp will return from create only once the
# vm is up and running. This code here (which comes from aws implementation)
# wait's till we can ping and ssh into the VM. It should take little
# time here with gcp, but on the other hand it's a good confidence booster
# to know that we have checked and have verified that we can ping and ssh into
# the vm.
if (rc == 0):
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1)
# returns 0 only if VM is fully up and running, we have it's public IP
# and can ssh into it
debug(2, "createVM returning %d" % rc)
return rc # 0: success, 1: failure
##############################################################################
# StartVM
#
# Starts a Stopped VM, returns it in a fully running state, where we have
# the correct IP address if it changed, and can ssh into the VM
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be started, or invalid ID supplied
#
def StartVM(self, args):
''' Starts the VM '''
rc = 1 # assume error
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False): # checks for a valid VM id
return 1 #
# Get run status and check current state
# The strings being checked here may be CSP specific.
status = self.GetRunStatus(args)
if (status == "RUNNING"):
return 0 # already running, simply return
elif (status == "STOPPING"):
buf = "%s is in %s state, can't start running now" % (args.vm_id, status)
error(buf)
elif (status == "TERMINATED" or status == "null"):
rc = 0 # ok to proceed
else:
buf = "id %s is in \"%s\" state, not sure can start running" % (args.vm_id, status)
error(buf)
if (rc != 0):
return rc # unexpected status
# start the VM
self.Inform("StartVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances start"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# CSP specific - verify that the VM is fully up and running, and that
# we have its IP address and can ssh into it.
#
# Some CSP's may return from their StartVM in this state, so this call
# is optional
if (rc == 0):
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have its public IP
# and can ssh into it
return rc # 0: success, 1: failure
##############################################################################
# StopVM
#
# Stops a running VM. No persistent resources are deallocated, as it's expected
# that the VM will be started again.
#
# Note that most CSP's will continue to charge the customer for the allocated
# resources, even in a Stopped state.
#
# Returns: 0 VM fully stopped
# 1 unable to stop VM. May be invalid ID or connection to CSP
#
def StopVM(self, args):
''' Stop the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# Checks status. Note that "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "RUNNING") # running
if (retcode != 0):
error ("Not running")
return retcode
# Stop the VM
self.Inform("StopVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances stop"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# The CSP may return from the above command once the request
# for stopping has been received. However we don't want to
# return from this function until we are actually positive that
# the VM has completely stopped. This check will be CSP specific
if (rc == 0):
# make sure our persistent IP address is clear -
# google changes IP address after stop. So make sure
# the next time we need it, we go and ask for it
args.vm_ip = ""
# get status
status = self.GetRunStatus(args)
# CSP specific..
# The instance becomes "stopping" after a successful API request,
# and the instance becomes "stopped" after it is stopped successfully.
if (status != "TERMINATED"): # "stopping" - transiant state
error("Asked VM to stop, but status = \"%s\"" % (status))
rc = 1
else:
rc = self.WaitForRunStatus(args, "TERMINATED", TIMEOUT_2) # stopped
# return 0 only when the VM is fully stopped
return rc # 0: success, 1: failure
##############################################################################
# RestartVM
#
# This function restarts a currently running VM
#
# Returns with the VM in a fully running state, where we have its public IP
# address and can ssh into it
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be reset, or invalid ID supplied
#
def RestartVM(self, args): # also known as 'reboot' on aws
''' Restarts the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# can only restart a VM if it's currently running.
# This "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "RUNNING") # running
if (retcode != 0):
error ("Not running")
return retcode
# Restart the VM
self.Inform("RestartVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances start"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
# this code is CSP specific.
#
# on aws after "reset", the status never becomes "un-running"
# anytime durring the reset procss -- so we check when it FAILS
# to ping to know if estart actually occured. Then we simply wait
# till it's back up again - pingable and ssh-able to know it's
# running
#
# Ability to ping the VM is also CSP specific, and is normally
# setup in the Network Security Group as a specific rule.
if (retcode == 0):
if (args.pingable == 1):
rc = self.WaitForPing(args, False, TIMEOUT_2)
print "Saw Pingable rc=%d" % rc
else:
time.sleep(5) # let VM go down enough so SSH stops (we hope)
rc = 0 # fake success, since ping isn't supported
if (rc != 0):
error("never went un-pingable. Did VM restart?")
else:
rc = self.WaitTillRunning(args, "RUNNING", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have its public IP
# and can ssh into it
return rc # 0: success, 1: failure
##############################################################################
# DeleteVM
#
# Deletes a VM and releases all of its resources other than the Network Security
# Group.
#
# Returns: 0 success, VM and all its resources are gone
# 1 problems..
#
def DeleteVM(self, args):
''' delete the vm and all the pieces '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
self.Inform("DeleteVM")
cmd = "gcloud --format=\"json\" beta compute"
cmd += " instances delete"
cmd += " --zone \"%s\"" % args.region # "us-west1-b"
cmd += " --quiet" # 'quiet' prevents prompting "do you want to delete y/n?"
cmd += " \"%s\" " % args.vm_name # note takes VM Name, not a uuid as with aws/azure..
rc, output, errval = self.DoCmd(cmd)
if (rc != 0): # check for return code
error ("Problems deleting VM \"%s\"" % args.vm_name)
return rc
# if we allocated additional resources (IP addresses, disks) in Create,
# we probably need to deallocate them here
# CSP_Specific_Dealloc(stuff...)
# Is error handled ok? What if problems deleting? -- instance left around?
#
# This cleans out everything in the internal args file, so that user must
# fully specify any options on the next create. This is the easiest/safest
# way to make sure any CSP specific ID parameters, like the VM id, also
# get cleared... Really Big hammer, but squishes everything fairly
#
if (rc == 0): # successful so far?
self.Clean(args) # remove file with the persistent id, ip address, ..
self.m_args_fname = "" # clear name, so won't write back args when done
return rc # 0: success, 1: failure
##############################################################################
# CSP specific utility functions
#
# ShowRunning Shows all the account's running VM's
# GetRegions Returns proper list of regions
##############################################################################
##############################################################################
# ShowRunning
#
# CSP specific information function to print out the name, type, description
# and start time of all the running instances in the region
#
# Returns: 0 1 or more running instances were found in CSP's args.region
# 1 no running instances found
#
def ShowRunning(self, args):
''' Shows list of running instances within region of account '''
# CSP_SpecificShowRunning(args.region)
rc = 0
mylist = []
cmd = "gcloud --format=\"json\" beta compute instances list"
rc, output, errval = self.DoCmd(cmd)
if ( rc == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output) # number of instances
lines_printed = 0
for idx in range(0, items):
status = decoded_output[idx]["status"] # "RUNNING", "TERMINATED", ..
if (status == "RUNNING"):
name = decoded_output[idx]["name"] # "gpu-stress-test"
id = decoded_output[idx]["id"] # "6069200451247196266"
machineType = decoded_output[idx]["machineType"] # "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-a/machineTypes/n1-standard-32-p100x4"
cpuPlatform = decoded_output[idx]["cpuPlatform"] # "Unknown CPU Platform"
creationTimestamp = decoded_output[idx]["creationTimestamp"] # "2017-08-18T16:21:42.196-07:00"
zone = decoded_output[idx]["zone"] # "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-east1-d"
# pull interesting data out of longer fields that were gathered above
launch_time = creationTimestamp[0:10]
# VM machine type running on
i = machineType.rfind('/')
if (i != -1):
type = machineType[i+1:] # from last '/'
else:
type = machineType # unexpected format, take it all
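# e.g. ".../machineTypes/n1-standard-32-p100x4" yields type "n1-standard-32-p100x4"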
# datacenter region the VM is running in
i = zone.rfind('/')
if (i != -1):
tzone = zone[i+1:] # from last '/'
else:
tzone = zone # unexpected format, take it all
if (lines_printed == 0):
print("# %s:" % self.m_class_name )
print(" %-20s %-16s %-32s %10s \"%s\"" %(id, tzone, type, launch_time, name))
lines_printed += 1
if (lines_printed == 0):
print("%s: No running instances found" % self.m_class_name )
return 0
##############################################################################
# GetRegions
#
# Returns a list of regions where VMs can be created by this CSP.
#
# These are basically the names of the CSP's data centers... Each data center
# may offer different resources. We don't care about that here. Just need the
# name.
#
# Used in a choice-list in the arg parser when user gives a non-default
# region name to catch invalid names before any real processing is done
#
# Returns: list of names
def GetRegions(self):
''' Returns a list of region names for the CSP '''
mylist = []
cmd = "gcloud --format=\"json\" beta compute regions list"
rc, output, errval = self.DoCmd(cmd)
if ( rc == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output) # number of regions
for idx in range(0, items):
name = decoded_output[idx]["name"] # asia-east1
status = decoded_output[idx]["status"] # UP or ??
if (status == "UP"):
mylist.append(str(name)) # only include running farms
return mylist # list is empty if no regions
| ngc-examples-master | ncsp/gcp_funcs.py |
# template_funcs.py 3/23/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# <CSP> specific class Template functions - starting point for new <CSP> development
# Copy this file to your CSP specific name, and fill in functions
#
# The following line is used with the help text to provide info for this
# class without the expense of actually importing the python class.
#
# HELPTEXT: "Template sample code for not yet developed <CSP>"
#
# 12 Step Program to create a new CSP interface:
#
# 0) Be able to create/destroy and look at VM's through the CSP's
# web interface, and try writing a few hand-built scripts to do
# basic manipulations on them.
# 1) Change this text string "<CSP>" to your CSP name
# (avoid using a '-' in the name; handling that will be a future feature)
# 2) Implement CSPSetupOK()
# 3) Get the 'ShowRunning' function to work; you will learn how to create/parse
# your CSP's interface by doing this
# 4) Implement the Network Security Group functions to first list, then
# create and delete the NSG's
# 5) Starting with the CSP's simplest/cheapest VM instance type,
# implement the "CreateVM" function, and start getting all the csp
# specific argument parsing in ArgOptions. Get to the point where
# you have the VM id, and the IP address.
# 6) Using the output from CreateVM, make sure its Name, ID and IP address
# are saved in the args, and use them to now DeleteVM
# 7) Implement GetRunStatus to get the current run status. Get an idea of what
# it reports during the stopping, starting and terminated states
# 8) Dig into WaitTillRunning, get ping and ssh working, and
# go back and make sure CreateVM is using it correctly
# 9) Get StopVM, StartVM and RestartVM working
# 10) Run ./stest <CSP> to verify that basic commands are working
# from an external script, and the interface is stable
# 11) Start playing with options for different InstanceTypes and
# number/type of GPUs
# 12) Run the 'test' command to get full timing on everything
# and start experimenting with different CPU and GPU parameters
#
import json
import time
import sys
from cspbaseclass import CSPBaseClass
from cspbaseclass import Which
from cspbaseclass import error, trace, trace_do, debug, debug_stop
##############################################################################
# some <CSP> default values that will vary based on the user
#
# default_key_name: User will need to create their own security key and
# specify its name here.
# region: The <CSP> region that they wish to run in. Note that
# GPU instances might not be available at all sites
# user: Login user name for the instance. May be hardcoded by the CSP
# based on the image_name being selected.
##############################################################################
default_key_name = "my-security_key-name" # "my-security_key-name"
default_region = "my-region-name" # "my-region-name"
default_user = "my-user-name" # "my-user-name"
##############################################################################
# What image and instance type to bring up.
#
# default_image_name: Name of OS template that instance will be created with
# default_instance_type: The default name that defines the memory and cpu sizes
# and the gpu types for the instance.
# default_choices: Available instance types that the user can select.
# This will probably be different per region, and will
# continue to change over time. Used as a pre-check in
# command parser to verify choice before sending to csp
##############################################################################
default_image_name = "Generic <CSP> starter AMI*"
default_instance_type = "type1.small" # 1gpu, 4gpu and 8gpu instances
default_choices = ['type1.small', 'type1.med', 'type1.large']
TIMEOUT_1 = (60 * 4) # create, start, terminate
TIMEOUT_2 = (60 * 4) # stop, ping
##############################################################################
# CSPClass
#
# Cloud Service Provided primitive access functions
##############################################################################
class CSPClass(CSPBaseClass):
''' Cloud Service Provider Class for <CSP> '''
##############################################################################
# CSPSetupOK
#
# checks to see that the user has the ability to create and manipulate VM's on this
# CSP. Want to check that up front, instead of later when actually talking
# to the CSP.
#
# does things like verifying that the CLI is installed on the machine, and
# whatever else is quick and easy to check
#
# Should also check to see that can actually connect with the CSP provider
# (network connection) and that network is reliable.
#
def CSPSetupOK(self):
''' quick check to verify our <CSP> command line interface is installed '''
return 0 # TODO: initial debug -- remove this line.
fullpath = Which("<CSP>cli") # change to your actual CSP's user command
if (fullpath == None):
return 1 # error, not found
else:
# TODO: verify network connection to CSP
# TODO: check login setup correctly
return 0
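# As an illustration only (an assumption, using the base-class DoCmd helper):
# the "can we actually talk to the CSP" check could be as cheap as running
# the CLI's account/version query, e.g.
# rc, output, errval = self.DoCmd("<CSP>cli account show")
# and returning 1 if rc is non-zero.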
##############################################################################
# ArgOptions
#
# <CSP> specific argument parser. This extends or overrides default argument
# parsing that is set up in ncsp.py/add_common_options() function
#
# All arguments set up here and in the common code are saved/restored from
# the csp specific args file. See my_class.ArgSave/RestoreFromFile(parser)
# in the base class for implementation.
#
def ArgOptions(self, parser):
''' <CSP> specific option parser '''
region_list = self.GetRegionsCached()
parser.add_argument('--region', dest='region',
default=default_region, required=False,
choices=region_list, # regions change, queried output
help='region in which to create the VM')
parser.add_argument('--instance_type', dest='instance_type', # 'size' on azure, use 'instance-type' as common name
default=default_instance_type, required=False,
choices=default_choices,
help='VM instance (type) to create')
parser.add_argument('--vpcid', dest='vpcid',
default=None, required=False,
help='<CSP> VPC id')
# these override the common/default values from add_common_options
# with this csp's specific values
parser.set_defaults(image_name=default_image_name);
parser.set_defaults(key_name=default_key_name)
parser.set_defaults(user=default_user);
# ping-ability makes starting/stopping more traceable, but this
# feature is disabled by default, and explicitly needs to be
# enabled in the Network Security Group -- see ICMP option
parser.set_defaults(pingable=1) # set to 1 if <CSP> instances we created support pings
###########################################################################
# ArgSanity
#
# CSP class specific argument checks, Called after the argument parser has run
# on the user options as a hook to verify that arguments are correct
#
# 'parser' is the structure returned from argparse.ArgumentParser()
#
# Returns 0 success
# 1 something is wrong, stop
#
def ArgSanity(self, parser, args):
''' <CSP> Parser Argument sanity checking '''
# print args
return 0 # do nothing for now
###########################################################################
# GetRunStatus
#
# Returns the running status of the instance as a string, like 'running'
# 'terminated', 'pending' etc.. This will be somewhat the same across
# all CSPs, but since it comes from them you should not depend upon
# an exact value out of CSP specific code
#
# Returns: string describing state
#
def GetRunStatus(self, args):
''' Returns running-state of instance from describe-instance-status '''
if (self.CheckID(args) == False):
return 1
run_state = "unknown"
self.Inform(run_state)
return(run_state);
###########################################################################
# GetImageId
#
# This might be CSP specific - some CSPs require an ID value for the
# template that they use to create a VM from (aws, alibaba), while others
# (azure) take the full name.
#
# For the cases where a name-to-id lookup is required, this is the function
# that does it.
#
# Name comes from "args.image_name" as a string, and can be given as an
# optional argument by the user
#
# This ID value is returned in "args.image_id", and will be something
# like "# ami-8ee326f6"
#
# Returns: 0 success
# 1 Name is unknown, no ID found
#
def GetImageId(self, args):
# call the function to see if "args.image_name" exists at the CSP
# CSP_Specific_ImageNameToIdLookup(args.image_name, args.region)
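# As an illustration only (not part of this template's flow): on aws the
# lookup typically shells out to something like
# aws ec2 describe-images --filters "Name=name,Values=<image_name>"
# and pulls "ImageId" out of the decoded JSON reply; other CSPs have
# equivalent image-list queries.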
rc = 0
if (rc == 0):
args.image_id = "ami-unknown-id"
return(0)
###########################################################################
# GetIPSetupCorrectly
#
# Called after the instance is created in order to get (if needed), and
# verify, the public IP address that has been returned for the VM
#
# Some CSP's, like azure and alibaba return the IP address from another
# function that sets up the public IP. This needs to be called in the
# CreateVM function in that case.
#
# Other CSP's like aws, return the IP address for you "free" of charge
# as part of the instance information for the VM. This might be returned
# only after the VM creation has been completed.
#
# This function is generically called after the VM has been found to be
# running, to either simply verify that we have a valid IP address in
# the first case above, or to ask the CSP for it and then verify it
# in the second case.
#
# public IP value will be in args.vm_ip
#
# This function can do other cross-checks to validate other setups like
# checking if the SSH key-name returned from the CSP is the same as we
# sent it. Checks like this are optional, but highly desirable.
#
# Returns: 0 success
# 1 fails, invalid IP or can't get it
#
def GetIPSetupCorrectly(self, args):
''' called after 'running' status to get IP. Does nothing for Alibaba '''
debug(1, "ip: %s keyname: \"%s\"" % (args.vm_ip, args.key_name))
# Very CSP specific - may be picked up in CreateVM
args.vm_ip = "1-2-3-4-imaginary.fake.com" # main responsibilty of this function
return 0
##############################################################################
# CSP specific Network Security Group Functions
#
# ShowSecurityGroups Displays NSG (network security groups) in region
# ExistingSecurityGroup Does NSG exist?
# CreateSecurityGroup Creates a NSG from a name, and adds rules
# DeleteSecurityGroup Deletes a NSG
##############################################################################
##############################################################################
# ShowSecurityGroups
#
# This function shows basic information about your account's security groups
# for your region.
#
# Intended to be informative only, as each CSP will probably supply different
# types of information.
#
# Returns: 0 one or more Network Security Groups found in region
# 1 error, or no NSG's defined in region
#
def ShowSecurityGroups(self, args):
''' Displays all current security groups '''
# dummy list of groups to have something to display
output = []
output.append({ "GroupId":"sg_dummy_1", "GroupName":"NSG_Dummy1", "Description":"Desc of Dummy1" })
output.append({ "GroupId":"sg_dummy_2", "GroupName":"NSG_Dummy2", "Description":"Desc of Dummy2" })
output.append({ "GroupId":"sg_dummy_3", "GroupName":"NSG_Dummy3", "Description":"Desc of Dummy3" })
# Have a list of security groups. display them
items = len(output)
for idx in range(0, items):
print "%2d %-12s \"%s\" \"%s\"" % (idx,
output[idx]["GroupId"],
output[idx]["GroupName"],
output[idx]["Description"])
if (items == 0):
return 1 # no NSG's found
else:
return 0 # 1 or more NSG's found
##############################################################################
# ExistingSecurityGroup
#
# Given a name of a security group in args.nsg_name, this function sees
# if it currently exists on the CSP
#
# This entire application is written assuming that once a security group is
# created, it doesn't need to really change much for the lifetime of the
# universe. Therefore we don't delete them unless specifically asked to
#
# The purpose of this function is to decide if we need to create a Network
# Security Group, or to return the id of that existing group in args.nsg_id
#
# Returns: 0 if security group args.nsg_name currently exists and is valid
# 1 need to create a group
#
def ExistingSecurityGroup(self, args):
''' Does the security group name currently exist ? get it if it does'''
trace(2, "\"%s\"" % (args.nsg_name))
if (args.nsg_name == "" or args.nsg_name == None or args.nsg_name == "None"):
error("NetworkSecurityGroup name is \"%s\"" % args.nsg_name)
return 1
args.nsg_id=None # if set, we know it exists.
trace(2, "Did not find security group: \"%s\"" % args.nsg_name)
return 1
##############################################################################
# CreateSecurityGroup
#
# Creates a full network security group by the name of args.nsg_name, saves the
# value in args.nsg_id
#
# Any additional rules required for the security group to set up ssh, ssl and
# ping are added to the group here before it is returned.
#
# If the CSP has an object-tagging feature, the new security group should be
# tagged with a unique name so it can be identified later.
#
# IMPORTANT: if you can create a rule to make the VM pingable (a good thing
# for initial development), be sure to call the following in ArgOptions
# so that the ping feature will be used when needed by this app
#
# "parser.set_defaults(pingable=1)"
#
def CreateSecurityGroup(self, args):
''' creates security group. saves it in args.nsg_id '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
# CSP_Specific_CreateNSG(args.nsg_name, ...)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
if (rc != 0): # check for return code
error ("Problems creating VM \"%s\"" % args.vm_name)
return rc
# get the NSG id of the new security group
args.nsg_id = "sg_FakeNSGID"
debug(1, "args.nsg_id <--- %s" % args.nsg_id)
# tag the NSG id if needed (CSP specific)
# CSP_Specific_TagGroup(args.nsg_id, args.nsg_name)
# Security rules -- make a list of ingress and egress rules - easy to change.
# Slow, but this code is rarely used; understandability is more important.
# Note that unlike aws/alibaba, ingress/egress both live in the same rule set - via the "Direction" field.
# Rule priority is between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.
# TBD: rule for pinging? -- for aws/alibaba this is an 'icmp' rule. Not allowed here.
#
# The actual fields required in this table will be CSP specific
rule = {}
rule[0] = {"Direction":"Inbound", "Name":"AllowSSH", "IpProtocol":"tcp", "ToPort":22, "FromPort":22, "Priority":1000, "Description":"For SSH" }
rule[1] = {"Direction":"Inbound", "Name":"HTTPS-in", "IpProtocol":"tcp", "ToPort":443, "FromPort":443, "Priority":1010, "Description":"For SSL" }
rule[2] = {"Direction":"Outbound", "Name":"HTTPS-out", "IpProtocol":"tcp", "ToPort":443, "FromPort":443, "Priority":110, "Description":"For SSL" }
rule[3] = {"Direction":"Inbound", "Name":"DIGITS6", "IpProtocol":"tcp", "ToPort":5000, "FromPort":5000,"Priority":1020, "Description":"For NVIDIA DIGITS6" }
# rule[1] = {"Name":"Ping", "IpProtocol":"icmp","ToPort":-1, "FromPort":8, "Priority":2000, "Description":"To allow to be pinged" }
outer_retcode = 0
for idx in range(0, len(rule)):
self.Inform("CreateNSG rule %s.%s" %(args.nsg_name, rule[idx]["Name"]) )
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
self.Inform(" ")
return outer_retcode
##############################################################################
# DeleteSecurityGroup
#
# Deletes the security group specified by args.nsg_id, and clears that value
#
# If group Rules attached to the NSG need to be individually deleted, that
# must also be done here if not done automatically by the CSP
#
def DeleteSecurityGroup(self, args):
''' deletes the security group '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
if (args.nsg_id == None):
error("NSG %s already deleted", args.nsg_name)
return(1)
# CSP_Specific_DeleteNSG(args.nsg_id)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
args.nsg_id = None # remove id from args
return(rc)
##############################################################################
# CSP specific VM functions
#
# CreateVM Creates a complete fully running VM
# StartVM Starts a VM if it was stopped, returns running
# StopVM Stops the VM if it is currently running
# RestartVM Resets VM, may not quite be same as Stop/Start
# DeleteVM Removes from the CSP a running or stopped VM
##############################################################################
##############################################################################
# CreateVM
#
# Creates a new VM, and returns when it is fully running.
#
# Note that due to the simple way that this code saves its persistent
# data (the id, user name, ... ), only 1 instance can be created
# at a time. Nothing prevents multiple VM's other than the way we save/reference
# the id values. The CSPClass.Delete function removes the saved references
#
# The "args" option specify the CSP specific name, disk size, instance type,
# or any other parameter required to fully define the VM that is to be created
#
# Before creating the VM, effort is made to verify that all the supplied
# parameters, such as the SSH key name are valid.
#
# Network Security Group (NSG) is created if needed.
#
# Returns: 0 successful, VM fully created, up and ssh-able
# 1 failure, VM not created for one of many possible reasons
#
def CreateVM(self, args):
''' Creates a new VM. 'args' holds parameters '''
if (args.vm_id != "None" and args.vm_id != None):
error("Instance \"%s\" already exists, run 'deleteVM' first, or 'clean' if stale arg list" % args.vm_id)
return 1
args.vm_ip = "" # make sure IP address is clear
# ssh key file, builds path from options, checks existence
# CSP_Specific_Check_Key(args.key_path, args.key_name)
rc = 0
if (rc != 0):
return rc # ssh keys not setup correctly
# security group, create if needed, does nothing if already exists
# consider moving this step outside this VM create to better
# reflect the real VM timing?
self.Inform("CreateNSG")
if (self.CreateNSG(args) != 0): # sets args.nsg_id
return 1
trace(2, "nsg_id: \"%s\" %s" % (args.nsg_name, args.nsg_id))
# look up image-name, return region specific image id
self.Inform("GetImageId")
if (self.GetImageId(args) != 0):
return 1
trace(2, "image_id: \"%s\" %s" % (args.image_name, args.image_id))
# Create the VM
self.Inform("CreateVM")
# CSP_specific_CreateVM(args.vm_name, ...)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
if (rc != 0):
return rc # unable to create the VM
# Get the returned information, pull out the vmID and (if possible)
# the public IP address of the VM
args.vm_id = "vm_dummyID"
args.vm_ip = "" # don't have IP until we see VM running
# CSP Specific - Name your instance if not done from above CreateVM
self.Inform("create-tags")
# CSP_specific_tagVM(args.vm_id, args.vm_name)
rc = 0 # success
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
# This code will be CSP specific, since some CSP's will not return
# from their 'createVM' function until the VM is fully running.
# Otherwise wait till the instance is up and running, pingable and
# ssh-able. The "running" string used here will be CSP specific
#
# Note that this function's major responsibility is to set args.vm_ip
if (rc == 0):
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
rc = self.WaitTillRunning(args, "unknown", TIMEOUT_1)
# save the vm ID and other fields set up here so we don't lose them if an error occurs later.
# we don't actually care yet if it's fully running (that would be nice), but
# we need to save the VM id here since we need to delete it in any case
self.ArgSaveToFile(args)
# returns 0 only if VM is fully up and running, we have its public IP
# and can ssh into it
debug(2, "createVM returning %d" % rc)
return rc # 0: success, 1: failure
##############################################################################
# StartVM
#
# Starts a Stopped VM, returns it in a fully running state, where we have
# the correct IP address if it changed, and can ssh into the VM
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be started, or invalid ID supplied
#
def StartVM(self, args):
''' Starts the VM '''
rc = 1 # assume error
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False): # checks for a valid VM id
return 1 #
# Get run status and check current state
# The strings being checked here may be CSP specific.
status = self.GetRunStatus(args)
if (status == "running"):
return 0 # already running, simply return
elif (status == "stopping"):
buf = "%s is in %s state, can't start running now" % (args.vm_id, status)
error(buf)
elif (status == "stopped" or status == "null"):
rc = 0 # ok to proceed
elif (status == "unknown"):
rc = 0 # TEMPLATE DEVELOPMENT CODE - remove this check!
else:
buf = "id %s is in \"%s\" state, not sure can start running" % (args.vm_id, status)
error(buf)
if (rc != 0):
return rc # unexpected status
# start the VM
self.Inform("StartVM")
# CSP_Specific_StartVM(args.vm_id, args.region, ...)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
# CSP specific - verify that the VM is fully up and running, and that
# we have its IP address and can ssh into it.
#
# Some CSP's may return from their StartVM in this state, so this call
# is optional
if (rc == 0):
rc = self.WaitTillRunning(args, "unknown", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have its public IP
# and can ssh into it
return rc # 0: success, 1: failure
##############################################################################
# StopVM
#
# Stops a running VM. No persistent resources are deallocated, as it's expected
# that the VM will be started again.
#
# Note that most CSP's will continue to charge the customer for the allocated
# resources, even in a Stopped state.
#
# Returns: 0 VM fully stopped
# 1 unable to stop VM. May be invalid ID or connection to CSP
#
def StopVM(self, args):
''' Stop the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# Checks status. Note that "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "unknown") # running
if (retcode != 0):
error ("Not running")
return retcode
# Stop the VM
self.Inform("StopVM")
# CSP_Specific_StopVM(args.vm_id, args.region)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
# The CSP may return from the above command once the request
# for stopping has been received. However we don't want to
# return from this function until we are actually positive that
# the VM has completely stopped. This check will be CSP specific
if (rc == 0):
status = self.GetRunStatus(args)
# CSP specific..
# The instance becomes "stopping" after a successful API request,
# and the instance becomes "stopped" after it is stopped successfully.
if (status != "unknown"): # "stopping" - transiant state
error("Asked VM to stop, but status = \"%s\"" % (status))
rc = 1
else:
rc = self.WaitForRunStatus(args, "unknown", TIMEOUT_2) # stopped
# return 0 only when the VM is fully stopped
return rc # 0: success, 1: failure
##############################################################################
# RestartVM
#
# This function restarts a currently running VM
#
# Returns with the VM in a fully running state, where we have its public IP
# address and can ssh into it
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be reset, or invalid ID supplied
#
def RestartVM(self, args): # also known as 'reboot' on aws
''' Restarts the VM '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
# can only restart a VM if it's currently running.
# This "running" string may be CSP specific
retcode = self.CheckRunStatus(args, "unknown") # running
if (retcode != 0):
error ("Not running")
return retcode
# Restart the VM
self.Inform("RestartVM")
# CSP_SepecificRestartVM(args.vm_id, args.region)
rc = 0;
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
# this code is CSP specific.
#
# For instance, on aws after "reset", the status never changes to
# any "un-running" value during the reset procss -- so we must poll
# via ping to know if restart actually occured. It's pingable, it's
# not pingable, it becomes pingabe once again.
#
# Then we simply wait till it's back up again - pingable and
# ssh-able to know it's running
#
# Ability to ping the VM is also CSP specific, and this 'pingable'
# flag is normally setup in the Network Security Group as a specific rule.
if (retcode == 0):
if (args.pingable == 1):
rc = self.WaitForPing(args, False, TIMEOUT_2)
else:
time.sleep(5) # let VM go down enough so SSH stops (we hope)
rc = 0 # fake success, since ping isn't supported
if (rc != 0):
error("never went un-pingable. Did VM restart?")
else:
rc = self.WaitTillRunning(args, "unknown", TIMEOUT_1) # running
# returns 0 only if VM is fully up and running, we have its public IP
# and can ssh into it
return rc # 0: success, 1: failure
##############################################################################
# DeleteVM
#
# Deletes a VM and releases all of its resources other than the Network Security
# Group.
#
# Returns: 0 success, VM and all its resources are gone
# 1 problems..
#
def DeleteVM(self, args):
''' delete the vm and all the pieces '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False):
return 1
self.Inform("DeleteVM")
# CSP_SpecificDeleteVM(args.vm_id, args.region)
rc = 0
time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep!
# CSP specific..
#
# Some CSP's may initiate deleting the VM and then immediately
# return while all the work is still occurring in the DataCenter
#
# Since we don't want to return until the VM and all the pieces
# are fully deallocated, wait here. As usual, the status string we
# are looking for here may be CSP specific, not 'unknown'
if ( rc == 0 ):
rc = self.WaitForRunStatus(args, "unknown", TIMEOUT_1) # terminated
# CSP specific
#
# Some CSP's like azure and alibaba may have additional resources like
# IP addresses or disks that need to be specifically deleted. Basically if we
# allocated them in Create, we probably need to deallocate them here
# CSP_Specific_Dealloc(stuff...)
# Is error handled ok? What if problems deleting? -- instance left around?
#
# This cleans out everything in the internal args file, so that user must
# fully specify any options on the next create. This is the easiest/safest
# way to make sure any CSP specific ID parameters, like the VM id, also
# get cleared... Really Big hammer, but squishes everything fairly
#
if (rc == 0): # successful so far?
self.Clean(args) # remove file with the persistent id, ip address, ..
self.m_args_fname = "" # clear name, so won't write back args when done
return rc # 0: success, 1: failure
##############################################################################
# CSP specific utility functions
#
# ShowRunning Shows all the account's running VM's
# GetRegions Returns proper list of regions
##############################################################################
##############################################################################
# ShowRunning
#
# CSP specific information function to print out the name, type, description
# and start time of all the running instances in the region
#
# Returns: 0 1 or more running instances were found in CSP's args.region
# 1 no running instances found
#
def ShowRunning(self, args):
''' Shows list of running instances within region of account '''
# CSP_SpecificShowRunning(args.region)
rc = 0
if (rc == 0):
output = []
output.append({ "InstanceId":"i-1234123412341234", "InstanceType":"invented.micro", "LaunchTime":"2018-02-29", "Description":"Fake image 1234" })
output.append({ "InstanceId":"i-5678567856785678", "InstanceType":"invented.biggee", "LaunchTime":"2018-02-30", "Description":"Fake image 5678" })
items = len(output)
lines_printed = 0
for idx in range(0, items):
print(" %-36s %-16s %10s \"%s\"" %
(output[idx]["InstanceId"],
output[idx]["InstanceType"],
output[idx]["LaunchTime"],
output[idx]["Description"]))
lines_printed += 1
if (lines_printed == 0):
print("No running instances found in %s" % args.region)
return 1
return 0
##############################################################################
# GetRegions
#
# Returns a list of regions where VMs can be created by this CSP.
#
# These are basically the names of the CSP's data centers... Each data center
# may offer different resources. We don't care about that here. Just need the
# name.
#
# Used in a choice-list in the arg parser when user gives a non-default
# region name to catch invalid names before any real processing is done
#
# Returns: list of names
def GetRegions(self):
''' Returns a list of region names for the CSP '''
mylist = ["north", "south", "east", "west"]
return mylist
##############################################################################
# CSP specific baseclass override functions
#
# These are only here to make template class appear to work before it's
# attached to the CSP. REMOVE THEM FOR FLIGHT and let real baseclass functions
# work
#
# Ping Pings the fake ip address
# Ssh SSH's into VM,
# WaitForPing Pings ip address, waits for ping to stop or start
# WaitTillCanSSH
#
# REMOVE ALL THESE BASECLASS OVERRIDE FUNCTIONS WHEN TALKING TO A REAL CSP
##############################################################################
def Ping(self, args):
''' fake ping into vm '''
time.sleep(1)
print("66 bytes from %s: icmp_seq=0 ttl=999 time=0.0 ms" % args.vm_ip)
print("66 bytes from %s: icmp_seq=1 ttl=999 time=0.0 ms" % args.vm_ip)
print("66 bytes from %s: icmp_seq=2 ttl=999 time=0.0 ms" % args.vm_ip)
return 0
def Ssh(self, args, doprint, argv):
''' fake SSH into instance, maybe running a command then returning '''
rc = 0
stdoutstr = "fake output"
stderrstr = ""
time.sleep(1)
# requested to print it? (do this for ad-hoc cmds from user)
if doprint:
print(stdoutstr)
# return the values, so parser code can play with it
return rc, stdoutstr, stderrstr
def WaitForPing(self, args, state, timeout):
''' fake Attempts to Ping, or not to Ping VM, waits till get a response '''
time.sleep(1)
return (0)
def WaitTillCanSSH(self, args, sshcmd, timeout):
''' fake Spins till gets a ssh response from the VM '''
time.sleep(1)
return(0)
| ngc-examples-master | ncsp/template_funcs.py |
#!/usr/bin/python
# csp 3/22/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Top level generic CSP (Cloud Service Provider) interface
#
# Demonstrates how to use python to create a consistent interface
# across multiple different cloud providers. Shows code for creating, starting,
# deleting and setting up ssh commands and sessions in those VM's
#
# csp.py outermost command interface
# cspbaseclass.py the main class, and CSP independent functions
# <csp>_funcs.py contains CSP specific code
#
# CSP interfaces are dynamically added if they are found in the same directory
# as this application. File name is "<csp>_funcs.py". Currently supported
# csp types are:
#
# ali Alibaba
# aws Amazon
# azure Microsoft Azure
#
# Basic commands:
# csp help # basic help, lists which csps currently exist
# csp <csp> createVM # create security groups and VM with default settings
# csp <csp> ssh [cmd...] # ssh's and optionally runs cmd on VM
# csp <csp> deleteVM # destroy VM
# csp <csp> test # simple timing test of all the major csp commands
# csp help # overall command help
# csp <csp> --help # csp specific help
#
# Configuration parameters, VM ids, IP address, .. etc are saved in a persistent
# file, and updated as needed. See showArgs and cleanArgs commands
#
# csp <csp> --trace [0..3] turns on some minimal command/return tracing
#
import argparse
import time
import sys
import os
from cspbaseclass import error, trace, trace_do, trace_setlevel, debug_stop
###############################################################################
# simple timing class
###############################################################################
class TimeClass:
''' simple timing class '''
def __init__(self, outer_loop_value):
self.m_test_start = self.Now();
self.m_log_data=[]
self.m_log_idx=0
self.m_instanceTypeName="no name set yet"
self.m_outer_loop_value=outer_loop_value # which outer-loop is this create/delete cycle
def SetInstanceTypeName(self, instanceTypeName):
self.m_instanceTypeName = instanceTypeName # we lose this before it's printed
def InstanceTypeName(self):
return(self.m_instanceTypeName)
def Now(self): # current time, as a floating point number
ts = time.time()
return(ts)
def Diff(self, te, ts): # te is end, ts is start -- what's the difference?
diff = te - ts;
return diff
def Start(self): # return start time
ts = self.Now()
return ts
def End(self, taskname, loop, ts): # called at end, given start time and name
te = self.Now()
diff = self.Diff(te, ts)
name = taskname
name += '.'
name += str(loop)
print "%2s %-20s %8.2f" % ("", name, diff)
# simple list containing "name" and "diff" fields
self.m_log_data.append((name, diff))
self.m_log_idx += 1
###############################################################################
# reporting functions
###############################################################################
def SummaryInit(self, my_class, args):
''' Summary initialization - conclusions/sums '''
self.m_test_end = self.Now();
self.m_test_diff = self.Diff(self.m_test_end, self.m_test_start) # overall test time
def SummaryReport(self, my_class, args):
''' Summary report to display at end of job'''
print ""
print "#---------------------------------------------------------"
print "# %s %s image:%s" % (my_class.m_class_name, args.instance_type, args.image_name)
print ("# loop %d of %d start/stop/del:%d %s\n" %
(self.m_outer_loop_value+1, args.outer_loop_cnt,
args.inner_loop_cnt,
time.strftime("%Y%b%d-%a-%H%M", time.localtime())))
print "#"
print ""
for idx in range(0, self.m_log_data.__len__()):
val = self.m_log_data[idx]
print "%2d %-20s %8.2f" % (idx, val[0], val[1])
print "%2s %-20s %8.2f" % ("", "overall", self.m_test_diff) # done after InitSummary called
print ""
def SummaryLog(self, my_class, args):
''' Summary log - intent is to easily cut/paste to spreadsheet table '''
with open(my_class.m_log_path + "test", "a") as f: # test summary file
f.write( "\n" )
f.write( "# %s loop %d of %d start/stop/del:%d\n" %
(my_class.m_class_name,
self.m_outer_loop_value+1, args.outer_loop_cnt,
args.inner_loop_cnt))
f.write( "%s\n" % time.strftime("%Y-%m-%d", time.localtime()))
f.write( "%s\n" % args.image_name)
f.write( "%s\n" % (args.instance_type))
for idx in range(0, self.m_log_data.__len__()):
val = self.m_log_data[idx]
f.write( "%.2f\n" % val[1])
f.write( "%.2f\n" %self.m_test_diff) # done only after InitSummary called
##############################################################################
# generic timing test, how long does it take to do basic VM features?
def time_test(my_class, outer_loop_value, args):
''' generic CSP vm create/stop/start/reset/delete timing test '''
my_time = TimeClass(outer_loop_value)
# create/get id for Network Security Group
ts = my_time.Start()
rc = my_class.CreateNSG(args)
my_time.End("createNSG", 0, ts)
if (rc != 0):
return rc
ts = my_time.Start()
rc = my_class.CreateVM(args) # args is from parser.parse_args(argv)
my_time.End("createVM", 0, ts)
if (rc != 0):
error ("createVM returned %d, stopping test" % rc)
return rc
# type of VM created - size, number of CPUs, GPUs... defined by name
my_time.SetInstanceTypeName(args.instance_type)
# start/stop/restart loops, default is 2
loop = 0 # initialize value if loop isn't run (loop_cnt = 0)
for loop in range(0, args.inner_loop_cnt):
ts = my_time.Start()
my_class.StopVM(args)
my_time.End("stopVM", loop, ts)
time.sleep(5)
ts = my_time.Start()
my_class.StartVM(args)
my_time.End("startVM", loop, ts)
time.sleep(5)
ts = my_time.Start()
my_class.RestartVM(args)
my_time.End("restartVM", loop, ts)
time.sleep(5)
# delete vm
ts = my_time.Start()
my_class.DeleteVM(args)
my_time.End("deleteVM", loop, ts)
# delete Security Group
time.sleep(5) # for alibaba, need a delay before trying to delete NSG
# immediately after deleting the VM -- the deleteNSG fails
ts = my_time.Start()
my_class.DeleteNSG(args)
my_time.End("deleteNSG", loop, ts)
# delete the persistent information - VM/NSG id, name..
my_class.Clean(args)
# final report
my_time.SummaryInit(my_class, args) # calculate any conclusions..
if (args.summary_report != 0): # extra, possibly redundant
my_time.SummaryReport(my_class, args) # but nicely formatted user report
my_time.SummaryLog(my_class, args) # cut/pasteable format in log file
# successful return
return 0
# get_csp_list
#
# Returns the list of all the csp's that we support (i.e. all the files that
# end with _funcs.py).
#
# internal function
def get_csp_list():
''' returns a list of supported csps -- not including 'template' '''
csp_list=[]
import glob
filelist = glob.glob(module_path + "*_funcs.py") # ['test1/ali_funcs.py', 'test1/azure_funcs.py', ...
for name in filelist:
pos0 = name.rfind("/")
pos1 = name.rfind("_funcs.py")
csp_name = name[pos0+1:pos1] # remove the _funcs.py" from it
if (csp_name != 'template'):
csp_list.append(csp_name)
return(csp_list)
# show_csps
#
# Prints the list of all the csp's that we support (i.e. all the files that
# end with _funcs.py).
#
# List can be used by further scripting
#
# for csp_name in $(./ncsp csps); do ./ncsp $csp_name running; done
#
def show_csps():
''' returns a list of supported csps -- not including 'template' '''
csp_list = get_csp_list()
for csp_name in csp_list:
print("%s " % csp_name)
return 0
# prints command line usage
def usage(module_path):
''' program usage help text '''
print('''\
Nvidia Cloud Service Provider common simple scriptable interface
usage:
ncsp cmd [options]
ncsp <csp> csp_cmd [options]
cmd: top level csp-independent commands
help overall application help
csps lists supported csps
''')
# show the <csp>_funcs.py files that we have in the directory
import glob
filelist = glob.glob(module_path + "*_funcs.py") # ['test1/ali_funcs.py', 'test1/azure_funcs.py', ...
print(" csp: name of the supported Cloud Service Provider (csp)")
# special case for 'all'.
print(" %-23s %s" % ("ALL", "Runs command on all CSP's one after each other"))
# now the rest of the files.
for filename in filelist:
pos0 = filename.rfind("/")
pos1 = filename.rfind("_funcs.py")
csp_name = filename[pos0+1:pos1]
# pull quoted string after HELPTEXT= from the file
#
helptext=""
try:
with open(filename, "r") as f:
for i, line in enumerate(f):
if (i > 10):
break;
idx = line.find("HELPTEXT:");
if (idx >= 0):
start = line.find("\"", idx+9);
end = line.find("\"", start+1)
if (start > idx and end > start):
helptext=line[start+1:end]
break
except:
helptext="" # could not open file, don't report error
print(" %-23s %s" % (csp_name, helptext))
# rest of the menu
print('''
csp_cmd:
CSP specific commands:
createVM[opts] create instance, use -h to see csp specific options
stopVM stop current instance
startVM start current instance
restartVM restart current instance
deleteVM delete (stop first) and destroy instance
test create/stop/start/restart/delete timing test
ping simple ping VM if possible - check connection
ssh [cmd] ssh into current VM instance, run command if given
status status of current instance
show verbose info about instance
Network Security Group commands:
createNSG [opts] creates network security group
deleteNSG deletes network security group
showNSGs shows all network security groups
CSP Query commands:
regions displays list of region names supported by csp
running display list of running instances in a region
General commands
validCSP returns 0 if csp name is supported, 1 otherwise
ip prints the ip value of the VM
args display persistent args file
clean clean cached files, restore args to defaults
help
--help csp specific argument help
''')
sys.exit(1)
def add_common_options(my_class, parser):
''' common arguments used in outer control and CSP specific features '''
parser.add_argument('--version', action='version', version="%(prog)s 0.0")
parser.add_argument('--trace', dest='trace', type=int, choices=xrange(0,4),
default=0, required=False,
help='trace level flag: 0:none, 1:cmd, 2:+info, 3:+output')
parser.add_argument('--inner_loop_cnt', dest='inner_loop_cnt', type=int, choices=xrange(0, 6),
default=2, required=False,
help='inner stop/start/reset test loops run')
parser.add_argument('--outer_loop_cnt', dest='outer_loop_cnt', type=int, choices=xrange(0, 6),
default=1, required=False,
help='outer over-all create/delete loops run')
parser.add_argument('--summary_report', dest='summary_report', type=int, choices=xrange(0, 2),
default=1, required=False,
help='show summary report at end of test')
# some computed defaults used for VM
my_user = os.environ["USER"];
my_vm_name = my_user + time.strftime("-%a-%Y%b%d-%H%M%S", time.localtime())
my_vm_name = my_vm_name.lower() # gcp (gcloud) wants all lower case names
my_nsg_name = my_user + "NSG" # for Network Security Group
# common VM arguments -- do it here so we don't have to set up these args
# for every CSP. Gives them default values of "" so we know if they are set or not
# CSP code can override any of these with parser.set_defaults(key);
parser.add_argument('--user', dest='user', # overridden in CSP specific code
default=None, required=False,
help='username for the VM')
parser.add_argument('--vm_name', dest='vm_name', # Name of VM
default=my_vm_name, required=False,
help='external name of the VM')
parser.add_argument('--vm_id', dest='vm_id', # set in CSP specific code
default=None, required=False,
help='id value of the VM')
parser.add_argument('--nsg_name', dest='nsg_name', # common: Name of Network Security Group
default=my_nsg_name, required=False,
help='Network Security Group Name')
parser.add_argument('--nsg_id', dest='nsg_id', # set in CSP specific code
default="", required=False,
help='Network Security Group ID')
parser.add_argument('--key_name', dest='key_name', # overridden in CSP specific code
default=None, required=False,
help='ssh key name')
parser.add_argument('--key_path', dest='key_path', # common: where ssh key files reside
default="~/.ssh/", required=False,
help='directory where ssh key files reside')
parser.add_argument('--key_file', dest='key_file', # computed in CSP specific code
default=None, required=False,
help='full path to ssh key file')
parser.add_argument('--image_name', dest='image_name', # overridden in CSP specific code
default=None, required=False,
help='name of the VM image to run')
parser.add_argument('--image_id', dest='image_id', # set in CSP specific code
default=None, required=False,
help='ID of the VM image to run')
parser.add_argument('--pingable', dest='pingable', # ping feature is optional to most VM network config
type=int, choices=xrange(0,2),
default=0, required=False, # default is not pingable
help='set to 1 if can ping IP address')
parser.add_argument('--ip', dest='vm_ip', # set in CSP specific code
default="", required=False,
help='VM IP address')
# process_cmd
#
# command line processor - a big case statement
# see https://www.pydanny.com/why-doesnt-python-have-switch-case.html
#
# my_class is the CSPBaseClass, while argv are the additional command line
# arguments that were passed in. This function is the top level command
# line parser for all the CSPs - this code is generic across all of them
#
# The 'createVM', 'stopVM' and similar functions are csp specific; they change
# the state of a VM, gather the proper IP address and set up the security
# rules.
#
# Commands like 'ssh', 'ping' use the IP address that was saved and allow
# access to that VM
#
#
#
def process_cmd(my_class, argv):
# first thing, verify that the connection to the CSP is up and
# running correctly (cli app downloaded, user logged in, etc...)
rc = my_class.CSPSetupOK() # csp name dependent function
if (rc != 0):
error("CSP \"%s\" access is not configured correctly, set it up first" % my_class.ClassName())
return rc # unhappy
# create the main command line argument parser class
parser = argparse.ArgumentParser(prog='csp',
description='CSP simple python interface for %s' % my_class.ClassName())
# common options arguments
add_common_options(my_class, parser)
# add in positional arguments
parser.add_argument('command', help="command to execute, run 'help' for details")
parser.add_argument('arguments', help="optional csp specific args run '-h' for details",
nargs=argparse.REMAINDER)
# class specific arguments
my_class.ArgOptions(parser) # csp dependent function
# update the defaults with values saved in file if that file exists
my_class.ArgRestoreFromFile(parser)
# actual argument parser, and any CSP class specific checks
# 'args' here contains all the argument and option values in this order
#
# 1) hardcoded defaults in arg-command, or programmatically determined
# 2) overridden by any value specified in the saved args from last run (if saved)
# 3) overridden by any values specified on the command line
#
# Then the command is run
#
# Then, at the very end of this function, if commands were successful, all the
# option values and computed/inquired values like CSP ID values are written
# back to a file -- to be picked up in #2 above.
args = parser.parse_args(argv)
# set global value used for trace level, as 'args' isn't passed around everywhere
trace_setlevel(args.trace)
# CSP class specific arg checks,
# bail here if something isn't set correctly
rc = my_class.ArgSanity(parser, args)
if (rc != 0):
error("In ArgSanity rc:%d" % rc)
return(rc)
# this is the command that is to be run, pull from the args
cmd = args.command
# commands to handle the persistent arg list --
if cmd == "clean":
my_class.Clean(args) # cleans out args and other cached files
return 0
elif cmd == "args":
my_class.ArgShowFile()
return 0
elif cmd == "help":
usage(my_class.m_module_path)
return 1
# print args if higher trace level
if (trace_do(2)):
print vars(args)
print "============"
print "cmd=%s" % cmd
rc = 0 # return value if forget to set below
# parse the commands
if cmd == "validCSP":
rc = 0 # invalid CSP name errors out above
elif cmd == "createNSG":
rc = my_class.CreateNSG(args)
elif cmd == "deleteNSG":
rc = my_class.DeleteNSG(args)
elif cmd == "showNSGs":
rc = my_class.ShowNSGs(args)
elif cmd == "createVM":
rc = my_class.CreateVM(args) # args is from parser.parse_args(argv)
elif cmd == "startVM":
rc = my_class.StartVM(args)
elif cmd == "stopVM":
rc = my_class.StopVM(args)
elif cmd == "restartVM":
rc = my_class.RestartVM(args)
elif cmd == "deleteVM":
rc = my_class.DeleteVM(args)
elif cmd == "ssh":
rc, stdoutstr, stderrstr = my_class.Ssh(args, True, argv[1:]) # args is historical and incl
elif cmd == "ping":
rc = my_class.Ping(args)
elif cmd == "status":
rc = my_class.Status(args)
elif cmd == "show":
rc = my_class.Show(args)
elif cmd == "boottime":
rc, kernel, user, total = my_class.KernelBootTime(args)
if (rc == 0):
print ("kernel:%s user:%s total:%s" % (kernel, user, total))
elif cmd == "running":
rc = my_class.ShowRunning(args)
elif cmd == "regions":
rc = my_class.ShowRegions(args)
elif cmd == "ip":
rc = my_class.ShowIP(args)
elif cmd == "test": # default is 1 outer create/delete loop
if (args.outer_loop_cnt <= 0):
error("outer_loop_cnt=0, no tests run")
else:
for loop in range(0, args.outer_loop_cnt):
rc = time_test(my_class, loop, args)
if (rc != 0):
break
time.sleep(30) # time between loops
if (rc != 0):
error("Test returned %d" % rc)
else:
error("Undefined command", cmd)
usage(my_class.m_module_path)
rc = 1
# save all the persistent args values to file after the above commands have
# run and modified them -- like the VM or SecurityGroup IDs
if (cmd != "DeleteVM"):
my_class.ArgSaveToFile(args)
if rc is None: # handle "None" return case -- should be an error?
error("No return code for cmd \"%s\"" % cmd)
rc = 2
return rc # exit code
###############################################################################
# do_csp_cmd
#
# Major magic of the code..
#
# dynamically based on the csp name, load a module "<csp>_funcs.py"
# and create its main class instance. This csp specific file will
# be in the same directory as the main module.
#
# To add a new CSP, simply create a csp-specific file named
# "<csp>_funcs.py" with interfaces that are the same as the other examples
# and drop it into the directory with the other csp-specific files
#
# NOTE: find_module() does not handle dotted package names,
# so keep the file structure simple
#
# See: https://pymotw.com/2/imp/ (1/2018)
#
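# As an illustrative sketch: running "ncsp aws <command>" makes this function
# look for "aws_funcs.py" next to this module, import it, instantiate its
# CSPClass, and then hand the remaining arguments to process_cmd()
#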
def do_csp_cmd(csp, argv):
''' import csp dependent class based on name, and run command on it '''
import imp
module_name = "%s_funcs" % csp
try:
f, filename, description = imp.find_module(module_name)
package = imp.load_module(module_name, f, filename, description)
my_class = package.CSPClass(csp, module_path)
except ImportError, err:
print "Error: CSP \"%s\" not supprted: %s" %(csp, err)
sys.exit(1) # unhappy return
# process the command line arguments on class (does all the work)
rc = process_cmd(my_class, sys.argv[2:])
return rc
###############################################################################
# main body of the ncsp application. Code starts here.
#
# Loads the csp specific csp module and does the work
#
# argv[0] is the full path to the prog name -- from it we can get
# the path where our modules will be, used for search later
try:
pos = sys.argv[0].rfind("/")
module_path = sys.argv[0][0:pos+1]
except:
module_path = sys.argv[0]
if (sys.argv.__len__() == 1): # no arguments, print usage
usage(module_path)
# if we have one arg, it's probably the csp name, but there are a
# few special options like 'help' or 'csps' that are also allowed
arg1=sys.argv[1] # our csp name, like "aws",
if (arg1 == "help" or arg1[0:1] == '-'): # be nice if user is confused
usage(module_path) # usage exits, does not return
elif (arg1 == "csps"): # list all known CSP classes
rc = show_csps()
sys.exit(rc)
# from here on out, we are doing a CSP dependent function -- so
# need at least one more argument beyond the CSP name
csp = arg1 # name of the csp we are talking about
if (sys.argv.__len__() <= 2):
usage(module_path) # not enough args, exit with usage
# from here on, the argument list starts with the 2nd value
argv=sys.argv[2:]
# if csp is 'ALL', then run the given command on all of the csp's that are
# active (don't complain about those CSP's that fail the CSPSetupOK test)
# Also don't run the 'template' class -- we want the good stuff here
if (csp == "ALL"):
csp_list = get_csp_list()
for csp in csp_list:
rc = do_csp_cmd(csp, argv)
else:
# single csp is given -- run it.
# parse the rest of the command line and run it on the given CSP
rc = do_csp_cmd(csp, argv)
sys.exit(rc)
| ngc-examples-master | ncsp/ncsp.py |
# aws_funcs.py 3/23/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Amazon (aws) Cloud Service Provider specific functions
#
# HELPTEXT: "Amazon Cloud Service Provider"
#
import json
import time
import sys
from cspbaseclass import CSPBaseClass
from cspbaseclass import Which
from cspbaseclass import error, trace, trace_do, debug, debug_stop
import cmd
##############################################################################
# some Amazon aws defaults values that will vary based on users
#
# default_key_name: User will need to create their own security key and
# specify its name here.
# region: The aws region that they wish to run in. Note that
# GPU instances might not be available at all sites
# user: Login user name for instance. May be hardcoded by the CSP
# based on the image_name being selected.
##############################################################################
default_key_name = "my-security-key-name"
default_region = "my-region-name"
default_user = "my-user-name"
##############################################################################
# What image and instance type to bring up.
#
# default_image_name: Name of OS template that instance will be created with
# default_instance_type: The default name that defines the memory and cpu sizes
# and the gpu types for the instance
# default_choices: Available instance types that the user can select from.
# This will probably be different per region, and will
# continue to change over time. Used as a pre-check in
# command parser to verify choice before sending to csp
##############################################################################
if (False): # non gpu version - quick non-gpu testing
default_image_name = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*"
default_instance_type = "t2.micro"
default_choices = ['t2.micro']
else: # aws marketplace has nvidia volta image locked to only running on aws p3 type boxes
default_image_name = "NVIDIA Volta Deep Learning AMI*"
default_instance_type = "p3.2xlarge" # 1gpu, 4gpu and 8gpu instances
default_choices = ['p3.2xlarge', 'p3.8xlarge', 'p3.16xlarge']
TIMEOUT_1 = (60 * 4) # create, start, terminate
TIMEOUT_2 = (60 * 4) # stop, ping
##############################################################################
# CSPClass
#
# Cloud Service Provided primitive access functions
##############################################################################
class CSPClass(CSPBaseClass):
''' Cloud Service Provider Class '''
##############################################################################
# CSPSetupOK
#
# checks to see that user has ability to create and manipulate VM's on this
# CSP. Want to check that up front, instead of later when actually talking
# to the CSP.
#
# does things like verifying that the CLI is installed on the machine, and
# whatever else is quick and easy to check
#
# Should also check to see that can actually connect with the CSP provider
# (network connection) and that network is reliable.
#
def CSPSetupOK(self):
''' quick check to verify amazon aws command line interface is installed '''
fullpath = Which("aws") # does cli application exist?
if (fullpath == None):
return 1 # error, cli app not found
else:
# TODO: verify network connection to CSP
# TODO: check login setup correctly
return 0
##############################################################################
# ArgOptions
#
# aws specific argument parser. This extends or overrides default argument
# parsing that is set up in ncsp.py/add_common_options() function
#
# All arguments set up here and in the common code are saved/restored from
# the csp specific args file. See my_class.ArgSave/RestoreFromFile(parser)
# in the base class for implementation.
#
def ArgOptions(self, parser):
''' Aws specific option parser '''
region_list = self.GetRegionsCached()
parser.add_argument('--region', dest='region',
default=default_region, required=False,
choices=region_list, # regions change, queried output
help='region in which to create the VM')
parser.add_argument('--instance_type', dest='instance_type', # 'size' on azure, use 'instance-type' as common name
default=default_instance_type, required=False,
choices=default_choices,
help='VM instance (type) to create')
parser.add_argument('--vpcid', dest='vpcid',
default=None, required=False,
help='aws VPC id')
# these override the common/default values from add_common_options
# with this csp's specific values
parser.set_defaults(image_name=default_image_name);
parser.set_defaults(key_name=default_key_name)
parser.set_defaults(user=default_user);
# ping-ability makes starting/stopping more traceable, but this
# feature is disabled by default, and explicitly needs to be
# enabled in the Network Security Group -- see ICMP option
parser.set_defaults(pingable=1) # aws instances we created support pings (alibaba not)
###########################################################################
# ArgSanity
#
# aws class specific argument checks, Called after the argument parser has run
# on the user options as a hook to verify that arguments are correct
#
# 'parser' is the structure returned from argparse.ArgumentParser()
#
# Returns 0 success
# 1 something is wrong, stop
#
def ArgSanity(self, parser, args):
''' AWS Arg sanity checking '''
# print args
return 0 # do nothing for now
###########################################################################
# GetRunStatus
#
# Returns the running status of the instance as a string, like 'running'
# 'terminated', 'pending' etc.. This will be somewhat the same across
# all CSPs, but since it comes from them you should not depend upon
# an exact value out of CSP specific code
#
# Returns: string describing state
#
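# Abbreviated sketch of the JSON fields this function reads from
# 'aws ec2 describe-instance-status':
# { "InstanceStatuses": [ { "InstanceState": { "Name": "running" }, ... } ] }
# an empty "InstanceStatuses" list falls back to describe-instances (or "terminated")
#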
def GetRunStatus(self, args):
''' Returns running-state of instance from describe-instance-status '''
if (self.CheckID(args) == False):
return 1
cmd = "aws ec2 describe-instance-status"
cmd += " --instance-id %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
decoded_output = json.loads(output)
state = decoded_output['InstanceStatuses']
if state.__len__() > 0:
run_state = decoded_output['InstanceStatuses'][0]['InstanceState']['Name']
else:
# if describe-instance-status is empty for stopped states, use more detailed call
cmd = "aws ec2 describe-instances"
cmd += " --instance-id %s" % args.vm_id
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
decoded_output = json.loads(output)
anyinfo = decoded_output['Reservations']
if anyinfo.__len__() > 0:
run_state = decoded_output['Reservations'][0]['Instances'][0]['State']['Name']
else:
run_state = "terminated" # doesn't exist any longer
# return the value, should be something like "running" or "pending" or ""
self.Inform(run_state)
return(run_state);
# From the image name, find the ID of the AMI image that will be loaded
#
# Get the image ID of the "NVIDIA Volta(TM) Deep Learning AMI" that we created.
# Note that currently (10/2017) the ID of this image changes whenever we update
# the image. This query here does a name-to-id lookup. The name should remain constant.
def GetImageId(self, args):
cmd = "aws ec2 describe-images"
cmd += " --region %s" % args.region
cmd += " --filters Name=name,Values=\"%s\"" % args.image_name
retcode, output, errval = self.DoCmd(cmd)
if (retcode != 0):
error(errval)
sys.exit(1) # fail to get name, exit script
# decode the JSON output
decoded_output = json.loads(output)
# print json.dumps(decoded_output, indent=4, sort_keys=True)
args.image_id = decoded_output['Images'][0]['ImageId'] # ami-8ee326f6
return(0)
###########################################################################
# GetIPSetupCorrectly
#
# Called after the instance is created in order to get if needed, and
# verify that a public IP address has been returned for the VM
#
# Some CSP's, like azure and alibaba return the IP address from another
# function that sets up the public IP. This needs to be called in the
# CreateVM function in that case.
#
# Other CSP's like aws, return the IP address for you "free" of charge
# as part of the instance information for the VM. This might be returned
# only after the VM creation has been completed.
#
# This function is generically called after the VM has been found to be
# running, to either simply verify that we have a valid IP address in
# the first case above, or to ask the CSP for it and then verify it
# in the second case.
#
# public IP value will be in args.vm_ip
#
# This function can do other cross-checks to validate other setups like
# checking if the SSH key-name returned from the CSP is the same as we
# sent it. Checks like this are optional, but highly desirable.
#
# Returns: 0 success
# 1 fails, invalid IP or can't get it
#
def GetIPSetupCorrectly(self, args):
''' called after 'running' status to get the public IP address '''
# On aws, the IP address changes across stop/start cases.
#
# get full description of the instance json record - large
# from this we can get the public IP address of the instance
cmd = "aws ec2 describe-instances"
cmd += " --instance-id %s" % args.vm_id
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
# this return json structure from 'describe-instances' has about 50 values
# in it that, as the command says, describes the instance. Only need a few
# of them here.
decoded_output = json.loads(output)
args.vm_ip = decoded_output['Reservations'][0]['Instances'][0]['PublicDnsName']
key_name = decoded_output['Reservations'][0]['Instances'][0]['KeyName' ]
debug(1, "ip: %s keyname: \"%s\"" % (args.vm_ip, key_name))
# name of SSH keyfile was sent to Create function when VM was built, and we
# get a chance to read it back here. Paranoid check to verify that it is
# the same. A mismatch should never happen, but check for safety
if (key_name != args.key_name): # cross-check
error ("args.key_name:\"%s\" != version vm thinks its using:\"%s\"", args.key_name, key_name)
return 1
return 0
##############################################################################
# CSP specific Network Security Group Functions
#
# ShowSecurityGroups Displays NSG (network security groups) in region
# ExistingSecurityGroup Does NSG exist?
# CreateSecurityGroup Creates a NSG from a name, and adds rules
# DeleteSecurityGroup Deletes a NSG
##############################################################################
##############################################################################
# ShowSecurityGroups
#
# This function shows basic information about your account's security groups
# for your region.
#
# Intended to be informative only, as each CSP will probably supply different
# type of information.
#
# Returns: 0 one or more Network Security Groups found in region
# 1 error, or no NSG's defined in region
#
def ShowSecurityGroups(self, args):
''' Displays all current security groups '''
cmd = "aws ec2 describe-security-groups " # build the AWS command to create an instance
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
error ("Problems describing security groups")
return 1
decoded_output = json.loads(output)
items = len(decoded_output["SecurityGroups"]) # number of security groups
# trace(2, json.dumps(decoded_output["SecurityGroups"][0], 4, sort_keys = True))
# returns a list of security groups. display them
for idx in range(0, items):
print "%2d %-12s \"%s\" \"%s\"" % (idx,
decoded_output["SecurityGroups"][idx]["GroupId"],
decoded_output["SecurityGroups"][idx]["GroupName"],
decoded_output["SecurityGroups"][idx]["Description"])
return 0
##############################################################################
# ExistingSecurityGroup
#
# Given a name of a security group in args.nsg_name, this function sees
# if it currently exists on the CSP
#
# This entire application is written assuming that once a security group is
# created, it doesn't need to really change much for the lifetime of the
# universe. Therefore we don't delete them unless specifically asked to
#
# The purpose of this function is to decide if we need to create a Network
# Security Group, or to return the id of that existing group in args.nsg_id
#
# Returns: 0 if security group args.nsg_name currently exists and is valid
# 1 need to create a group
#
def ExistingSecurityGroup(self, args):
''' Does the security group name currently exist ? get it if it does'''
trace(2, "\"%s\"" % (args.nsg_name))
if (args.nsg_name == "" or args.nsg_name == None or args.nsg_name == "None"):
error("NetworkSecurityGroup name is \"%s\"" % args.nsg_name)
return 1
# Is there a better way to do this than to pull in the entire dictionary
# and iterate through the keys?
cmd = "aws ec2 describe-security-groups " # build the AWS command to create an instance
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
error ("Problems describing security groups")
return 1
decoded_output = json.loads(output)
# number of security groups
items = len(decoded_output["SecurityGroups"]) # number of security groups
# slow search for name
for idx in range(0, items):
if (decoded_output["SecurityGroups"][idx]["GroupName"] == args.nsg_name):
args.nsg_id = decoded_output["SecurityGroups"][idx]["GroupId"]
debug(2, "%2d %-12s \"%s\"" % (idx,
decoded_output["SecurityGroups"][idx]["GroupId"],
decoded_output["SecurityGroups"][idx]["GroupName"]))
return 0 # found it
# returns 1 if did not find security group
trace(2, "Did not find security group: \"%s\"" % args.nsg_name)
return 1
##############################################################################
# CreateSecurityGroup
#
# Creates a full network security group by the name of args.nsg_name, saves the
# value in args.nsg_id
#
# Any additional rules required for the security group to set up ssh, ssl and
# ping are added to the group here before it is returned.
#
# If the CSP has an object-tagging feature, the new security group should be
# tagged with a unique name so it can be identified later.
#
# IMPORTANT: if you can create a rule to make the VM pingable (a good thing
# for initial development), be sure to call following in ArgOptions
# so that the ping feature will be used when needed by this app
#
# "parser.set_defaults(pingable=1)"
#
def CreateSecurityGroup(self, args):
''' creates security group. saves it in args.nsg_id '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
# Get the users VPC id if we don't have it
if (args.vpcid == "" or args.vpcid == None or args.vpcid == "None"):
cmd = "aws ec2 describe-vpcs"
cmd += " --region %s" % args.region
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0):
return retcode
decoded_output = json.loads(output)
debug(2, json.dumps(decoded_output, indent=4, sort_keys=True))
args.vpcid = decoded_output["Vpcs"][0]["VpcId"]
debug(1, "args.vpcid <--- %s" % args.vpcid)
# create the security group, with a meaningful description
desc = "NSG Generated for %s" % args.vm_name
cmd = "aws ec2 create-security-group"
cmd += " --group-name %s" % args.nsg_name
cmd += " --description \"%s\"" % desc
cmd += " --vpc-id %s" % args.vpcid
cmd += " --region %s" % args.region
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
return retcode
# get the groupid of the new security group
decoded_output = json.loads(output)
debug(2, json.dumps(decoded_output, indent=4, sort_keys=True))
args.nsg_id = decoded_output["GroupId"]
debug(1, "args.nsg_id <--- %s" % args.nsg_id)
# tag new group with our group name
cmd = "aws ec2 create-tags"
cmd += " --resource %s" % args.nsg_id
cmd += " --tags Key=Name,Value=%s" % args.nsg_name
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
return retcode
# Security rules -- make a list of ingress and outgress rules - easy to change
# slow, but this code is rarely used. understandability is more important
ingress = {}
ingress[0] = {"IpProtocol":"tcp", "ToPort":22, "FromPort":22, "CidrIp":"0.0.0.0/0", "Description":"For SSH" }
ingress[1] = {"IpProtocol":"tcp", "ToPort":443, "FromPort":443, "CidrIp":"0.0.0.0/0", "Description":"For SSL" }
ingress[2] = {"IpProtocol":"tcp", "ToPort":5000, "FromPort":5000,"CidrIp":"0.0.0.0/0", "Description":"For NVIDIA DIGITS6" }
ingress[3] = {"IpProtocol":"icmp","ToPort":-1, "FromPort":8, "CidrIp":"0.0.0.0/0", "Description":"To allow to be pinged" }
egress = {}
outer_retcode = 0
for idx in range(0, len(ingress)):
self.Inform("CreateNSG rule %s.%s" % (args.nsg_name, ingress[idx]["Description"]))
cmd = "aws ec2 authorize-security-group-ingress"
cmd += " --group-id %s" % args.nsg_id
cmd += " --ip-permissions '[{" # mini-embedded json like
cmd += " \"IpProtocol\":\"%s\"," % ingress[idx]["IpProtocol"]
cmd += " \"ToPort\":%s," % ingress[idx]["ToPort"] # KEEP 'To' before 'From' - no effect for tcp, but
cmd += " \"FromPort\":%s," % ingress[idx]["FromPort"] # required for how Wildcard ICMP type is defined
cmd += " \"IpRanges\": [{"
cmd += " \"CidrIp\":\"%s\"," % ingress[idx]["CidrIp"]
cmd += " \"Description\":\"%s\"" % ingress[idx]["Description"]
cmd += " }]"
cmd += " }]'"
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0):
outer_retcode = retcode # keep any non-zero return code
# egress rules -- as of 1/2018 there aren't any...
return outer_retcode
##############################################################################
# DeleteSecurityGroup
#
# Deletes the security group specified by args.nsg_id, and clears that value
#
# If group Rules attached to the NSG need to be individually deleted, that
# must also be done here if not done automatically by the CSP
#
def DeleteSecurityGroup(self, args):
''' deletes the security group '''
trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))
if (args.nsg_id == None):
error("NSG %s already deleted", args.nsg_name)
return(1)
cmd = "aws ec2 delete-security-group"
cmd += " --group-id %s" % args.nsg_id
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
return retcode
args.nsg_id = None # remove id from args
return(0)
##############################################################################
# CSP specific VM functions
#
# CreateVM Creates a complete fully running VM
# StartVM Starts a VM if it was stopped, returns running
# StopVM Stops the VM if it is currently running
# RestartVM Resets VM, may not quite be same as Stop/Start
# DeleteVM Removes from the CSP a running or stopped VM
##############################################################################
##############################################################################
# CreateVM
#
# Creates a new VM, and returns when it is fully running.
#
# Note that due to the simple way that this code saves its persistent
# data (the id, user name, ... ), only 1 instance can be created
# at a time. Nothing prevents multiple VM's other than the way we save/reference
# the id values. The CSPClass.Delete function removes the saved references
#
# The "args" option specify the CSP specific name, disk size, instance type,
# or any other parameter required to fully define the VM that is to be created
#
# Before creating the VM, effort is made to verify that all the supplied
# parameters, such as the SSH key name are valid.
#
# Network Security Group (NSG) is created if needed.
#
# Returns: 0 successful, VM fully created, up and ssh-able
# 1 failure, VM not created for one of many possible reasons
#
def CreateVM(self, args):
''' Creates a new VM. 'args' holds parameters '''
if (args.vm_id != "None" and args.vm_id != None):
error("Instance \"%s\" already exists, run 'deleteVM' first, or 'clean' if stale arg list" % args.vm_id)
return 1
args.vm_ip = "" # make sure IP address is clear
# ssh key file, builds path from options, checks existance
retcode = self.CheckSSHKeyFilePath(args, ".pem")
if (retcode != 0):
return(retcode)
# security group, create if needed, does nothing if already exists
# consider moving this step outside this VM create so that it better
# reflects real VM timing?
self.Inform("CreateNSG")
if (self.CreateNSG(args) != 0): # sets args.nsg_id
return 1
trace(2, "nsg_id: \"%s\" %s" % (args.nsg_name, args.nsg_id))
# look up image-name, return region specific image id
self.Inform("GetImageId")
if (self.GetImageId(args) != 0):
return 1
trace(2, "image_id: \"%s\" %s" % (args.image_name, args.image_id))
# with security group and image id, we can now create the instance
self.Inform("run-instances")
cmd = "aws ec2 run-instances" # build the AWS command to create an instance
cmd += " --image-id %s" % args.image_id # aws image identifer via self.GetImageid()
cmd += " --instance-type %s" % args.instance_type # t2.micro
cmd += " --region %s" % args.region # us-west-2
cmd += " --key-name %s" % args.key_name # my-security-key
cmd += " --security-group-ids %s" % args.nsg_id # Security Group
retcode, output, errval = self.DoCmd(cmd) # call the AWS command
if (retcode != 0): # check for return code
error ("Problems creating VM \"%s\"" % args.vm_name)
return 1 # nothing to delete, can return
# decode the JSON output
decoded_output = json.loads(output) # convert json format to python structure
trace(3, json.dumps(decoded_output, indent=4, sort_keys=True))
args.vm_id = decoded_output['Instances'][0]['InstanceId']
args.vm_ip = "" # don't have IP we see it running
# Name your instance! . Done here instead of in run-instances call
# it's tricky in bash to get space/qoutes right, at least in original bash code where
# this was orginally written.. :-)
self.Inform("create-tags")
cmd = "aws ec2 create-tags"
cmd += " --resource %s" % args.vm_id
cmd += " --tags Key=Name,Value=%s" % args.vm_name # unique time-stamped name
retcode, output, errval = self.DoCmd(cmd)
# wait till the instance is up and running, pingable and ssh-able
if (retcode == 0):
retcode = self.WaitTillRunning(args, "running", TIMEOUT_1)
# save vm ID and other fields setup here so don't use them if error later
self.ArgSaveToFile(args)
debug(2, "createVM returning %d" % retcode)
return retcode # 0: success, 1: failure
##############################################################################
# StartVM
#
# Starts a Stopped VM, returns it in a fully running state, where we have
# the correct IP address if it changed, and can ssh into the VM
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be started, or invalid ID supplied
#
def StartVM(self, args):
''' Starts the VM '''
rc = 1 # assume error
if (self.CheckID(args) == False):
return 1
# get run status and check current state
status = self.GetRunStatus(args)
if (status == "running"):
return 0 # already running, simply return
elif (status == "stopping"):
buf = "%s is in %s state, can't start running now" % (args.vm_id, status)
error(buf)
elif (status == "stopped" or status == "null"):
rc = 0 # ok to proceed
else:
buf = "id %s is in \"%s\" state, not sure can start running" % (args.vm_id, status)
error(buf)
if (rc != 0):
return rc # unexpected status
self.Inform("StartVM")
# start the VM
cmd = "aws ec2 start-instances"
cmd += " --instance-id %s" % args.vm_id
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
rc = self.WaitTillRunning(args, "running", TIMEOUT_1)
return rc # 0: success, 1: failure
##############################################################################
# StopVM
#
# Stops a running VM. No persistent resources are deallocated, as it's expected
# that the VM will be started again.
#
# Note that most CSP's will continue to charge the customer for the allocated
# resources, even in a Stopped state.
#
# Returns: 0 VM fully stopped
# 1 unable to stop VM. May be invalid ID or connection to CSP
#
def StopVM(self, args):
''' Stop the VM '''
if (self.CheckID(args) == False):
return 1
retcode = self.CheckRunStatus(args, "running")
if (retcode != 0):
error ("Not running")
return retcode
self.Inform("StopVM")
cmd = "aws ec2 stop-instances"
cmd += " --instance-id %s" % args.vm_id
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
if (retcode == 0):
status = self.GetRunStatus(args)
# The instance becomes "Stopping" after a successful API request,
# and the instance becomes "Stopped" after it is stopped successfully.
if (status != "stopping"):
error("Asked VM to stop, but status = \"%s\"" % (status))
retcode = 1
else:
retcode = self.WaitForRunStatus(args, "stopped", TIMEOUT_2)
return retcode # 0: success, 1: failure
##############################################################################
# RestartVM
#
# This function restarts a currently running VM
#
# Returns with the VM in a fully running state, where we have its public IP
# address and can ssh into it
#
# Returns: 0 successful, VM up and ssh-able
# 1 failure, VM not able to be reset, or invalid ID supplied
#
def RestartVM(self, args): # also known as 'reboot' on aws
''' Restarts the VM '''
if (self.CheckID(args) == False):
return 1
retcode = self.CheckRunStatus(args, "running")
if (retcode != 0):
error ("Not running")
return retcode
self.Inform("RestartVM")
cmd = "aws ec2 reboot-instances"
cmd += " --instance-id %s" % args.vm_id
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
# on aws after "reset", the status never becomes "un-running"
# anytime during the reset process -- so we check when it FAILS
# to ping to know if the restart actually occurred. Then we simply wait
# till it's back up again - pingable and ssh-able to know it's
# running
if (retcode == 0):
if (args.pingable == 1):
retcode = self.WaitForPing(args, False, TIMEOUT_2)
else:
time.sleep(5) # let VM go down enough so SSH stops (we hope)
retcode = 0 # fake success, since ping isn't supported
if (retcode != 0):
error("never went un-pingable. Did VM restart?")
else:
retcode = self.WaitTillRunning(args, "running", TIMEOUT_1)
return retcode # 0: success, 1: failure
##############################################################################
# DeleteVM
#
# Deletes a VM and releases all of its resources other than the Network Security
# Group.
#
# Returns: 0 success, VM and all it's resource are gone
# 1 problems..
#
def DeleteVM(self, args):
''' delete the vm and all the pieces '''
if (self.CheckID(args) == False):
return 1
cmd = "aws ec2 terminate-instances"
cmd += " --instance-id %s" % args.vm_id
cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
if ( retcode == 0 ):
retcode = self.WaitForRunStatus(args, "terminated", TIMEOUT_1)
# Is error handled ok? What if problems deleting? -- instance left around?
if (retcode == 0): # successful so far?
self.Clean(args) # remove file with the persistent id, ip address, ..
self.m_args_fname = "" # clear name, so won't write back args when done
return retcode # 0: success, 1: failure
##############################################################################
# CSP specific utility functions
#
# ShowRunning Shows all the account's running VM's
# GetRegions Returns proper list of regions
##############################################################################
##############################################################################
# ShowRunning
#
# CSP specific information function to print out the name, type, description
# and start time of all the running instances in the region
#
# Returns: 0 1 or more running instances were found in CSP's args.region
# 1 no running instances found
#
def ShowRunning(self, args):
''' Shows list of running instances within region of account '''
lines_printed = 0
cmd = "aws ec2 describe-instances"
# cmd += " --region %s" % args.region # us-west-2
retcode, output, errval = self.DoCmd(cmd)
if ( retcode == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output["Reservations"]) # number of security groups
for idx in range(0, items):
tagname = "No 'Name' tag provided to identify instance" # expect the worse
state = decoded_output["Reservations"][idx]["Instances"][0]["State"]["Name"]
if (state == "running"):
try: # may not exist, and may be multiple tags...
tags = decoded_output["Reservations"][idx]["Instances"][0]["Tags"]
tlen = len(tags)
for tidx in range(0, tlen):
if (tags[0]["Key"] == "Name"):
tagname = tags[0]["Value"]
break;
except:
dummy = 1
if (lines_printed == 0):
print("# %s:" % self.m_class_name )
print(" %-36s %-16s %10s \"%s\"" %
(decoded_output["Reservations"][idx]["Instances"][0]["InstanceId"],
decoded_output["Reservations"][idx]["Instances"][0]["InstanceType"],
decoded_output["Reservations"][idx]["Instances"][0]["LaunchTime"][0:10],
tagname))
lines_printed += 1
if (lines_printed == 0):
print("# %s: No running instances found" % self.m_class_name )
return retcode # 0 success, !0 failure
##############################################################################
# GetRegions
#
# Returns a list of regions where VMs can be created by this CSP.
#
# These are basically the names of the CSP's data centers... Each data center
# may offer different resources. Don't care about that here. Just need the
# name.
#
# Used in a choice-list in the arg parser when user gives a non-default
# region name to catch invalid names before any real processing is done
#
# Returns: list of names
def GetRegions(self):
''' Returns a list of region names for the CSP '''
mylist = []
cmd = "aws ec2 describe-regions"
retcode, output, errval = self.DoCmd(cmd)
if ( retcode == 0 ):
decoded_output = json.loads(output)
items = len(decoded_output["Regions"]) # number of regions
for idx in range(0, items):
name = decoded_output["Regions"][idx]["RegionName"]
mylist.append(str(name))
return mylist
| ngc-examples-master | ncsp/aws_funcs.py |
# cspbaseclass.py 3/23/2018
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Cloud Service Provider base class
#
import os
import sys
import time
import subprocess
import json
g_trace_level = 0 # global trace level, see trace_do and debug funcs
##############################################################################
# common helper functions used throughout
def error(*args):
''' error output function '''
print "ERROR: " + ' '.join(args) # may have trouble if integer in args list
def trace_setlevel(trace_level):
''' sets trace_level, returns current value '''
global g_trace_level
g_trace_level = trace_level # set global trace level - done
return g_trace_level
def trace_do(trace_level): # trace_level normally >0 may be 0 or even negative.
''' return true if supplied 'trace_level' <= current value '''
global g_trace_level # g_trace_level should be >= 0
# print ("trace_level:%d g_trace_level:%d" %(trace_level, g_trace_level))
return (trace_level <= g_trace_level) # true if func trace level <= global trace level
def trace(trace_level, *args):
''' a debugging function, prints out line and stack trace if supplied trace_level <= current '''
if (trace_do(trace_level) == True):
caller_frame = sys._getframe(1) # caller stack frame
print "TRACE:%d:%s %s" % (trace_level, caller_frame.f_code.co_name, args)
def debug(trace_level, *args): # like trace, but with no stack trace
''' a debugging function: prints out arg if supplied trace_level <= current '''
if (trace_do(trace_level) == True):
print "%s" % args
def debug_stop(*args):
''' a debugging function: prints out arguments and exits '''
caller_frame = sys._getframe(1) # caller stack frame
print "DEBUG_STOP:%s %s" % (caller_frame.f_code.co_name, args)
sys.exit(1)
def Which(program):
''' same as Linux 'which' command, returns full path to executable or None if not found '''
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
##############################################################################
# CSPBaseClass
#
# Cloud Service Provided common class helper functions
##############################################################################
class CSPBaseClass:
''' Common functions for all Class '''
# Some generic class helper functions
def __init__(self, name, module_path):
''' Class Initialization '''
self.m_class_name = name
# file locations, all under persistent $HOME directory on machine
homedir = os.path.expanduser("~")
partial = "%s/ncsp/" % homedir
if os.path.isdir(partial) == False:
os.mkdir(partial)
partial += self.m_class_name
if os.path.isdir(partial) == False:
os.mkdir(partial)
self.m_save_path = partial + "/data/"
self.m_log_path = partial + "/logs/"
if os.path.isdir(self.m_save_path) == False:
os.mkdir(self.m_save_path)
if os.path.isdir(self.m_log_path) == False:
os.mkdir(self.m_log_path)
# full path names to various files we create and use
self.m_cmd_fname = self.m_log_path + "cmds"
self.m_args_fname = self.m_save_path + "args"
self.m_regions_fname = self.m_save_path + "regions"
self.m_module_path = module_path # path where the modules are
self.m_inform_pos = 0 # used for spinner
# append to the logfile header
thetime = time.strftime("%c", time.localtime())
self.Log("\n#\n# %s\n#\n" % thetime)
def CheckSSHKeyFilePath(self, args, extension):
''' Builds ssh key file from options, verifies existance '''
# key_name and key_path are user defined.
# Unlike a unix shell, Python does not do any automatic path expansions like '~'
# Something like "~/.ssh/my-security-key.pem"
key_file = "%s%s%s" % (args.key_path, args.key_name, extension)
# Produces "/home/<username>/.ssh/my-security-key.pem"
key_file = os.path.expanduser(key_file)
if os.path.exists(key_file):
args.key_file = key_file
return 0 # success
else:
error("Could not find public keyfile \"%s\" -- Aborting" % key_file)
return 1 # check proper error response??
def Log(self, string):
''' simple time stamping log function '''
with open(self.m_cmd_fname, "a") as f:
f.write(time.strftime("%Y%m%d.%I%M%S: ", time.localtime()))
f.write(string)
f.write("\n")
f.close()
def ArgSaveToFile(self, args):
''' save all the parser default arguments to a file '''
if (self.m_args_fname == "" ):
return 0 # no file name, used in deleteVM to say don't write back
vargs = vars(args) # get whatever "namespace(..)" off args
trace(2, vargs)
with open(self.m_args_fname, "w") as f:
json.dump(vargs, f) # save dictionary as a json file
return 0
def ArgRestoreFromFile(self, parser):
''' restores default args in parser if file containing them exists '''
if (self.m_args_fname == "" ):
return
# pull in saved key,values from file, append to provided vargs
if os.path.exists(self.m_args_fname):
mydict = []
with open(self.m_args_fname, "r") as f:
mydict = json.load(f);
debug(2, json.dumps(mydict, indent=4, sort_keys=True))
for item in mydict.items():
kv = {item[0] : item[1]} # convert key,value to single item dictionary
parser.set_defaults(**kv) # update default value for key
return 0
return 1
def ArgShowFile(self):
''' displays persistent args file '''
print ("# %s" % self.m_args_fname)
if os.path.exists(self.m_args_fname):
with open(self.m_args_fname, "r") as f:
mydict = json.load(f);
print json.dumps(mydict, indent=4, sort_keys=True)
else:
print ("# does not exist")
return 0
def Clean(self, args):
''' erases cached args and other persistent file '''
# prevent problem if host reuses the IP address in a later VM
self.DeleteIPFromSSHKnownHostsFile(args)
# remove the persistent args
if (self.m_args_fname != "" ):
if (os.path.exists(self.m_args_fname)):
os.remove(self.m_args_fname);
# remove cached list of CSP's regions
if (self.m_regions_fname != "" ):
if (os.path.exists(self.m_regions_fname)):
os.remove(self.m_regions_fname);
return 0
def Inform(self, info):
''' spinner busy clock thing for long wait items '''
if (True): # depends on trace level?
myclock = [ "|", "/", "-", "\\" ]
if (self.m_inform_pos > 3):
self.m_inform_pos = 0 # restart back at beginning of 'clock' array
# azure has a space char before the clock char for the prompt to sit on
# emulate that look here
sys.stdout.write(" %s %s .. \r" % (myclock[self.m_inform_pos], info))
if (trace_do(1)):
sys.stdout.write("\n") # if tracing, go to new line
sys.stdout.flush()
self.m_inform_pos += 1
else:
print info
def ClassName(self):
''' return name of the class (azure, aws, ali...)'''
return self.m_class_name
def CheckID(self, args):
''' checks that the VM id exists -- if doesn't, don't know if we have a running vm '''
# Note that string "None" (as a string) sneaks into arg file after
# delete. Need to check for it in addition to None (the object)
if args.vm_id is None or args.vm_id == "" or args.vm_id == "None":
error("No %-5s vm currently defined" % self.m_class_name);
return False
else:
return True
def DoCmdNoError(self, cmd):
''' Blocking command -- returns command output, doesn't report error'''
debug(1, cmd)
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output, errval = child.communicate() # returns data from stdout, stderr
debug(3, output)
return (child.returncode, output, errval) # pass back retcode, stdout, stderr
def DoCmd(self, cmd):
''' Blocking command -- returns command output'''
retcode, output, errval = self.DoCmdNoError(cmd) # Do the work
if retcode != 0: # report any error
if (trace_do(1) == False): # if we have tracing on >=1, already printed cmd
print("cmd: %s" % cmd)
print("errval: \"%s\" child.returncode %d" % (errval, retcode)) # debug
return (retcode, output, errval) # pass back retcode, stdout, stderr
# DeleteIPFromSSHKnownHostsFile
#
# the CSP's may (will) eventually reuse the same IP address for new VMs.
# However the new VM's will have a different ECDSA key, and you will
# receive nasty messages from ssh when you try to talk to this new
# VM if key saved in ~/.ssh/known_hosts for this IP has not been
# removed..
#
# This function should be called when the VM is deleted to avoid this
# type of unnecessary problem from confusing the user
#
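# For example (hypothetical address), deleting a VM that had IP 54.0.0.1 runs
# "ssh-keygen -R 54.0.0.1" so the stale host key is dropped from ~/.ssh/known_hosts
#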
def DeleteIPFromSSHKnownHostsFile(self, args):
''' reused ip's for new vm's cause first-time ssh issues, remove from known-hosts file '''
if (args.vm_ip != None and args.vm_ip != "None" and args.vm_ip != ""):
# use 'ssh-keygen -R <ipaddr>' to remove offending value
# value are hashed? on linux, not direct IP address
cmd="ssh-keygen -R %s 2> /dev/null" % args.vm_ip
self.DoCmd(cmd) # not critical if it fails...
return 0
# Ssh
#
# SSH should not be Cloud Service Provider dependent, so it's a common function
# passes back the triple: 'returncode, stdoutstr, stderrstr' in all cases
# if 'doprint' is set, locally prints (used when ad-hoc user ssh cmds are given)
# args is from the command parser, and includes either the historical or new vm_ip addr
# argv is directly from the typed in command string, and is what we want the VM to do
#
# Returns: retcode, stdoutstr, stderrstr
#
def Ssh(self, args, doprint, argv):
''' SSH into instance, maybe running a command then returning '''
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False): # checks for a valid VM id
return 1
stdoutstr = ""
stderrstr = ""
retcode = 1 # assume error, until this gets set to 0
if (args.vm_ip == ""):
return retcode, stdoutstr, stderrstr
cmd = "ssh"
# cmd += " -oStrictHostKeyChecking=no" # did when started earler, should not be needed here
# ssh keyfile -- which may not be needed in all cases. Only put it here (with -i)
# if it's supplied.
if (args.key_file != None and args.key_file != ""):
cmd += " -i %s " % args.key_file
# user name and IP
cmd += " %s@%s" % (args.user, args.vm_ip) # 'user' name can't have a /n in string!
# add additional user supplied args to command string
llen = argv.__len__()
if llen == 0:
subprocess.call(cmd, shell=True)
else:
# print "llen=%d" % llen
cmd += " " # make sure there's space after last token on cmd string
# convert from array back to a string
# NOTE: the main usage of this code if from argv[] string array
# from the command line. Funky usage when using ssh commands
# internally (and funky means that we have to give this function
# an initialized argv[] string array). But doing this allows
# only one function to be created...
for i in range(0, llen):
cmd += argv.__getitem__(i)
cmd += " "
# common errcode, stdout, stderr return from running the command
retcode, stdoutstr, stderrstr = self.DoCmd(cmd)
# requested to print it? (do this for ad-hoc cmds from user)
if doprint:
print stdoutstr
# return the values, so parser code can play with it
return retcode, stdoutstr, stderrstr
def Ping(self, args):
if (args.pingable == 0):
error("'%s' interface does not currently support ping" % self.m_class_name)
return 1
# check for a valid VM id, returns if it's not set, indicating that
# either a VM hasn't been created, or it was deleted.
if (self.CheckID(args) == False): # checks for a valid VM id
return 1
if (args.vm_ip == "" or args.vm_ip == None or args.vm_ip == "None"):
error("No IP is assigned to %s" % args.vm_name)
# ping a few times, all output goes into 'output' or 'errval'
cmd = "ping -t 3 -c 3 %s" % args.vm_ip
# print cmd
retcode, output, errval = self.DoCmd(cmd)
# report what was found
if (retcode == 0):
print "ping to %s was successful" % args.vm_ip
else:
error("ping to %s failed" % args.vm_ip)
print output
print errval
return retcode
def CheckRunStatus(self, args, value):
''' Sees if the current run-status is value '''
status = self.GetRunStatus(args)
# print("CheckRunStatus-status:%s looking for-value:%s" % (status, value))
if (status.lower() == value.lower()): # case insensitive
return 0 # 0 for status=='value' success, 1 for something else
else:
return 1 # not what we want
def WaitForRunStatus(self, args, value, timeout):
''' waits for status state to be value '''
now = time.time() # floating point number
end = now + timeout
rc = self.CheckRunStatus(args, value)
while (rc != 0 and now < end):
time.sleep(0.5) # Wait time
rc = self.CheckRunStatus(args, value) # want to be value, returns 0 if is
now = time.time() # floating point number for time
if (rc != 0):
error ("Timeout " + value)
return rc # 0 if status=='value' within timeout, non-zero if not
def WaitForPing(self, args, state, timeout):
''' Attempts to Ping, or not to Ping VM, waits till get a response '''
''' Note: VM's may not support ping see args.pingable flag '''
now = time.time() # floating point number
end = now + timeout
ip = args.vm_ip
cmd = "ping -c 1 -W 1 "
cmd += ip
retcode, output, errval = self.DoCmdNoError(cmd)
if (retcode == 0):
pingable = True # wait till can ping
info = "wait for not ping-able"
else:
pingable = False
info = "wait for ping-able"
# can check here if pingable (state == True) or not-pingable (state == False)
while (pingable != state and now < end):
self.Inform(info)
time.sleep(0.5) # Wait time
retcode, output, errval = self.DoCmdNoError(cmd)
if (retcode == 0):
pingable = True # wait till can ping
else:
pingable = False
now = time.time() # floating point number
if (pingable == state): # response from ping-cmd is expected state
# print "PING SUCCESSFUL: \"%s\"" % cmd
return(0) # 0 returned for success -- is pingable
else:
if (now > end):
error ("Ping: Timeout %s" %(cmd))
else:
error ("Ping: Failed \"%s\"\n" %(cmd, errval))
return(1) # 1 returned for failure, not pingable
def WaitTillCanSSH(self, args, sshcmd, timeout):
''' Spins till gets a ssh response from the VM '''
# Ssh-ability is not really poll-able -- doesn't return till either timeout
# or success, meaning that there are no Inform notifier updates
# occurring while booting and waiting for the OS to come up..
# The best solution is to have ping response to VM working, so
# this step really isn't an issue
now = time.time() # floating point number
end = now + timeout
cmd = "ssh -oStrictHostKeyChecking=no " # allow to be added to /.ssh/known_hosts
cmd += "-o ConnectTimeout=2 " # quicker timeout, see clock move
# ssh keyfile -- which may not be needed in all cases.
# Only put it here (with -i) if it's supplied.
if (args.key_file != None and args.key_file != ""):
cmd += " -i %s " % args.key_file
cmd += "%s@%s " % (args.user, args.vm_ip) # space after ip, before following cmd
cmd += sshcmd # the ssh command we want to run
cmd += " 2> /dev/null" # don't want to see stderr msgs
retcode, output, errval = self.DoCmdNoError(cmd)
cnt = 0
while (retcode != 0 and now < end):
cnt = cnt + 1
self.Inform("wait for ssh-able %d" % cnt)
time.sleep(1) # Wait time
retcode, output, errval = self.DoCmdNoError(cmd)
now = time.time() # floating point number
if (retcode == 0): # response from ping-cmd is 0 if able to ping
# print "SSH SUCCESSFUL: \"%s\"" % cmd
return(0) # 0 returned for success
else:
if (now > end):
error ("SSH Timeout: \"%s\"" %(cmd))
else:
error ("SSH Failed: : \"%s\"\n%s" %(cmd, errval))
return(1) # 1 returned for timeout, can't ssh
def WaitTillRunning(self, args, value, timeout):
''' called after launch, waits till can get IP from running instance '''
''' value is "Running" for alibaba, or "running" for aws -- case dependent '''
# initially right after 'start', status will be 'pending'
# wait till we get to a status value of 'running'
rc = self.WaitForRunStatus(args, value, timeout) # Has different cases/values for CSP!
if (rc != 0):
error("Did not get run status writing timeout")
return rc # fail, not runable, return 1
# after the CSP says we are "running", the next thing we will need
# is the IP adresss of the VM. This may be setup in create, or we may
# need be able to query for it from the CSP as is needed in aws. In any
# case, don't leave this step till have the IP address to the VM.
# NOTE: the VM is not up enough to respond to the IP, it's still booting
rc = self.GetIPSetupCorrectly(args) # CSP specific way to get IP address
if (rc != 0): # may have been done in Create for some CSPs
return rc
# make sure we can ping -- this takes a few seconds after the kernel boots
# for the network to come up to return pings.
# pinging might not be enabled in network config for CSP
if (args.pingable != 0):
rc = self.WaitForPing(args, True, timeout)
if (rc != 0):
return rc # returns 1 - not pingable
# See if we can ssh this beast. Note that this might be the first time
# we do so with the given IP, so the known_host files may not have it.
# this function handles that so user isn't prompted.
#
# spins, waiting for SSH to work
rc = self.WaitTillCanSSH(args, "uname -a", timeout)
if (rc != 0):
return rc # returns 1 - could not ssh
return 0 # 0: success, running - 1:fail, not running
def KernelBootTime(self, args):
''' ask VM for kernel boot time '''
kernel = 0
user = 0
total = 0
rc = self.WaitTillCanSSH(args, "uname -a", 10)
if (rc != 0):
error("Could not ssh")
return rc
# Use "systemd-analyze" to grab the kernel boot time from the VM
# Grepping through syslog is error-prone, especially if it's booted
# multiple times, or bootup ordering changes.
#
# NOTE: embedded quotes are fun here -- requires something like this
# (example): "\"grep \\\"[1]: Startup finished\\\" /var/log/syslog | tail -n 1\""
cmd = [] # might there be a better way to do this? (self.Ssh takes an array of strings)
cmd.append("\"systemd-analyze\"")
retcode, stdoutstr, stderrstr = self.Ssh(args, False, cmd)
# stdoutstr should be:
# Startup finished in 3.582s (kernel) + 5.972s (userspace) = 9.555s
if (retcode == 0): # successful ssh
tmpary = stdoutstr.split() # ['Startup', 'finished', 'in', '3.582s', '(kernel)', '+', '5.972s', '(userspace)', '=', '9.555s']
if tmpary[4] != "(kernel)" or tmpary[7] != "(userspace)":
error("not expected output from systemd-analyze:" + stdoutstr)
retcode = 1
else:
kernel = tmpary[3]
user = tmpary[6]
total = tmpary[9]
# print kernel + user + total
else:
print stderrstr # unhappy
return retcode, kernel, user, total # 4 values
def Show(self, args):
''' Shows detailed information about the vm -- name, size, status... '''
print ("%-10s \"%s\" %s %s" % ("vm", args.vm_name, args.vm_id, args.vm_ip))
print ("%-10s \"%s\" %s" % ("nsg", args.nsg_name, args.nsg_id))
return 0
def Status(self, args):
''' Shows run/halt status of VM '''
if (self.CheckID(args) == False):
return 1
status = self.GetRunStatus(args) # prints status output via Inform()
print("\n")
return 0
def GetRegionsCached(self):
''' returns the regions list for csp, cached to file first time '''
# if we have done this before, the regions are cached in a file
try:
with open(self.m_regions_fname, "r") as f:
mylist = json.load(f);
except:
mylist = self.GetRegions() # csp dependent query function
with open(self.m_regions_fname, "w") as f:
json.dump(mylist, f)
return mylist # return list
def ShowRegions(self, args):
''' shows the regions supported by csp '''
mylist = self.GetRegionsCached()
for region in mylist:
print (" %s" % region)
return 0
def ShowIP(self, args):
''' shows the public IP address for the VM '''
if (self.CheckID(args) == False):
return 1
print args.vm_ip
return 0
##############################################################################
# Top level Network Security Group (NSG) command functions - CSP independent
#
# The NSG is created per-user, not per VM instance, under the assumption that
# it doesn't need to change for the different types of VM's that the user
# creates. See how the args.nsg_name field is defined in add_common_options()
# to understand how this could be changed.
#
# NOTE: it's up to the csp specific deleteVM implementations to decide to
# delete the Network Security Group or not.
##############################################################################
def ShowNSGs(self, args):
return(self.ShowSecurityGroups(args))
def CreateNSG(self, args):
''' returns security group, creates/queries if it does not currently exist '''
self.Inform("CreateNSG")
# Do we have ID from user or from persistent?
if (args.nsg_id != "" and args.nsg_id != None and args.nsg_id != "None"):
trace(2, "Security group \"%s\" exists: %s" % (args.nsg_name, args.nsg_id))
return 0
if (args.nsg_name == ""):
error("Network Security Group name is \"\" - aborting")
sys.exit(1)
# Does it exist by name? Don't need to create it if so
self.Inform("ExistingNSG")
if (self.ExistingSecurityGroup(args) == 0):
trace(2, "Security group \"%s\" found: %s" % (args.nsg_name, args.nsg_id))
return 0
# Create a new security group
# The ID is written to args.nsg_id
self.Inform("CreateNSG")
rc = self.CreateSecurityGroup(args)
if (rc != 0):
return rc
trace(2, "Created Security Group \"%s\": %s" % (args.nsg_name, args.nsg_id))
return 0
def DeleteNSG(self, args):
''' deletes security group if it currently exists '''
rc = 0
# Do we have ID from user or from persistent?
self.Inform("DeleteNSG")
if (args.nsg_id != "" and args.nsg_id != None and args.nsg_id != "None"):
rc = self.DeleteSecurityGroup(args)
elif (args.nsg_name != "" and args.nsg_name != None and args.nsg_name != "None"):
if (self.ExistingSecurityGroup(args) == 0): # Does it exist by that name?
rc = self.DeleteSecurityGroup(args) # above call set args.nsg_id
return rc
| ngc-examples-master | ncsp/cspbaseclass.py |
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from os.path import dirname, join
import tensorflow as tf
import tensorflow_hub as hub
def main():
model = tf.keras.Sequential([
hub.KerasLayer(
"https://tfhub.dev/google/imagenet/resnet_v2_50/classification/5", dtype=tf.float32)
])
# None enables dynamic batch sizes
model.build([None, 224, 224, 3])
model.save(join(dirname(__file__), 'resnet50'))
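# Writes a TensorFlow SavedModel directory named 'resnet50' next to this script;
# the downstream conversion step (e.g. to ONNX/TensorRT) is assumed to consume that directory.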
if __name__ == '__main__':
main()
| ProViz-AI-Samples-master | inference_partner_training/TensorRT/Models/fetch_model.py |
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import onnx
import onnx_graphsurgeon as gs
# Load ONNX model and create IR
graph = gs.import_onnx(onnx.load(str(sys.argv[1])))
# Find all BatchNormalization nodes
bn_nodes = [node for node in graph.nodes if node.op == "BatchNormalization"]
# Disconnect BatchNormalization nodes and insert MyPlugin nodes
for node in bn_nodes:
inputs = node.inputs[:1]
outputs = node.outputs[:]
node.inputs[0].outputs.remove(node)
node.outputs[0].inputs.remove(node)
graph.layer(op="MyPlugin", inputs=inputs, outputs=outputs, attrs={"num_inputs": 1})
# Remove dangling BatchNormalization nodes
graph.cleanup()
# Save modified ONNX to disk
onnx.save(gs.export_onnx(graph), str(sys.argv[2]))
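# Usage sketch (paths are illustrative): python replace_bn.py model.onnx model_with_plugin.onnx
# The output model references a custom "MyPlugin" op, so a matching TensorRT plugin
# implementation is assumed to be registered before the network can be built.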
| ProViz-AI-Samples-master | inference_partner_training/TensorRT/TRTPluginSample/replace_bn.py |
# Copyright (c) 2017 NVIDIA Corporation
import argparse
from math import sqrt
parser = argparse.ArgumentParser(description='RMSE_calculator')
parser.add_argument('--path_to_predictions', type=str, default="", metavar='N',
help='Path file with actual ratings and predictions')
parser.add_argument('--round', action='store_true',
help='round predictions to nearest')
args = parser.parse_args()
print(args)
def main():
with open(args.path_to_predictions, 'r') as inpt:
lines = inpt.readlines()
n = 0
denom = 0.0
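# Each line is expected to be tab-separated: userId, itemId, prediction, actual rating
# (the format written by infer.py in this repo).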
for line in lines:
parts = line.split('\t')
prediction = float(parts[2]) if not args.round else round(float(parts[2]))
rating = float(parts[3])
denom += (prediction - rating)*(prediction - rating)
n += 1
print("####################")
print("RMSE: {}".format(sqrt(denom/n)))
print("####################")
if __name__ == '__main__':
main() | DeepRecommender-master | compute_RMSE.py |
# Copyright (c) 2017 NVIDIA Corporation
import torch
import argparse
from reco_encoder.data import input_layer
from reco_encoder.model import model
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import torch.nn as nn
from torch.autograd import Variable
import copy
import time
from pathlib import Path
from logger import Logger
from math import sqrt
import numpy as np
import os
parser = argparse.ArgumentParser(description='RecoEncoder')
parser.add_argument('--lr', type=float, default=0.00001, metavar='N',
help='learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0, metavar='N',
help='L2 weight decay')
parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',
help='dropout drop probability')
parser.add_argument('--noise_prob', type=float, default=0.0, metavar='N',
help='noise probability')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='global batch size')
parser.add_argument('--summary_frequency', type=int, default=100, metavar='N',
help='how often to save summaries')
parser.add_argument('--aug_step', type=int, default=-1, metavar='N',
help='do data augmentation every X step')
parser.add_argument('--constrained', action='store_true',
help='constrained autoencoder')
parser.add_argument('--skip_last_layer_nl', action='store_true',
help='if present, decoder\'s last layer will not apply non-linearity function')
parser.add_argument('--num_epochs', type=int, default=50, metavar='N',
help='maximum number of epochs')
parser.add_argument('--save_every', type=int, default=3, metavar='N',
help='save every N number of epochs')
parser.add_argument('--optimizer', type=str, default="momentum", metavar='N',
help='optimizer kind: adam, momentum, adagrad or rmsprop')
parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N',
help='hidden layer sizes, comma-separated')
parser.add_argument('--gpu_ids', type=str, default="0", metavar='N',
help='comma-separated gpu ids to use for data parallel training')
parser.add_argument('--path_to_train_data', type=str, default="", metavar='N',
help='Path to training data')
parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N',
help='Path to evaluation data')
parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N',
help='type of the non-linearity used in activations')
parser.add_argument('--logdir', type=str, default="logs", metavar='N',
help='where to save model and write logs')
args = parser.parse_args()
print(args)
use_gpu = torch.cuda.is_available() # global flag
if use_gpu:
print('GPU is available.')
else:
print('GPU is not available.')
def do_eval(encoder, evaluation_data_layer):
encoder.eval()
denom = 0.0
total_epoch_loss = 0.0
for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()):
inputs = Variable(src.cuda().to_dense() if use_gpu else src.to_dense())
targets = Variable(eval.cuda().to_dense() if use_gpu else eval.to_dense())
outputs = encoder(inputs)
loss, num_ratings = model.MSEloss(outputs, targets)
total_epoch_loss += loss.item()
denom += num_ratings.item()
return sqrt(total_epoch_loss / denom)
def log_var_and_grad_summaries(logger, layers, global_step, prefix, log_histograms=False):
"""
Logs variable and grad stats for layer. Transfers data from GPU to CPU automatically
:param logger: TB logger
:param layers: param list
:param global_step: global step for TB
:param prefix: name prefix
:param log_histograms: (default: False) whether or not log histograms
:return:
"""
for ind, w in enumerate(layers):
# Variables
w_var = w.data.cpu().numpy()
logger.scalar_summary("Variables/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_var),
global_step)
if log_histograms:
logger.histo_summary(tag="Variables/{}_{}".format(prefix, ind), values=w.data.cpu().numpy(),
step=global_step)
# Gradients
w_grad = w.grad.data.cpu().numpy()
logger.scalar_summary("Gradients/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_grad),
global_step)
if log_histograms:
logger.histo_summary(tag="Gradients/{}_{}".format(prefix, ind), values=w.grad.data.cpu().numpy(),
step=global_step)
def main():
logger = Logger(args.logdir)
params = dict()
params['batch_size'] = args.batch_size
params['data_dir'] = args.path_to_train_data
params['major'] = 'users'
params['itemIdInd'] = 1
params['userIdInd'] = 0
print("Loading training data")
data_layer = input_layer.UserItemRecDataProvider(params=params)
print("Data loaded")
print("Total items found: {}".format(len(data_layer.data.keys())))
print("Vector dim: {}".format(data_layer.vector_dim))
print("Loading eval data")
eval_params = copy.deepcopy(params)
# evaluation is done one user at a time by iterate_one_epoch_eval(), so no examples are missed
eval_params['data_dir'] = args.path_to_eval_data
eval_data_layer = input_layer.UserItemRecDataProvider(params=eval_params,
user_id_map=data_layer.userIdMap, # the mappings are provided
item_id_map=data_layer.itemIdMap)
eval_data_layer.src_data = data_layer.data
rencoder = model.AutoEncoder(layer_sizes=[data_layer.vector_dim] + [int(l) for l in args.hidden_layers.split(',')],
nl_type=args.non_linearity_type,
is_constrained=args.constrained,
dp_drop_prob=args.drop_prob,
last_layer_activations=not args.skip_last_layer_nl)
os.makedirs(args.logdir, exist_ok=True)
model_checkpoint = args.logdir + "/model"
path_to_model = Path(model_checkpoint)
if path_to_model.is_file():
print("Loading model from: {}".format(model_checkpoint))
rencoder.load_state_dict(torch.load(model_checkpoint))
print('######################################################')
print('######################################################')
print('############# AutoEncoder Model: #####################')
print(rencoder)
print('######################################################')
print('######################################################')
gpu_ids = [int(g) for g in args.gpu_ids.split(',')]
print('Using GPUs: {}'.format(gpu_ids))
if len(gpu_ids)>1:
rencoder = nn.DataParallel(rencoder,
device_ids=gpu_ids)
if use_gpu: rencoder = rencoder.cuda()
if args.optimizer == "adam":
optimizer = optim.Adam(rencoder.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
elif args.optimizer == "adagrad":
optimizer = optim.Adagrad(rencoder.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
elif args.optimizer == "momentum":
optimizer = optim.SGD(rencoder.parameters(),
lr=args.lr, momentum=0.9,
weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=[24, 36, 48, 66, 72], gamma=0.5)
elif args.optimizer == "rmsprop":
optimizer = optim.RMSprop(rencoder.parameters(),
lr=args.lr, momentum=0.9,
weight_decay=args.weight_decay)
else:
raise ValueError('Unknown optimizer kind')
t_loss = 0.0
t_loss_denom = 0.0
global_step = 0
if args.noise_prob > 0.0:
dp = nn.Dropout(p=args.noise_prob)
for epoch in range(args.num_epochs):
print('Doing epoch {} of {}'.format(epoch, args.num_epochs))
e_start_time = time.time()
rencoder.train()
total_epoch_loss = 0.0
denom = 0.0
if args.optimizer == "momentum":
scheduler.step()
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.cuda().to_dense() if use_gpu else mb.to_dense())
optimizer.zero_grad()
outputs = rencoder(inputs)
loss, num_ratings = model.MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
global_step += 1
t_loss += loss.item()
t_loss_denom += 1
if i % args.summary_frequency == 0:
print('[%d, %5d] RMSE: %.7f' % (epoch, i, sqrt(t_loss / t_loss_denom)))
logger.scalar_summary("Training_RMSE", sqrt(t_loss/t_loss_denom), global_step)
t_loss = 0
t_loss_denom = 0.0
log_var_and_grad_summaries(logger, rencoder.encode_w, global_step, "Encode_W")
log_var_and_grad_summaries(logger, rencoder.encode_b, global_step, "Encode_b")
if not rencoder.is_constrained:
log_var_and_grad_summaries(logger, rencoder.decode_w, global_step, "Decode_W")
log_var_and_grad_summaries(logger, rencoder.decode_b, global_step, "Decode_b")
total_epoch_loss += loss.item()
denom += 1
#if args.aug_step > 0 and i % args.aug_step == 0 and i > 0:
if args.aug_step > 0:
# Dense re-feeding: treat the network's own dense output as an augmented
# training example and run args.aug_step additional update steps on it
for t in range(args.aug_step):
inputs = Variable(outputs.data)
if args.noise_prob > 0.0:
inputs = dp(inputs)
optimizer.zero_grad()
outputs = rencoder(inputs)
loss, num_ratings = model.MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
e_end_time = time.time()
print('Total epoch {} finished in {} seconds with TRAINING RMSE loss: {}'
.format(epoch, e_end_time - e_start_time, sqrt(total_epoch_loss/denom)))
logger.scalar_summary("Training_RMSE_per_epoch", sqrt(total_epoch_loss/denom), epoch)
logger.scalar_summary("Epoch_time", e_end_time - e_start_time, epoch)
if epoch % args.save_every == 0 or epoch == args.num_epochs - 1:
eval_loss = do_eval(rencoder, eval_data_layer)
print('Epoch {} EVALUATION LOSS: {}'.format(epoch, eval_loss))
logger.scalar_summary("EVALUATION_RMSE", eval_loss, epoch)
print("Saving model to {}".format(model_checkpoint + ".epoch_"+str(epoch)))
torch.save(rencoder.state_dict(), model_checkpoint + ".epoch_"+str(epoch))
print("Saving model to {}".format(model_checkpoint + ".last"))
torch.save(rencoder.state_dict(), model_checkpoint + ".last")
# save to onnx
dummy_input = Variable(torch.randn(params['batch_size'], data_layer.vector_dim).type(torch.float))
torch.onnx.export(rencoder.float(), dummy_input.cuda() if use_gpu else dummy_input,
model_checkpoint + ".onnx", verbose=True)
print("ONNX model saved to {}!".format(model_checkpoint + ".onnx"))
if __name__ == '__main__':
main()
| DeepRecommender-master | run.py |
# THIS FILE IS COPY-PASTED FROM HERE: https://github.com/yunjey/pytorch-tutorial/tree/master/tutorials/04-utils/tensorboard
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import tensorflow as tf
import numpy as np
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values ** 2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush() | DeepRecommender-master | logger.py |
# Copyright (c) 2017 NVIDIA Corporation
import torch
import argparse
import copy
from reco_encoder.data import input_layer
from reco_encoder.model import model
from torch.autograd import Variable
from pathlib import Path
parser = argparse.ArgumentParser(description='RecoEncoder')
parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',
help='dropout drop probability')
parser.add_argument('--constrained', action='store_true',
help='constrained autoencoder')
parser.add_argument('--skip_last_layer_nl', action='store_true',
help='if present, decoder\'s last layer will not apply non-linearity function')
parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N',
help='hidden layer sizes, comma-separated')
parser.add_argument('--path_to_train_data', type=str, default="", metavar='N',
help='Path to training data')
parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N',
help='Path to evaluation data')
parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N',
help='type of the non-linearity used in activations')
parser.add_argument('--save_path', type=str, default="autorec.pt", metavar='N',
help='where to save model')
parser.add_argument('--predictions_path', type=str, default="out.txt", metavar='N',
help='where to save predictions')
args = parser.parse_args()
print(args)
use_gpu = torch.cuda.is_available() # global flag
if use_gpu:
print('GPU is available.')
else:
print('GPU is not available.')
def main():
params = dict()
params['batch_size'] = 1
params['data_dir'] = args.path_to_train_data
params['major'] = 'users'
params['itemIdInd'] = 1
params['userIdInd'] = 0
print("Loading training data")
data_layer = input_layer.UserItemRecDataProvider(params=params)
print("Data loaded")
print("Total items found: {}".format(len(data_layer.data.keys())))
print("Vector dim: {}".format(data_layer.vector_dim))
print("Loading eval data")
eval_params = copy.deepcopy(params)
# must set eval batch size to 1 to make sure no examples are missed
eval_params['batch_size'] = 1
eval_params['data_dir'] = args.path_to_eval_data
eval_data_layer = input_layer.UserItemRecDataProvider(params=eval_params,
user_id_map=data_layer.userIdMap,
item_id_map=data_layer.itemIdMap)
rencoder = model.AutoEncoder(layer_sizes=[data_layer.vector_dim] + [int(l) for l in args.hidden_layers.split(',')],
nl_type=args.non_linearity_type,
is_constrained=args.constrained,
dp_drop_prob=args.drop_prob,
last_layer_activations=not args.skip_last_layer_nl)
path_to_model = Path(args.save_path)
if path_to_model.is_file():
print("Loading model from: {}".format(path_to_model))
rencoder.load_state_dict(torch.load(args.save_path))
print('######################################################')
print('######################################################')
print('############# AutoEncoder Model: #####################')
print(rencoder)
print('######################################################')
print('######################################################')
rencoder.eval()
if use_gpu: rencoder = rencoder.cuda()
inv_userIdMap = {v: k for k, v in data_layer.userIdMap.items()}
inv_itemIdMap = {v: k for k, v in data_layer.itemIdMap.items()}
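# The inverted maps translate internal contiguous ids back to the original
# user/item ids so that predictions are written in terms of the source data.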
eval_data_layer.src_data = data_layer.data
with open(args.predictions_path, 'w') as outf:
for i, ((out, src), majorInd) in enumerate(eval_data_layer.iterate_one_epoch_eval(for_inf=True)):
inputs = Variable(src.cuda().to_dense() if use_gpu else src.to_dense())
targets_np = out.to_dense().numpy()[0, :]
outputs = rencoder(inputs).cpu().data.numpy()[0, :]
non_zeros = targets_np.nonzero()[0].tolist()
major_key = inv_userIdMap [majorInd]
for ind in non_zeros:
outf.write("{}\t{}\t{}\t{}\n".format(major_key, inv_itemIdMap[ind], outputs[ind], targets_np[ind]))
if i % 10000 == 0:
print("Done: {}".format(i))
if __name__ == '__main__':
main()
| DeepRecommender-master | infer.py |
# Copyright (c) 2017 NVIDIA Corporation
import unittest
import sys
import torch.optim as optim
from torch.autograd import Variable
from reco_encoder.data.input_layer import UserItemRecDataProvider
from reco_encoder.model.model import AutoEncoder, MSEloss
sys.path.append('data')
sys.path.append('model')
class iRecAutoEncoderTest(unittest.TestCase):
def test_CPU(self):
print("iRecAutoEncoderTest Test on CPU started")
params = {}
params['batch_size'] = 64
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Vector dim: {}".format(data_layer.vector_dim))
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys())>0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 256, 128], is_constrained=True)
print(encoder)
print(encoder.parameters())
optimizer = optim.SGD(encoder.parameters(), lr=0.01, momentum=0.9)
for epoch in range(20):
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
print('[%d, %5d] loss: %.7f' % (epoch, i, loss.item()))
def test_GPU(self):
print("iRecAutoEncoderTest Test on GPU started")
params = {}
params['batch_size'] = 32
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys()) > 0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 1024, 512, 512, 512, 512, 128])
encoder.cuda()
optimizer = optim.Adam(encoder.parameters())
print(encoder)
for epoch in range(30):
total_epoch_loss = 0.0
denom = 0.0
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense().cuda())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
total_epoch_loss += loss.item()
denom += 1
print("Total epoch {} loss: {}".format(epoch, total_epoch_loss/denom))
class uRecAutoEncoderTest(unittest.TestCase):
def test_CPU(self):
print("uRecAutoEncoderTest Test on CPU started")
params = {}
params['batch_size'] = 256
params['data_dir'] = 'test/testData_uRec'
data_layer = UserItemRecDataProvider(params=params)
print("Vector dim: {}".format(data_layer.vector_dim))
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys())>0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 128, data_layer.vector_dim])
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
for epoch in range(1):
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
print('[%d, %5d] loss: %.7f' % (epoch, i, loss.item()))
if i == 5: # too much compute for CPU
break
def test_GPU(self):
print("uRecAutoEncoderTest Test on GPU started")
params = {}
params['batch_size'] = 64
params['data_dir'] = 'test/testData_uRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys()) > 0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 1024, 512, 512, 128])
encoder.cuda()
optimizer = optim.Adam(encoder.parameters())
print(encoder)
for epoch in range(2):
total_epoch_loss = 0.0
denom = 0.0
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense().cuda())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
total_epoch_loss += loss.item()
denom += 1
print("Total epoch {} loss: {}".format(epoch, total_epoch_loss / denom))
if __name__ == '__main__':
unittest.main()
| DeepRecommender-master | test/test_model.py |
# Copyright (c) 2017 NVIDIA Corporation | DeepRecommender-master | test/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
import unittest
from reco_encoder.data.input_layer import UserItemRecDataProvider
class UserItemRecDataProviderTest(unittest.TestCase):
def test_1(self):
print("Test 1 started")
params = {}
params['batch_size'] = 64
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys())>0)
def test_iterations(self):
params = {}
params['batch_size'] = 32
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
for i, data in enumerate(data_layer.iterate_one_epoch()):
print(i)
print(data.size())
if __name__ == '__main__':
unittest.main()
| DeepRecommender-master | test/data_layer_tests.py |
# Copyright (c) 2017 NVIDIA Corporation
from os import listdir, path, makedirs
import random
import sys
import time
import datetime
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.keys())))
def save_data_to_file(data, filename):
with open(filename, 'w') as out:
for userId in data:
for record in data[userId]:
out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))
def create_NETFLIX_data_timesplit(all_data,
train_min,
train_max,
test_min,
test_max):
"""
Creates time-based split of NETFLIX data into train, and (validation, test)
:param all_data:
:param train_min:
:param train_max:
:param test_min:
:param test_max:
:return:
"""
train_min_ts = time.mktime(datetime.datetime.strptime(train_min,"%Y-%m-%d").timetuple())
train_max_ts = time.mktime(datetime.datetime.strptime(train_max, "%Y-%m-%d").timetuple())
test_min_ts = time.mktime(datetime.datetime.strptime(test_min, "%Y-%m-%d").timetuple())
test_max_ts = time.mktime(datetime.datetime.strptime(test_max, "%Y-%m-%d").timetuple())
training_data = dict()
validation_data = dict()
test_data = dict()
train_set_items = set()
for userId, userRatings in all_data.items():
time_sorted_ratings = sorted(userRatings, key=lambda x: x[2]) # sort by timestamp
for rating_item in time_sorted_ratings:
if rating_item[2] >= train_min_ts and rating_item[2] <= train_max_ts:
if not userId in training_data:
training_data[userId] = []
training_data[userId].append(rating_item)
train_set_items.add(rating_item[0]) # keep track of items from training set
elif rating_item[2] >= test_min_ts and rating_item[2] <= test_max_ts:
if not userId in training_data: # only include users seen in the training set
continue
p = random.random()
if p <=0.5:
if not userId in validation_data:
validation_data[userId] = []
validation_data[userId].append(rating_item)
else:
if not userId in test_data:
test_data[userId] = []
test_data[userId].append(rating_item)
# remove items not seen in the training set
for userId, userRatings in test_data.items():
test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
for userId, userRatings in validation_data.items():
validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
return training_data, validation_data, test_data
def main(args):
user2id_map = dict()
item2id_map = dict()
userId = 0
itemId = 0
all_data = dict()
folder = args[1]
out_folder = args[2]
# create necessary folders:
for output_dir in [(out_folder + f) for f in [
"/N3M_TRAIN", "/N3M_VALID", "/N3M_TEST", "/N6M_TRAIN",
"/N6M_VALID", "/N6M_TEST", "/N1Y_TRAIN", "/N1Y_VALID",
"/N1Y_TEST", "/NF_TRAIN", "/NF_VALID", "/NF_TEST"]]:
makedirs(output_dir, exist_ok=True)
text_files = [path.join(folder, f)
for f in listdir(folder)
if path.isfile(path.join(folder, f)) and ('.txt' in f)]
for text_file in text_files:
with open(text_file, 'r') as f:
print("Processing: {}".format(text_file))
lines = f.readlines()
item = int(lines[0][:-2]) # remove newline and :
if not item in item2id_map:
item2id_map[item] = itemId
itemId += 1
for rating in lines[1:]:
parts = rating.strip().split(",")
user = int(parts[0])
if not user in user2id_map:
user2id_map[user] = userId
userId += 1
rating = float(parts[1])
ts = int(time.mktime(datetime.datetime.strptime(parts[2],"%Y-%m-%d").timetuple()))
if user2id_map[user] not in all_data:
all_data[user2id_map[user]] = []
all_data[user2id_map[user]].append((item2id_map[item], rating, ts))
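# all_data maps internal userId -> list of (internal itemId, rating, unix timestamp) tuples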
print("STATS FOR ALL INPUT DATA")
print_stats(all_data)
# Netflix full
(nf_train, nf_valid, nf_test) = create_NETFLIX_data_timesplit(all_data,
"1999-12-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix full train")
print_stats(nf_train)
save_data_to_file(nf_train, out_folder + "/NF_TRAIN/nf.train.txt")
print("Netflix full valid")
print_stats(nf_valid)
save_data_to_file(nf_valid, out_folder + "/NF_VALID/nf.valid.txt")
print("Netflix full test")
print_stats(nf_test)
save_data_to_file(nf_test, out_folder + "/NF_TEST/nf.test.txt")
(n3m_train, n3m_valid, n3m_test) = create_NETFLIX_data_timesplit(all_data,
"2005-09-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix 3m train")
print_stats(n3m_train)
save_data_to_file(n3m_train, out_folder+"/N3M_TRAIN/n3m.train.txt")
print("Netflix 3m valid")
print_stats(n3m_valid)
save_data_to_file(n3m_valid, out_folder + "/N3M_VALID/n3m.valid.txt")
print("Netflix 3m test")
print_stats(n3m_test)
save_data_to_file(n3m_test, out_folder + "/N3M_TEST/n3m.test.txt")
(n6m_train, n6m_valid, n6m_test) = create_NETFLIX_data_timesplit(all_data,
"2005-06-01",
"2005-11-30",
"2005-12-01",
"2005-12-31")
print("Netflix 6m train")
print_stats(n6m_train)
save_data_to_file(n6m_train, out_folder+"/N6M_TRAIN/n6m.train.txt")
print("Netflix 6m valid")
print_stats(n6m_valid)
save_data_to_file(n6m_valid, out_folder + "/N6M_VALID/n6m.valid.txt")
print("Netflix 6m test")
print_stats(n6m_test)
save_data_to_file(n6m_test, out_folder + "/N6M_TEST/n6m.test.txt")
# Netflix 1 year
(n1y_train, n1y_valid, n1y_test) = create_NETFLIX_data_timesplit(all_data,
"2004-06-01",
"2005-05-31",
"2005-06-01",
"2005-06-30")
print("Netflix 1y train")
print_stats(n1y_train)
save_data_to_file(n1y_train, out_folder + "/N1Y_TRAIN/n1y.train.txt")
print("Netflix 1y valid")
print_stats(n1y_valid)
save_data_to_file(n1y_valid, out_folder + "/N1Y_VALID/n1y.valid.txt")
print("Netflix 1y test")
print_stats(n1y_test)
save_data_to_file(n1y_test, out_folder + "/N1Y_TEST/n1y.test.txt")
if __name__ == "__main__":
main(sys.argv)
| DeepRecommender-master | data_utils/netflix_data_convert.py |
# Copyright (c) 2017 NVIDIA Corporation
import sys
import datetime
import random
from math import floor
def print_stats(data):
total_ratings = 0
print("STATS")
for user in data:
total_ratings += len(data[user])
print("Total Ratings: {}".format(total_ratings))
print("Total User count: {}".format(len(data.keys())))
def save_data_to_file(data, filename):
with open(filename, 'w') as out:
for userId in data:
for record in data[userId]:
out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))
def main(args):
inpt = args[1]
out_prefix = args[2]
percent = 0.7
user2id_map = dict()
item2id_map = dict()
userId = 0
itemId = 0
data = dict()
min_ts = 100000000000
max_ts = 0
total_rating_count = 0
with open(inpt, 'r') as inpt_f: #ratings.csv headers: userId,movieId,rating,timestamp
for line in inpt_f:
if 'userId' in line:
continue
parts = line.split(',')
user = int(parts[0])
item = int(parts[1])
rating = float(parts[2])
ts = int(parts[3])
if min_ts > ts:
min_ts = ts
if max_ts < ts:
max_ts = ts
if not user in user2id_map:
user2id_map[user] = userId
userId += 1
if not item in item2id_map:
item2id_map[item] = itemId
itemId += 1
total_rating_count += 1
if user2id_map[user] not in data:
data[user2id_map[user]] = []
data[user2id_map[user]].append((item2id_map[item], rating, ts))
print("STATS")
print("Total Ratings: {}".format(total_rating_count))
print("Total User count: {}".format(len(user2id_map)))
print("Total Item count: {}".format(len(item2id_map)))
print("Minimum ts: {}, which is {}".format(min_ts, datetime.datetime.fromtimestamp(min_ts).strftime('%Y-%m-%d')))
print("Maximum ts: {}, which is {}".format(max_ts, datetime.datetime.fromtimestamp(max_ts).strftime('%Y-%m-%d')))
training_data = dict()
validation_data = dict()
test_data = dict()
train_set_items = set()
for userId in data.keys():
if len(data[userId]) < 2:
#print("WARNING, userId {} has less than 2 ratings, skipping user...".format(userId))
continue
time_sorted_ratings = sorted(data[userId], key=lambda x: x[2]) # sort by timestamp
last_train_ind = floor(percent * len(time_sorted_ratings))
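# The first ~70% (the `percent` value) of each user's time-ordered ratings go to
# training; the remaining ratings go to validation or test with probability 0.5 each.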
training_data[userId] = time_sorted_ratings[:last_train_ind]
for rating_item in time_sorted_ratings[:last_train_ind]:
train_set_items.add(rating_item[0]) # keep track of items from training set
p = random.random()
if p <= 0.5:
validation_data[userId] = time_sorted_ratings[last_train_ind:]
else:
test_data[userId] = time_sorted_ratings[last_train_ind:]
# remove items not seen in the training set
for userId, userRatings in test_data.items():
test_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
for userId, userRatings in validation_data.items():
validation_data[userId] = [rating for rating in userRatings if rating[0] in train_set_items]
print("Training Data")
print_stats(training_data)
save_data_to_file(training_data, out_prefix+".train")
print("Validation Data")
print_stats(validation_data)
save_data_to_file(validation_data, out_prefix + ".valid")
print("Test Data")
print_stats(test_data)
save_data_to_file(test_data, out_prefix + ".test")
if __name__ == "__main__":
main(sys.argv)
| DeepRecommender-master | data_utils/movie_lense_data_converter.py |
# Copyright (c) 2017 NVIDIA Corporation
| DeepRecommender-master | reco_encoder/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
| DeepRecommender-master | reco_encoder/model/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as weight_init
from torch.autograd import Variable
def activation(input, kind):
#print("Activation: {}".format(kind))
if kind == 'selu':
return F.selu(input)
elif kind == 'relu':
return F.relu(input)
elif kind == 'relu6':
return F.relu6(input)
elif kind == 'sigmoid':
return F.sigmoid(input)
elif kind == 'tanh':
return F.tanh(input)
elif kind == 'elu':
return F.elu(input)
elif kind == 'lrelu':
return F.leaky_relu(input)
elif kind == 'swish':
return input*F.sigmoid(input)
elif kind == 'none':
return input
else:
raise ValueError('Unknown non-linearity type')
def MSEloss(inputs, targets, size_average=False):
mask = targets != 0
num_ratings = torch.sum(mask.float())
criterion = nn.MSELoss(reduction='sum' if not size_average else 'mean')
return criterion(inputs * mask.float(), targets), Variable(torch.Tensor([1.0])) if size_average else num_ratings
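# Note on MSEloss above: entries where the target rating is 0 are treated as "unrated"
# and masked out, so only observed ratings contribute to the loss. Illustrative numbers
# (not from the code): targets = [0, 4, 0, 2], outputs = [1, 3.5, 2, 2.5]
# -> masked sum of squared errors = 0.25 + 0.25 = 0.5, num_ratings = 2;
# callers typically divide the returned loss by num_ratings.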
class AutoEncoder(nn.Module):
def __init__(self, layer_sizes, nl_type='selu', is_constrained=True, dp_drop_prob=0.0, last_layer_activations=True):
"""
Describes an AutoEncoder model
:param layer_sizes: Encoder network description. Should start with feature size (e.g. dimensionality of x).
For example: [10000, 1024, 512] will result in:
- encoder 2 layers: 10000x1024 and 1024x512. Representation layer (z) will be 512
- decoder 2 layers: 512x1024 and 1024x10000.
:param nl_type: (default 'selu') Type of non-linearity
:param is_constrained: (default: True) Should constrain decoder weights
:param dp_drop_prob: (default: 0.0) Dropout drop probability
:param last_layer_activations: (default: True) Whether to apply activations on last decoder layer
"""
super(AutoEncoder, self).__init__()
self._dp_drop_prob = dp_drop_prob
self._last_layer_activations = last_layer_activations
if dp_drop_prob > 0:
self.drop = nn.Dropout(dp_drop_prob)
self._last = len(layer_sizes) - 2
self._nl_type = nl_type
self.encode_w = nn.ParameterList(
[nn.Parameter(torch.rand(layer_sizes[i + 1], layer_sizes[i])) for i in range(len(layer_sizes) - 1)])
for ind, w in enumerate(self.encode_w):
weight_init.xavier_uniform_(w)
self.encode_b = nn.ParameterList(
[nn.Parameter(torch.zeros(layer_sizes[i + 1])) for i in range(len(layer_sizes) - 1)])
reversed_enc_layers = list(reversed(layer_sizes))
self.is_constrained = is_constrained
if not is_constrained:
self.decode_w = nn.ParameterList(
[nn.Parameter(torch.rand(reversed_enc_layers[i + 1], reversed_enc_layers[i])) for i in range(len(reversed_enc_layers) - 1)])
for ind, w in enumerate(self.decode_w):
weight_init.xavier_uniform_(w)
self.decode_b = nn.ParameterList(
[nn.Parameter(torch.zeros(reversed_enc_layers[i + 1])) for i in range(len(reversed_enc_layers) - 1)])
print("******************************")
print("******************************")
print(layer_sizes)
print("Dropout drop probability: {}".format(self._dp_drop_prob))
print("Encoder pass:")
for ind, w in enumerate(self.encode_w):
print(w.data.size())
print(self.encode_b[ind].size())
print("Decoder pass:")
if self.is_constrained:
print('Decoder is constrained')
for ind, w in enumerate(list(reversed(self.encode_w))):
print(w.transpose(0, 1).size())
print(self.decode_b[ind].size())
else:
for ind, w in enumerate(self.decode_w):
print(w.data.size())
print(self.decode_b[ind].size())
print("******************************")
print("******************************")
def encode(self, x):
for ind, w in enumerate(self.encode_w):
x = activation(input=F.linear(input=x, weight=w, bias=self.encode_b[ind]), kind=self._nl_type)
if self._dp_drop_prob > 0: # apply dropout only on code layer
x = self.drop(x)
return x
def decode(self, z):
if self.is_constrained:
for ind, w in enumerate(list(reversed(self.encode_w))): # constrained autoencoder reuses weights from the encoder
z = activation(input=F.linear(input=z, weight=w.transpose(0, 1), bias=self.decode_b[ind]),
# last layer of decoder should not apply non-linearities
kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none')
#if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer
# z = self.drop(z)
else:
for ind, w in enumerate(self.decode_w):
z = activation(input=F.linear(input=z, weight=w, bias=self.decode_b[ind]),
# last layer of decoder should not apply non-linearities
kind=self._nl_type if ind!=self._last or self._last_layer_activations else 'none')
#if self._dp_drop_prob > 0 and ind!=self._last: # and no dp on last layer
# z = self.drop(z)
return z
def forward(self, x):
return self.decode(self.encode(x))
| DeepRecommender-master | reco_encoder/model/model.py |
# Copyright (c) 2017 NVIDIA Corporation
| DeepRecommender-master | reco_encoder/data/__init__.py |
# Copyright (c) 2017 NVIDIA Corporation
"""Data Layer Classes"""
from os import listdir, path
from random import shuffle
import torch
class UserItemRecDataProvider:
def __init__(self, params, user_id_map=None, item_id_map=None):
self._params = params
self._data_dir = self.params['data_dir']
self._extension = ".txt" if 'extension' not in self.params else self.params['extension']
self._i_id = 0 if 'itemIdInd' not in self.params else self.params['itemIdInd']
self._u_id = 1 if 'userIdInd' not in self.params else self.params['userIdInd']
self._r_id = 2 if 'ratingInd' not in self.params else self.params['ratingInd']
self._major = 'items' if 'major' not in self.params else self.params['major']
if not (self._major == 'items' or self._major == 'users'):
raise ValueError("Major must be 'users' or 'items', but got {}".format(self._major))
self._major_ind = self._i_id if self._major == 'items' else self._u_id
self._minor_ind = self._u_id if self._major == 'items' else self._i_id
self._delimiter = '\t' if 'delimiter' not in self.params else self.params['delimiter']
if user_id_map is None or item_id_map is None:
self._build_maps()
else:
self._user_id_map = user_id_map
self._item_id_map = item_id_map
major_map = self._item_id_map if self._major == 'items' else self._user_id_map
minor_map = self._user_id_map if self._major == 'items' else self._item_id_map
self._vector_dim = len(minor_map)
src_files = [path.join(self._data_dir, f)
for f in listdir(self._data_dir)
if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)]
self._batch_size = self.params['batch_size']
self.data = dict()
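# self.data maps the major index (user or item, per the 'major' param) to a list
# of (minor index, rating) pairs built from the source files below.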
for source_file in src_files:
with open(source_file, 'r') as src:
for line in src.readlines():
parts = line.strip().split(self._delimiter)
if len(parts)<3:
raise ValueError('Encountered badly formatted line in {}'.format(source_file))
key = major_map[int(parts[self._major_ind])]
value = minor_map[int(parts[self._minor_ind])]
rating = float(parts[self._r_id])
#print("Key: {}, Value: {}, Rating: {}".format(key, value, rating))
if key not in self.data:
self.data[key] = []
self.data[key].append((value, rating))
def _build_maps(self):
self._user_id_map = dict()
self._item_id_map = dict()
src_files = [path.join(self._data_dir, f)
for f in listdir(self._data_dir)
if path.isfile(path.join(self._data_dir, f)) and f.endswith(self._extension)]
u_id = 0
i_id = 0
for source_file in src_files:
with open(source_file, 'r') as src:
for line in src.readlines():
parts = line.strip().split(self._delimiter)
if len(parts)<3:
raise ValueError('Encountered badly formatted line in {}'.format(source_file))
u_id_orig = int(parts[self._u_id])
if u_id_orig not in self._user_id_map:
self._user_id_map[u_id_orig] = u_id
u_id += 1
i_id_orig = int(parts[self._i_id])
if i_id_orig not in self._item_id_map:
self._item_id_map[i_id_orig] = i_id
i_id += 1
def iterate_one_epoch(self):
data = self.data
keys = list(data.keys())
shuffle(keys)
s_ind = 0
e_ind = self._batch_size
while e_ind < len(keys):
local_ind = 0
inds1 = []
inds2 = []
vals = []
for ind in range(s_ind, e_ind):
inds2 += [v[0] for v in data[keys[ind]]]
inds1 += [local_ind]*len([v[0] for v in data[keys[ind]]])
vals += [v[1] for v in data[keys[ind]]]
local_ind += 1
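# Assemble a sparse COO minibatch: inds1 are row positions within the batch,
# inds2 are minor-key (item/user) column indices, vals are the ratings.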
i_torch = torch.LongTensor([inds1, inds2])
v_torch = torch.FloatTensor(vals)
mini_batch = torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([self._batch_size, self._vector_dim]))
s_ind += self._batch_size
e_ind += self._batch_size
yield mini_batch
def iterate_one_epoch_eval(self, for_inf=False):
keys = list(self.data.keys())
s_ind = 0
while s_ind < len(keys):
inds1 = [0] * len([v[0] for v in self.data[keys[s_ind]]])
inds2 = [v[0] for v in self.data[keys[s_ind]]]
vals = [v[1] for v in self.data[keys[s_ind]]]
src_inds1 = [0] * len([v[0] for v in self.src_data[keys[s_ind]]])
src_inds2 = [v[0] for v in self.src_data[keys[s_ind]]]
src_vals = [v[1] for v in self.src_data[keys[s_ind]]]
i_torch = torch.LongTensor([inds1, inds2])
v_torch = torch.FloatTensor(vals)
src_i_torch = torch.LongTensor([src_inds1, src_inds2])
src_v_torch = torch.FloatTensor(src_vals)
mini_batch = (torch.sparse.FloatTensor(i_torch, v_torch, torch.Size([1, self._vector_dim])),
torch.sparse.FloatTensor(src_i_torch, src_v_torch, torch.Size([1, self._vector_dim])))
s_ind += 1
if not for_inf:
yield mini_batch
else:
yield mini_batch, keys[s_ind - 1]
@property
def vector_dim(self):
return self._vector_dim
@property
def userIdMap(self):
return self._user_id_map
@property
def itemIdMap(self):
return self._item_id_map
@property
def params(self):
return self._params
| DeepRecommender-master | reco_encoder/data/input_layer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import ctypes
from functools import lru_cache
import os
from pathlib import Path
import re
import shutil
import subprocess
from subprocess import CalledProcessError
import sys
import tempfile
from typing import List, Optional, Tuple, Union
import setuptools
from setuptools.command.build_ext import build_ext
# Project directory root
root_path: Path = Path(__file__).resolve().parent
@lru_cache(maxsize=1)
def te_version() -> str:
"""Transformer Engine version string
Includes Git commit as local version, unless suppressed with
NVTE_NO_LOCAL_VERSION environment variable.
"""
with open(root_path / "VERSION", "r") as f:
version = f.readline().strip()
if not int(os.getenv("NVTE_NO_LOCAL_VERSION", "0")):
try:
output = subprocess.run(
["git", "rev-parse" , "--short", "HEAD"],
capture_output=True,
cwd=root_path,
check=True,
universal_newlines=True,
)
except (CalledProcessError, OSError):
pass
else:
commit = output.stdout.strip()
version += f"+{commit}"
return version
@lru_cache(maxsize=1)
def with_debug_build() -> bool:
"""Whether to build with a debug configuration"""
for arg in sys.argv:
if arg == "--debug":
sys.argv.remove(arg)
return True
if int(os.getenv("NVTE_BUILD_DEBUG", "0")):
return True
return False
# Call once in global scope since this function manipulates the
# command-line arguments. Future calls will use a cached value.
with_debug_build()
def found_cmake() -> bool:
""""Check if valid CMake is available
CMake 3.18 or newer is required.
"""
# Check if CMake is available
try:
_cmake_bin = cmake_bin()
except FileNotFoundError:
return False
# Query CMake for version info
output = subprocess.run(
[_cmake_bin, "--version"],
capture_output=True,
check=True,
universal_newlines=True,
)
match = re.search(r"version\s*([\d.]+)", output.stdout)
version = match.group(1).split('.')
version = tuple(int(v) for v in version)
return version >= (3, 18)
def cmake_bin() -> Path:
"""Get CMake executable
Throws FileNotFoundError if not found.
"""
# Search in CMake Python package
_cmake_bin: Optional[Path] = None
try:
import cmake
except ImportError:
pass
else:
cmake_dir = Path(cmake.__file__).resolve().parent
_cmake_bin = cmake_dir / "data" / "bin" / "cmake"
if not _cmake_bin.is_file():
_cmake_bin = None
# Search in path
if _cmake_bin is None:
_cmake_bin = shutil.which("cmake")
if _cmake_bin is not None:
_cmake_bin = Path(_cmake_bin).resolve()
# Return executable if found
if _cmake_bin is None:
raise FileNotFoundError("Could not find CMake executable")
return _cmake_bin
def found_ninja() -> bool:
""""Check if Ninja is available"""
return shutil.which("ninja") is not None
def found_pybind11() -> bool:
""""Check if pybind11 is available"""
# Check if Python package is installed
try:
import pybind11
except ImportError:
pass
else:
return True
# Check if CMake can find pybind11
if not found_cmake():
return False
try:
subprocess.run(
[
"cmake",
"--find-package",
"-DMODE=EXIST",
"-DNAME=pybind11",
"-DCOMPILER_ID=CXX",
"-DLANGUAGE=CXX",
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
check=True,
)
except (CalledProcessError, OSError):
pass
else:
return True
return False
def cuda_version() -> Tuple[int, ...]:
"""CUDA Toolkit version as a (major, minor) tuple
Throws FileNotFoundError if NVCC is not found.
"""
# Try finding NVCC
nvcc_bin: Optional[Path] = None
if nvcc_bin is None and os.getenv("CUDA_HOME"):
# Check in CUDA_HOME
cuda_home = Path(os.getenv("CUDA_HOME"))
nvcc_bin = cuda_home / "bin" / "nvcc"
if nvcc_bin is None:
# Check if nvcc is in path
nvcc_bin = shutil.which("nvcc")
if nvcc_bin is not None:
nvcc_bin = Path(nvcc_bin)
if nvcc_bin is None:
# Last-ditch guess in /usr/local/cuda
cuda_home = Path("/usr/local/cuda")
nvcc_bin = cuda_home / "bin" / "nvcc"
if not nvcc_bin.is_file():
raise FileNotFoundError(f"Could not find NVCC at {nvcc_bin}")
# Query NVCC for version info
output = subprocess.run(
[nvcc_bin, "-V"],
capture_output=True,
check=True,
universal_newlines=True,
)
match = re.search(r"release\s*([\d.]+)", output.stdout)
version = match.group(1).split('.')
return tuple(int(v) for v in version)
@lru_cache(maxsize=1)
def with_userbuffers() -> bool:
"""Check if userbuffers support is enabled"""
if int(os.getenv("NVTE_WITH_USERBUFFERS", "0")):
assert os.getenv("MPI_HOME"), \
"MPI_HOME must be set if NVTE_WITH_USERBUFFERS=1"
return True
return False
@lru_cache(maxsize=1)
def frameworks() -> List[str]:
"""DL frameworks to build support for"""
_frameworks: List[str] = []
supported_frameworks = ["pytorch", "jax", "tensorflow", "paddle"]
# Check environment variable
if os.getenv("NVTE_FRAMEWORK"):
_frameworks.extend(os.getenv("NVTE_FRAMEWORK").split(","))
# Check command-line arguments
for arg in sys.argv.copy():
if arg.startswith("--framework="):
_frameworks.extend(arg.replace("--framework=", "").split(","))
sys.argv.remove(arg)
# Detect installed frameworks if not explicitly specified
if not _frameworks:
try:
import torch
except ImportError:
pass
else:
_frameworks.append("pytorch")
try:
import jax
except ImportError:
pass
else:
_frameworks.append("jax")
try:
import tensorflow
except ImportError:
pass
else:
_frameworks.append("tensorflow")
try:
import paddle
except ImportError:
pass
else:
_frameworks.append("paddle")
# Special framework names
if "all" in _frameworks:
_frameworks = supported_frameworks.copy()
if "none" in _frameworks:
_frameworks = []
# Check that frameworks are valid
_frameworks = [framework.lower() for framework in _frameworks]
for framework in _frameworks:
if framework not in supported_frameworks:
raise ValueError(
f"Transformer Engine does not support framework={framework}"
)
return _frameworks
# Call once in global scope since this function manipulates the
# command-line arguments. Future calls will use a cached value.
frameworks()
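# Illustrative ways to select frameworks (example invocations, not prescriptions):
#   NVTE_FRAMEWORK=pytorch,jax pip install .
#   python setup.py install --framework=pytorch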
def setup_requirements() -> Tuple[List[str], List[str], List[str]]:
"""Setup Python dependencies
Returns dependencies for build, runtime, and testing.
"""
# Common requirements
setup_reqs: List[str] = []
install_reqs: List[str] = ["pydantic"]
test_reqs: List[str] = ["pytest"]
def add_unique(l: List[str], vals: Union[str, List[str]]) -> None:
"""Add entry to list if not already included"""
if isinstance(vals, str):
vals = [vals]
for val in vals:
if val not in l:
l.append(val)
# Requirements that may be installed outside of Python
if not found_cmake():
add_unique(setup_reqs, "cmake>=3.18")
if not found_ninja():
add_unique(setup_reqs, "ninja")
# Framework-specific requirements
if "pytorch" in frameworks():
add_unique(install_reqs, ["torch", "flash-attn>=1.0.6, <=2.2.1"])
add_unique(test_reqs, ["numpy", "onnxruntime", "torchvision"])
if "jax" in frameworks():
if not found_pybind11():
add_unique(setup_reqs, "pybind11")
add_unique(install_reqs, ["jax", "flax>=0.7.1"])
add_unique(test_reqs, ["numpy", "praxis"])
if "tensorflow" in frameworks():
if not found_pybind11():
add_unique(setup_reqs, "pybind11")
add_unique(install_reqs, "tensorflow")
add_unique(test_reqs, ["keras", "tensorflow_datasets"])
if "paddle" in frameworks():
add_unique(install_reqs, "paddlepaddle-gpu")
add_unique(test_reqs, "numpy")
return setup_reqs, install_reqs, test_reqs
class CMakeExtension(setuptools.Extension):
"""CMake extension module"""
def __init__(
self,
name: str,
cmake_path: Path,
cmake_flags: Optional[List[str]] = None,
) -> None:
super().__init__(name, sources=[]) # No work for base class
self.cmake_path: Path = cmake_path
self.cmake_flags: List[str] = [] if cmake_flags is None else cmake_flags
def _build_cmake(self, build_dir: Path, install_dir: Path) -> None:
# Make sure paths are str
_cmake_bin = str(cmake_bin())
cmake_path = str(self.cmake_path)
build_dir = str(build_dir)
install_dir = str(install_dir)
# CMake configure command
build_type = "Debug" if with_debug_build() else "Release"
configure_command = [
_cmake_bin,
"-S",
cmake_path,
"-B",
build_dir,
f"-DCMAKE_BUILD_TYPE={build_type}",
f"-DCMAKE_INSTALL_PREFIX={install_dir}",
]
configure_command += self.cmake_flags
if found_ninja():
configure_command.append("-GNinja")
try:
import pybind11
except ImportError:
pass
else:
pybind11_dir = Path(pybind11.__file__).resolve().parent
pybind11_dir = pybind11_dir / "share" / "cmake" / "pybind11"
configure_command.append(f"-Dpybind11_DIR={pybind11_dir}")
# CMake build and install commands
build_command = [_cmake_bin, "--build", build_dir]
install_command = [_cmake_bin, "--install", build_dir]
# Run CMake commands
for command in [configure_command, build_command, install_command]:
print(f"Running command {' '.join(command)}")
try:
subprocess.run(command, cwd=build_dir, check=True)
except (CalledProcessError, OSError) as e:
raise RuntimeError(f"Error when running CMake: {e}")
# PyTorch extension modules require special handling
if "pytorch" in frameworks():
from torch.utils.cpp_extension import BuildExtension
elif "paddle" in frameworks():
from paddle.utils.cpp_extension import BuildExtension
else:
from setuptools.command.build_ext import build_ext as BuildExtension
class CMakeBuildExtension(BuildExtension):
"""Setuptools command with support for CMake extension modules"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def run(self) -> None:
# Build CMake extensions
for ext in self.extensions:
if isinstance(ext, CMakeExtension):
print(f"Building CMake extension {ext.name}")
with tempfile.TemporaryDirectory() as build_dir:
build_dir = Path(build_dir)
package_path = Path(self.get_ext_fullpath(ext.name))
install_dir = package_path.resolve().parent
ext._build_cmake(
build_dir=build_dir,
install_dir=install_dir,
)
# Paddle requires linker search path for libtransformer_engine.so
paddle_ext = None
if "paddle" in frameworks():
for ext in self.extensions:
if "paddle" in ext.name:
ext.library_dirs.append(self.build_lib)
paddle_ext = ext
break
# Build non-CMake extensions as usual
all_extensions = self.extensions
self.extensions = [
ext for ext in self.extensions
if not isinstance(ext, CMakeExtension)
]
super().run()
self.extensions = all_extensions
# Manually write stub file for Paddle extension
if paddle_ext is not None:
# Load libtransformer_engine.so to avoid linker errors
for path in Path(self.build_lib).iterdir():
if path.name.startswith("libtransformer_engine."):
ctypes.CDLL(str(path), mode=ctypes.RTLD_GLOBAL)
# Figure out stub file path
module_name = paddle_ext.name
assert module_name.endswith("_pd_"), \
"Expected Paddle extension module to end with '_pd_'"
stub_name = module_name[:-4] # remove '_pd_'
stub_path = os.path.join(self.build_lib, stub_name + ".py")
# Figure out library name
# Note: This library doesn't actually exist. Paddle
# internally reinserts the '_pd_' suffix.
so_path = self.get_ext_fullpath(module_name)
_, so_ext = os.path.splitext(so_path)
lib_name = stub_name + so_ext
# Write stub file
print(f"Writing Paddle stub for {lib_name} into file {stub_path}")
from paddle.utils.cpp_extension.extension_utils import custom_write_stub
custom_write_stub(lib_name, stub_path)
def setup_common_extension() -> CMakeExtension:
"""Setup CMake extension for common library
Also builds JAX, TensorFlow, and userbuffers support if needed.
"""
cmake_flags = []
if "jax" in frameworks():
cmake_flags.append("-DENABLE_JAX=ON")
if "tensorflow" in frameworks():
cmake_flags.append("-DENABLE_TENSORFLOW=ON")
if with_userbuffers():
cmake_flags.append("-DNVTE_WITH_USERBUFFERS=ON")
return CMakeExtension(
name="transformer_engine",
cmake_path=root_path / "transformer_engine",
cmake_flags=cmake_flags,
)
def _all_files_in_dir(path):
return list(path.iterdir())
def setup_pytorch_extension() -> setuptools.Extension:
"""Setup CUDA extension for PyTorch support"""
# Source files
src_dir = root_path / "transformer_engine" / "pytorch" / "csrc"
extensions_dir = src_dir / "extensions"
sources = [
src_dir / "common.cu",
src_dir / "ts_fp8_op.cpp",
] + \
_all_files_in_dir(extensions_dir)
# Header files
include_dirs = [
root_path / "transformer_engine" / "common" / "include",
root_path / "transformer_engine" / "pytorch" / "csrc",
root_path / "3rdparty" / "cudnn-frontend" / "include",
]
# Compiler flags
cxx_flags = ["-O3"]
nvcc_flags = [
"-O3",
"-gencode",
"arch=compute_70,code=sm_70",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
# Version-dependent CUDA options
try:
version = cuda_version()
except FileNotFoundError:
print("Could not determine CUDA Toolkit version")
else:
if version >= (11, 2):
nvcc_flags.extend(["--threads", "4"])
if version >= (11, 0):
nvcc_flags.extend(["-gencode", "arch=compute_80,code=sm_80"])
if version >= (11, 8):
nvcc_flags.extend(["-gencode", "arch=compute_90,code=sm_90"])
# userbuffers support
if with_userbuffers():
if os.getenv("MPI_HOME"):
mpi_home = Path(os.getenv("MPI_HOME"))
include_dirs.append(mpi_home / "include")
cxx_flags.append("-DNVTE_WITH_USERBUFFERS")
nvcc_flags.append("-DNVTE_WITH_USERBUFFERS")
# Construct PyTorch CUDA extension
sources = [str(path) for path in sources]
include_dirs = [str(path) for path in include_dirs]
from torch.utils.cpp_extension import CUDAExtension
return CUDAExtension(
name="transformer_engine_extensions",
sources=sources,
include_dirs=include_dirs,
# libraries=["transformer_engine"], ### TODO (tmoon) Debug linker errors
extra_compile_args={
"cxx": cxx_flags,
"nvcc": nvcc_flags,
},
)
def setup_paddle_extension() -> setuptools.Extension:
"""Setup CUDA extension for Paddle support"""
# Source files
src_dir = root_path / "transformer_engine" / "paddle" / "csrc"
sources = [
src_dir / "extensions.cu",
src_dir / "common.cpp",
src_dir / "custom_ops.cu",
]
# Header files
include_dirs = [
root_path / "transformer_engine" / "common" / "include",
root_path / "transformer_engine" / "paddle" / "csrc",
]
# Compiler flags
cxx_flags = ["-O3"]
nvcc_flags = [
"-O3",
"-gencode",
"arch=compute_70,code=sm_70",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
# Version-dependent CUDA options
try:
version = cuda_version()
except FileNotFoundError:
print("Could not determine CUDA Toolkit version")
else:
if version >= (11, 2):
nvcc_flags.extend(["--threads", "4"])
if version >= (11, 0):
nvcc_flags.extend(["-gencode", "arch=compute_80,code=sm_80"])
if version >= (11, 8):
nvcc_flags.extend(["-gencode", "arch=compute_90,code=sm_90"])
# Construct Paddle CUDA extension
sources = [str(path) for path in sources]
include_dirs = [str(path) for path in include_dirs]
from paddle.utils.cpp_extension import CUDAExtension
ext = CUDAExtension(
sources=sources,
include_dirs=include_dirs,
libraries=["transformer_engine"],
extra_compile_args={
"cxx": cxx_flags,
"nvcc": nvcc_flags,
},
)
ext.name = "transformer_engine_paddle_pd_"
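    # (Hedged note: the trailing '_pd_' here appears to be coordinated with the
    # stub-writing logic above, where Paddle is expected to reinsert the suffix when
    # it resolves the compiled library.)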
return ext
def main():
# Submodules to install
packages = setuptools.find_packages(
include=["transformer_engine", "transformer_engine.*"],
)
# Dependencies
setup_requires, install_requires, test_requires = setup_requirements()
# Extensions
ext_modules = [setup_common_extension()]
if "pytorch" in frameworks():
ext_modules.append(setup_pytorch_extension())
if "paddle" in frameworks():
ext_modules.append(setup_paddle_extension())
# Configure package
setuptools.setup(
name="transformer_engine",
version=te_version(),
packages=packages,
description="Transformer acceleration library",
ext_modules=ext_modules,
cmdclass={"build_ext": CMakeBuildExtension},
setup_requires=setup_requires,
install_requires=install_requires,
extras_require={"test": test_requires},
license_files=("LICENSE",),
)
if __name__ == "__main__":
main()
| TransformerEngine-main | setup.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test TE Paddle Layer-level APIs"""
import math
import os
import pytest
from utils import assert_allclose
import paddle
import transformer_engine.paddle as te
from transformer_engine.paddle.fp8 import is_fp8_available, fp8_autocast
from transformer_engine.common.recipe import DelayedScaling
is_fp8_supported, reason = is_fp8_available()
LINEAR_CASES = [(16, 16, 32), (32, 32, 64)]
NORM_CASES = [(16, 32), (256, 1024)]
@pytest.fixture(autouse=True)
def setup():
"""Setup random seed before each test"""
paddle.seed(10)
yield
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('use_fp8', [True, False])
def test_checkpoint(use_fp8):
"""Test checkpoint save / load"""
bs = 16
in_features = 16
out_features = 32
file_name = "model.pdparams"
input_tensor = paddle.uniform(shape=(bs, in_features), dtype='float32')
model = te.Linear(in_features, out_features)
model_loaded = te.Linear(in_features, out_features)
# Populate amax_history
with fp8_autocast(enabled=False, calibrating=True):
_ = model(input_tensor)
# Save model
paddle.save(model.state_dict(), file_name)
# Get ref output
with fp8_autocast(enabled=use_fp8):
out_ref = model(input_tensor)
# Load model
model_loaded.set_state_dict(paddle.load(file_name))
if os.path.exists(file_name):
os.remove(file_name)
# Get actual output
with fp8_autocast(enabled=use_fp8):
out = model_loaded(input_tensor)
assert_allclose(out, out_ref)
def calc_output_and_grad(layer, x, dy):
"""
Calculate forward and backward pass
"""
inp = paddle.to_tensor(x)
inp.stop_gradient = x.stop_gradient
y = layer(inp)
y.backward(dy)
return y, inp.grad if not inp.stop_gradient else None
def calc_output_and_grad_ln_out(layer, x, dy, return_ln_out=False):
"""
Calculate forward and backward pass for layernorm
"""
inp = paddle.to_tensor(x)
inp.stop_gradient = x.stop_gradient
outputs = layer(inp)
ln_out = None
if return_ln_out:
y, ln_out = outputs
else:
y = outputs
y.backward(dy)
return y, ln_out, inp.grad if not inp.stop_gradient else None
class TestLinear:
"""
Tests for Linear layer
"""
@staticmethod
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="BF16 Linear requires Ampere+ GPU")
@pytest.mark.parametrize('bs,in_features,out_features', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_linear_bf16(bs, in_features, out_features, has_bias, no_dbias, no_dgrad, no_wgrad,
activation_dtype):
"""
Test BF16 Linear
"""
rtol = 5e-2
atol = 5e-2
input_tensor = paddle.uniform(shape=(bs, in_features), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, out_features), dtype=activation_dtype)
paddle.set_default_dtype(activation_dtype)
layer_te = te.Linear(in_features, out_features, bias_attr=None if has_bias else False)
layer_pd = te.Linear(in_features,
out_features,
bias_attr=None if has_bias else False,
backend='paddle')
layer_pd.weight.copy_(layer_te.weight.T, True)
if has_bias:
layer_pd.bias.copy_(layer_te.bias, True)
layer_te.weight.stop_gradient = no_wgrad
layer_pd.weight.stop_gradient = no_wgrad
if has_bias:
layer_te.bias.stop_gradient = no_dbias
layer_pd.bias.stop_gradient = no_dbias
out_ref, grad_input_ref = calc_output_and_grad(layer_pd, input_tensor, grad_out)
out, grad_input = calc_output_and_grad(layer_te, input_tensor, grad_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.weight.grad, layer_pd.weight.grad.T, rtol=rtol, atol=atol)
if has_bias and not no_dbias:
assert_allclose(layer_te.bias.grad, layer_pd.bias.grad, rtol=rtol, atol=atol)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('bs,in_features,out_features', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('fp8_wgrad', [True, False])
@pytest.mark.parametrize('do_calibration', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_linear_fp8(bs, in_features, out_features, has_bias, no_dbias, no_dgrad, no_wgrad,
fp8_wgrad, do_calibration, activation_dtype):
"""
Test FP8 Linear
"""
rtol = 0.1
atol = 0.5
input_tensor = paddle.uniform(shape=(bs, in_features), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, out_features), dtype=activation_dtype)
recipe = DelayedScaling(override_linear_precision=(False, False, not fp8_wgrad))
paddle.set_default_dtype(activation_dtype)
layer_te = te.Linear(
in_features=in_features,
out_features=out_features,
bias_attr=None if has_bias else False,
)
layer_pd = te.Linear(
in_features=in_features,
out_features=out_features,
bias_attr=None if has_bias else False,
backend='paddle',
)
layer_pd.weight.copy_(layer_te.weight.T, True)
if has_bias:
layer_pd.bias.copy_(layer_te.bias, True)
layer_te.weight.stop_gradient = no_wgrad
layer_pd.weight.stop_gradient = no_wgrad
if has_bias:
layer_te.bias.stop_gradient = no_dbias
layer_pd.bias.stop_gradient = no_dbias
with fp8_autocast(enabled=not do_calibration, calibrating=do_calibration,
fp8_recipe=recipe):
out_ref, grad_input_ref = calc_output_and_grad(layer_pd, input_tensor, grad_out)
out, grad_input = calc_output_and_grad(layer_te, input_tensor, grad_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.weight.grad, layer_pd.weight.grad.T, rtol=rtol, atol=atol)
if has_bias and not no_dbias:
assert_allclose(layer_te.bias.grad, layer_pd.bias.grad, rtol=rtol, atol=atol)
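        # When do_calibration is True the fp8_autocast above runs in the original
        # precision (enabled=False) but should still record amax statistics, hence the
        # non-zero amax_history check below.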
if do_calibration:
assert paddle.count_nonzero(layer_te.fp8_meta["scaling_fwd"].amax_history).item() > 0
@pytest.mark.parametrize('bs,hidden_size', NORM_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_layernorm_bf16(bs, hidden_size, has_bias, no_dbias, no_dgrad, no_wgrad, activation_dtype):
"""
Test BF16 LayerNorm
"""
eps = 1e-3
rtol = 1e-2
atol = 1e-2
x = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
x.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
paddle.set_default_dtype(activation_dtype)
layer_te = te.LayerNorm(hidden_size=hidden_size, eps=eps, bias_attr=None if has_bias else False)
layer_pd = te.LayerNorm(hidden_size=hidden_size,
eps=eps,
bias_attr=None if has_bias else False,
backend='paddle')
layer_pd.weight.copy_(layer_te.weight, True)
if has_bias:
layer_pd.bias.copy_(layer_te.bias, True)
layer_te.weight.stop_gradient = no_wgrad
layer_pd.weight.stop_gradient = no_wgrad
if has_bias:
layer_te.bias.stop_gradient = no_dbias
layer_pd.bias.stop_gradient = no_dbias
out_ref, grad_input_ref = calc_output_and_grad(layer_pd, x, grad_out)
out, grad_input = calc_output_and_grad(layer_te, x, grad_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.weight.grad, layer_pd.weight.grad, rtol=rtol, atol=atol)
if has_bias and not no_dbias:
assert_allclose(layer_te.bias.grad, layer_pd.bias.grad, rtol=rtol, atol=atol)
class TestLayerNormLinear:
"""
Tests for LayerNormLinear layer
"""
@staticmethod
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="BF16 Linear requires Ampere+ GPU")
@pytest.mark.parametrize('bs,in_features,out_features', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('return_ln_out', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_layernorm_linear_bf16(bs, in_features, out_features, has_bias, no_dbias, no_dgrad,
no_wgrad, return_ln_out, activation_dtype):
"""
Test BF16 LayerNormLinear Layer
"""
paddle.set_default_dtype(activation_dtype)
rtol = 5e-2
atol = 5e-2
input_tensor = paddle.uniform(shape=(bs, in_features), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, out_features), dtype=activation_dtype)
eps = 1e-3
layer_te = te.LayerNormLinear(
in_features=in_features,
out_features=out_features,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
)
layer_pd = te.LayerNormLinear(
in_features=in_features,
out_features=out_features,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
backend='paddle',
)
layer_pd.ln_weight.copy_(layer_te.ln_weight, True)
layer_pd.ln_bias.copy_(layer_te.ln_bias, True)
layer_pd.weight.copy_(layer_te.weight.T, True)
if has_bias:
layer_pd.bias.copy_(layer_te.bias, True)
layer_te.weight.stop_gradient = no_wgrad
layer_te.ln_weight.stop_gradient = no_wgrad
layer_te.ln_bias.stop_gradient = no_dbias
layer_pd.weight.stop_gradient = no_wgrad
layer_pd.ln_weight.stop_gradient = no_wgrad
layer_pd.ln_bias.stop_gradient = no_dbias
if has_bias:
layer_te.bias.stop_gradient = no_dbias
layer_pd.bias.stop_gradient = no_dbias
out_ref, ln_out_ref, grad_input_ref = calc_output_and_grad_ln_out(
layer_pd, input_tensor, grad_out, return_ln_out=return_ln_out)
out, ln_out, grad_input = calc_output_and_grad_ln_out(layer_te,
input_tensor,
grad_out,
return_ln_out=return_ln_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.weight.grad, layer_pd.weight.grad.T, rtol=rtol, atol=atol)
assert_allclose(layer_te.ln_weight.grad, layer_pd.ln_weight.grad, rtol=rtol, atol=atol)
if not no_dbias:
assert_allclose(layer_te.ln_bias.grad, layer_pd.ln_bias.grad, rtol=rtol, atol=atol)
if has_bias:
assert_allclose(layer_te.bias.grad, layer_pd.bias.grad, rtol=rtol, atol=atol)
if return_ln_out:
assert_allclose(ln_out, ln_out_ref, rtol=rtol, atol=atol)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('bs,in_features,out_features', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('fp8_wgrad', [True, False])
@pytest.mark.parametrize('do_calibration', [True, False])
@pytest.mark.parametrize('return_ln_out', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_layernorm_linear_fp8(bs, in_features, out_features, has_bias, no_dbias, no_dgrad,
no_wgrad, fp8_wgrad, do_calibration, return_ln_out,
activation_dtype):
"""
Test FP8 LayerNormLinear Layer
"""
paddle.set_default_dtype(activation_dtype)
rtol = 0.1
atol = 0.75
input_tensor = paddle.uniform(shape=(bs, in_features), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, out_features), dtype=activation_dtype)
eps = 1e-3
recipe = DelayedScaling(override_linear_precision=(False, False, not fp8_wgrad))
layer_te = te.LayerNormLinear(
in_features=in_features,
out_features=out_features,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
)
layer_pd = te.LayerNormLinear(
in_features=in_features,
out_features=out_features,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
backend='paddle',
)
layer_pd.ln_weight.copy_(layer_te.ln_weight, True)
layer_pd.ln_bias.copy_(layer_te.ln_bias, True)
layer_pd.weight.copy_(layer_te.weight.T, True)
if has_bias:
layer_pd.bias.copy_(layer_te.bias, True)
layer_te.weight.stop_gradient = no_wgrad
layer_te.ln_weight.stop_gradient = no_wgrad
layer_te.ln_bias.stop_gradient = no_dbias
layer_pd.weight.stop_gradient = no_wgrad
layer_pd.ln_weight.stop_gradient = no_wgrad
layer_pd.ln_bias.stop_gradient = no_dbias
if has_bias:
layer_te.bias.stop_gradient = no_dbias
layer_pd.bias.stop_gradient = no_dbias
with fp8_autocast(enabled=not do_calibration, calibrating=do_calibration,
fp8_recipe=recipe):
out_ref, ln_out_ref, grad_input_ref = calc_output_and_grad_ln_out(
layer_pd, input_tensor, grad_out, return_ln_out=return_ln_out)
out, ln_out, grad_input = calc_output_and_grad_ln_out(layer_te,
input_tensor,
grad_out,
return_ln_out=return_ln_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.weight.grad, layer_pd.weight.grad.T, rtol=rtol, atol=atol)
assert_allclose(layer_te.ln_weight.grad, layer_pd.ln_weight.grad, rtol=rtol, atol=atol)
if not no_dbias:
assert_allclose(layer_te.ln_bias.grad, layer_pd.ln_bias.grad, rtol=rtol, atol=atol)
if has_bias:
assert_allclose(layer_te.bias.grad, layer_pd.bias.grad, rtol=rtol, atol=atol)
if return_ln_out:
assert_allclose(ln_out, ln_out_ref, rtol=rtol, atol=atol)
if do_calibration:
assert paddle.count_nonzero(layer_te.fp8_meta["scaling_fwd"].amax_history).item() > 0
class TestLayerNormMLP:
"""
Test LayerNormMLP Layer
"""
@staticmethod
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="BF16 Linear requires Ampere+ GPU")
@pytest.mark.parametrize('bs,hidden_size,ffn_hidden_size', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('return_ln_out', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_layernorm_mlp_bf16(bs, hidden_size, ffn_hidden_size, has_bias, no_dbias, no_dgrad,
no_wgrad, return_ln_out, activation_dtype):
"""
        Test BF16 LayerNormMLP Layer
"""
paddle.set_default_dtype(activation_dtype)
rtol = 5e-2
atol = 5e-2
input_tensor = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
eps = 1e-3
layer_te = te.LayerNormMLP(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
)
layer_pd = te.LayerNormMLP(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
backend='paddle',
)
layer_pd.ln_weight.copy_(layer_te.ln_weight, True)
layer_pd.ln_bias.copy_(layer_te.ln_bias, True)
layer_pd.fc1_weight.copy_(layer_te.fc1_weight.T, True)
layer_pd.fc2_weight.copy_(layer_te.fc2_weight.T, True)
if has_bias:
layer_pd.fc1_bias.copy_(layer_te.fc1_bias, True)
layer_pd.fc2_bias.copy_(layer_te.fc2_bias, True)
layer_te.fc1_weight.stop_gradient = no_wgrad
layer_te.fc2_weight.stop_gradient = no_wgrad
layer_te.ln_weight.stop_gradient = no_wgrad
layer_te.ln_bias.stop_gradient = no_dbias
layer_pd.fc1_weight.stop_gradient = no_wgrad
layer_pd.fc2_weight.stop_gradient = no_wgrad
layer_pd.ln_weight.stop_gradient = no_wgrad
layer_pd.ln_bias.stop_gradient = no_dbias
if has_bias:
layer_te.fc1_bias.stop_gradient = no_dbias
layer_te.fc2_bias.stop_gradient = no_dbias
layer_pd.fc1_bias.stop_gradient = no_dbias
layer_pd.fc2_bias.stop_gradient = no_dbias
out_ref, ln_out_ref, grad_input_ref = calc_output_and_grad_ln_out(
layer_pd, input_tensor, grad_out, return_ln_out=return_ln_out)
out, ln_out, grad_input = calc_output_and_grad_ln_out(layer_te,
input_tensor,
grad_out,
return_ln_out=return_ln_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.ln_weight.grad, layer_pd.ln_weight.grad, rtol=rtol, atol=atol)
assert_allclose(layer_te.fc1_weight.grad,
layer_pd.fc1_weight.grad.T,
rtol=rtol,
atol=atol)
assert_allclose(layer_te.fc2_weight.grad,
layer_pd.fc2_weight.grad.T,
rtol=rtol,
atol=atol)
if not no_dbias:
assert_allclose(layer_te.ln_bias.grad, layer_pd.ln_bias.grad, rtol=rtol, atol=atol)
if has_bias:
assert_allclose(layer_te.fc1_bias.grad,
layer_pd.fc1_bias.grad,
rtol=rtol,
atol=atol)
assert_allclose(layer_te.fc2_bias.grad,
layer_pd.fc2_bias.grad,
rtol=rtol,
atol=atol)
if return_ln_out:
assert_allclose(ln_out, ln_out_ref, rtol=rtol, atol=atol)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('bs,hidden_size,ffn_hidden_size', LINEAR_CASES)
@pytest.mark.parametrize('has_bias,no_dbias', [[True, False], [True, True], [False, False]])
@pytest.mark.parametrize('no_dgrad', [True, False])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('fp8_wgrad', [True, False])
@pytest.mark.parametrize('do_calibration', [True, False])
@pytest.mark.parametrize('return_ln_out', [True, False])
@pytest.mark.parametrize('activation_dtype', ['bfloat16', 'float32'])
def test_layernorm_mlp_fp8(bs, hidden_size, ffn_hidden_size, has_bias, no_dbias, no_dgrad,
no_wgrad, fp8_wgrad, do_calibration, return_ln_out,
activation_dtype):
"""
Test FP8 LayerNormMLP Layer
"""
paddle.set_default_dtype(activation_dtype)
rtol = 0.1
atol = 0.75
input_tensor = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
input_tensor.stop_gradient = no_dgrad
grad_out = paddle.uniform(shape=(bs, hidden_size), dtype=activation_dtype)
eps = 1e-3
recipe = DelayedScaling(override_linear_precision=(False, False, not fp8_wgrad))
layer_te = te.LayerNormMLP(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
)
layer_pd = te.LayerNormMLP(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
eps=eps,
bias_attr=None if has_bias else False,
return_layernorm_output=return_ln_out,
backend='paddle',
)
layer_pd.ln_weight.copy_(layer_te.ln_weight, True)
layer_pd.ln_bias.copy_(layer_te.ln_bias, True)
layer_pd.fc1_weight.copy_(layer_te.fc1_weight.T, True)
layer_pd.fc2_weight.copy_(layer_te.fc2_weight.T, True)
if has_bias:
layer_pd.fc1_bias.copy_(layer_te.fc1_bias, True)
layer_pd.fc2_bias.copy_(layer_te.fc2_bias, True)
layer_te.fc1_weight.stop_gradient = no_wgrad
layer_te.fc2_weight.stop_gradient = no_wgrad
layer_te.ln_weight.stop_gradient = no_wgrad
layer_te.ln_bias.stop_gradient = no_dbias
layer_pd.fc1_weight.stop_gradient = no_wgrad
layer_pd.fc2_weight.stop_gradient = no_wgrad
layer_pd.ln_weight.stop_gradient = no_wgrad
layer_pd.ln_bias.stop_gradient = no_dbias
if has_bias:
layer_te.fc1_bias.stop_gradient = no_dbias
layer_te.fc2_bias.stop_gradient = no_dbias
layer_pd.fc1_bias.stop_gradient = no_dbias
layer_pd.fc2_bias.stop_gradient = no_dbias
with fp8_autocast(enabled=not do_calibration, calibrating=do_calibration,
fp8_recipe=recipe):
out_ref, ln_out_ref, grad_input_ref = calc_output_and_grad_ln_out(
layer_pd, input_tensor, grad_out, return_ln_out=return_ln_out)
out, ln_out, grad_input = calc_output_and_grad_ln_out(layer_te,
input_tensor,
grad_out,
return_ln_out=return_ln_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
if not no_dgrad:
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
assert_allclose(layer_te.ln_weight.grad, layer_pd.ln_weight.grad, rtol=rtol, atol=atol)
assert_allclose(layer_te.fc1_weight.grad,
layer_pd.fc1_weight.grad.T,
rtol=rtol,
atol=atol)
assert_allclose(layer_te.fc2_weight.grad,
layer_pd.fc2_weight.grad.T,
rtol=rtol,
atol=atol)
if not no_dbias:
assert_allclose(layer_te.ln_bias.grad, layer_pd.ln_bias.grad, rtol=rtol, atol=atol)
if has_bias:
assert_allclose(layer_te.fc1_bias.grad,
layer_pd.fc1_bias.grad,
rtol=rtol,
atol=atol)
assert_allclose(layer_te.fc2_bias.grad,
layer_pd.fc2_bias.grad,
rtol=rtol,
atol=atol)
if return_ln_out:
assert_allclose(ln_out, ln_out_ref, rtol=rtol, atol=atol)
if do_calibration:
assert paddle.count_nonzero(layer_te.fp8_meta["scaling_fwd"].amax_history).item() > 0
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="cuDNN fMHA requires Ampere+ GPU")
@pytest.mark.parametrize('bs', [1, 2, 8])
@pytest.mark.parametrize('hidden_size, num_heads', [[1024, 16], [768, 12]])
@pytest.mark.parametrize('q_seqlen, kv_seqlen', [[128, 128], [512, 512]])
@pytest.mark.parametrize('attn_type', ['self', 'cross'])
@pytest.mark.parametrize('mask_type', ['causal', 'padding'])
@pytest.mark.parametrize('math_dtype', ['bfloat16', 'float16'])
def test_dot_product_attention(bs, hidden_size, num_heads, q_seqlen, kv_seqlen, attn_type,
mask_type, math_dtype):
"""
Test DotProductAttention Layer
"""
paddle.set_default_dtype(math_dtype)
rtol = 1e-4
atol = 2e-2
head_size = hidden_size // num_heads
self_attn_qkv_input = paddle.normal(mean=0.0,
std=0.02,
shape=(bs, q_seqlen, 3, num_heads,
head_size)).astype(math_dtype)
cross_attn_q_input = paddle.normal(mean=0.0,
std=0.02,
shape=(bs, q_seqlen, num_heads,
head_size)).astype(math_dtype)
cross_attn_kv_input = paddle.normal(mean=0.0,
std=0.02,
shape=(bs, kv_seqlen, 2, num_heads,
head_size)).astype(math_dtype)
q_actual_seqlen = paddle.randint(low=20, high=q_seqlen, shape=(bs,), dtype='int32')
kv_actual_seqlen = paddle.randint(low=20, high=kv_seqlen, shape=(bs,),
dtype='int32') if attn_type == 'cross' else q_actual_seqlen
attn_mask = paddle.ones(shape=(bs, 1, q_seqlen, kv_seqlen), dtype='bool')
grad_out = paddle.normal(mean=0.0, std=0.02,
shape=(bs, q_seqlen, num_heads, head_size)).astype('float32')
for i in range(0, bs):
grad_out[i, q_actual_seqlen[i]:, :, :] = 0
grad_out = grad_out.astype(math_dtype)
for i in range(0, bs):
attn_mask[i, 0, 0:q_actual_seqlen[i], 0:kv_actual_seqlen[i]] = False
norm_factor = math.sqrt(hidden_size // num_heads)
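    # norm_factor is sqrt(head_size), the standard scaled-dot-product-attention
    # scaling denominator.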
layer_te = te.DotProductAttention(norm_factor,
attention_dropout=0.0,
attn_mask_type=mask_type,
attention_type=attn_type,
backend='transformer_engine')
layer_pd = te.DotProductAttention(norm_factor,
attention_dropout=0.0,
attn_mask_type=mask_type,
attention_type=attn_type,
backend='paddle')
def calc_attn_output_and_grad(layer, q, kv, mask, dout):
_q = paddle.to_tensor(q, stop_gradient=False)
_kv = paddle.to_tensor(kv, stop_gradient=False) if kv is not None else None
out = layer(_q, _kv, mask)
out.backward(dout)
return out, _q.grad, _kv.grad if _kv is not None else None
if attn_type == 'self':
out, qkv_grad, _ = calc_attn_output_and_grad(layer_te, self_attn_qkv_input, None, attn_mask,
grad_out)
out_ref, qkv_grad_ref, _ = calc_attn_output_and_grad(layer_pd, self_attn_qkv_input, None,
attn_mask, grad_out)
valid_out_ref = paddle.full_like(out_ref, 0)
for i in range(0, bs):
valid_out_ref[i, 0:q_actual_seqlen[i], :, :] = out_ref[i, 0:q_actual_seqlen[i], :, :]
q_grad = qkv_grad[:, :, 0]
k_grad = qkv_grad[:, :, 1]
v_grad = qkv_grad[:, :, 2]
q_grad_ref = qkv_grad_ref[:, :, 0]
k_grad_ref = qkv_grad_ref[:, :, 1]
v_grad_ref = qkv_grad_ref[:, :, 2]
else:
out, q_grad, kv_grad = calc_attn_output_and_grad(layer_te, cross_attn_q_input,
cross_attn_kv_input, attn_mask, grad_out)
out_ref, q_grad_ref, kv_grad_ref = calc_attn_output_and_grad(layer_pd, cross_attn_q_input,
cross_attn_kv_input, attn_mask,
grad_out)
valid_out_ref = paddle.full_like(out_ref, 0)
for i in range(0, bs):
valid_out_ref[i, 0:q_actual_seqlen[i], :, :] = out_ref[i, 0:q_actual_seqlen[i], :, :]
k_grad = kv_grad[:, :, 0]
v_grad = kv_grad[:, :, 1]
k_grad_ref = kv_grad_ref[:, :, 0]
v_grad_ref = kv_grad_ref[:, :, 1]
valid_q_grad_ref = paddle.full_like(q_grad_ref, 0)
valid_k_grad_ref = paddle.full_like(k_grad_ref, 0)
valid_v_grad_ref = paddle.full_like(v_grad_ref, 0)
for i in range(0, bs):
valid_q_grad_ref[i, 0:q_actual_seqlen[i], :, :] = q_grad_ref[i, 0:q_actual_seqlen[i], :, :]
valid_k_grad_ref[i, 0:kv_actual_seqlen[i], :, :] = k_grad_ref[i,
0:kv_actual_seqlen[i], :, :]
valid_v_grad_ref[i, 0:kv_actual_seqlen[i], :, :] = v_grad_ref[i,
0:kv_actual_seqlen[i], :, :]
assert_allclose(out, valid_out_ref, rtol=rtol, atol=atol)
assert_allclose(q_grad, valid_q_grad_ref, rtol=rtol, atol=atol)
assert_allclose(k_grad, valid_k_grad_ref, rtol=rtol, atol=atol)
assert_allclose(v_grad, valid_v_grad_ref, rtol=rtol, atol=atol)
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="cuDNN fMHA requires Ampere+ GPU")
@pytest.mark.parametrize('bs', [1, 2, 8])
@pytest.mark.parametrize('hidden_size, num_heads, ffn_hidden_size', [[1024, 16, 4096]])
@pytest.mark.parametrize('q_seqlen, kv_seqlen', [[128, 128], [512, 512]])
@pytest.mark.parametrize('has_bias, no_dbias', [[False, True], [True, True], [True, False]])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('mask_type', ['causal', 'padding'])
@pytest.mark.parametrize('math_dtype', ['bfloat16', 'float16'])
@pytest.mark.parametrize('output_layernorm', [True, False])
@pytest.mark.parametrize('return_layernorm_output', [True, False])
def test_transformer_encoder_layer(bs, hidden_size, num_heads, ffn_hidden_size, has_bias, no_dbias,
no_wgrad, q_seqlen, kv_seqlen, mask_type, math_dtype,
output_layernorm, return_layernorm_output):
"""
Test Transformer Encoder Layer
"""
paddle.set_default_dtype(math_dtype)
rtol = 5e-2
atol = 5e-2
eps = 1e-3
encoder_input = paddle.uniform(shape=(bs, q_seqlen, hidden_size), dtype=math_dtype)
q_actual_seqlen = paddle.ones(shape=(bs,), dtype='int32') * q_seqlen
kv_actual_seqlen = q_actual_seqlen
attn_mask = paddle.ones(shape=(bs, 1, q_seqlen, kv_seqlen), dtype='bool')
grad_out = paddle.normal(mean=0.0, std=0.02,
shape=(bs, q_seqlen, hidden_size)).astype('float32')
for i in range(0, bs):
grad_out[i, q_actual_seqlen[i]:, :] = 0
grad_out = grad_out.astype(math_dtype)
for i in range(0, bs):
attn_mask[i, 0, 0:q_actual_seqlen[i], 0:kv_actual_seqlen[i]] = False
layer_te = te.TransformerLayer(hidden_size,
ffn_hidden_size,
num_heads,
layernorm_epsilon=eps,
hidden_dropout=0.0,
attention_dropout=0.0,
weight_attr=None,
bias_attr=None if has_bias else False,
self_attn_mask_type=mask_type,
apply_residual_connection_post_layernorm=return_layernorm_output,
output_layernorm=output_layernorm,
layer_type='encoder',
backend='transformer_engine')
layer_pd = te.TransformerLayer(hidden_size,
ffn_hidden_size,
num_heads,
layernorm_epsilon=eps,
hidden_dropout=0.0,
attention_dropout=0.0,
weight_attr=None,
bias_attr=None if has_bias else False,
self_attn_mask_type=mask_type,
apply_residual_connection_post_layernorm=return_layernorm_output,
output_layernorm=output_layernorm,
layer_type='encoder',
backend='paddle')
# MultiHeadAttention params
if output_layernorm:
layer_pd.self_attention.qkv.weight.copy_(layer_te.self_attention.qkv.weight.T, True)
layer_pd.self_attention.qkv.weight.stop_gradient = no_wgrad
layer_te.self_attention.qkv.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.qkv.bias.copy_(layer_te.self_attention.qkv.bias, True)
layer_pd.self_attention.qkv.bias.stop_gradient = no_dbias
layer_te.self_attention.qkv.bias.stop_gradient = no_dbias
else:
layer_pd.self_attention.layernorm_qkv.ln_weight.copy_(
layer_te.self_attention.layernorm_qkv.ln_weight, True)
layer_pd.self_attention.layernorm_qkv.ln_bias.copy_(
layer_te.self_attention.layernorm_qkv.ln_bias, True)
layer_pd.self_attention.layernorm_qkv.weight.copy_(
layer_te.self_attention.layernorm_qkv.weight.T, True)
layer_pd.self_attention.layernorm_qkv.ln_weight.stop_gradient = no_wgrad
layer_pd.self_attention.layernorm_qkv.ln_bias.stop_gradient = no_dbias
layer_pd.self_attention.layernorm_qkv.weight.stop_gradient = no_wgrad
layer_te.self_attention.layernorm_qkv.ln_weight.stop_gradient = no_wgrad
layer_te.self_attention.layernorm_qkv.ln_bias.stop_gradient = no_dbias
layer_te.self_attention.layernorm_qkv.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.layernorm_qkv.bias.copy_(
layer_te.self_attention.layernorm_qkv.bias, True)
layer_pd.self_attention.layernorm_qkv.bias.stop_gradient = no_dbias
layer_te.self_attention.layernorm_qkv.bias.stop_gradient = no_dbias
layer_pd.self_attention.proj.weight.copy_(layer_te.self_attention.proj.weight.T, True)
layer_pd.self_attention.proj.weight.stop_gradient = no_wgrad
layer_te.self_attention.proj.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.proj.bias.copy_(layer_te.self_attention.proj.bias, True)
layer_pd.self_attention.proj.bias.stop_gradient = no_dbias
layer_te.self_attention.proj.bias.stop_gradient = no_dbias
# LayerNorm MLP params
layer_pd.layernorm_mlp.ln_weight.copy_(layer_te.layernorm_mlp.ln_weight, True)
layer_pd.layernorm_mlp.ln_bias.copy_(layer_te.layernorm_mlp.ln_bias, True)
layer_pd.layernorm_mlp.fc1_weight.copy_(layer_te.layernorm_mlp.fc1_weight.T, True)
layer_pd.layernorm_mlp.fc2_weight.copy_(layer_te.layernorm_mlp.fc2_weight.T, True)
layer_pd.layernorm_mlp.ln_weight.stop_gradient = no_wgrad
layer_pd.layernorm_mlp.ln_bias.stop_gradient = no_dbias
layer_pd.layernorm_mlp.fc1_weight.stop_gradient = no_wgrad
layer_pd.layernorm_mlp.fc2_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.ln_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.ln_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc1_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.fc2_weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.layernorm_mlp.fc1_bias.copy_(layer_te.layernorm_mlp.fc1_bias, True)
layer_pd.layernorm_mlp.fc2_bias.copy_(layer_te.layernorm_mlp.fc2_bias, True)
layer_pd.layernorm_mlp.fc1_bias.stop_gradient = no_dbias
layer_pd.layernorm_mlp.fc2_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc1_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc2_bias.stop_gradient = no_dbias
if output_layernorm:
layer_pd.layernorm.weight.copy_(layer_te.layernorm.weight, True)
layer_pd.layernorm.bias.copy_(layer_te.layernorm.bias, True)
layer_pd.layernorm.weight.stop_gradient = no_wgrad
layer_pd.layernorm.bias.stop_gradient = no_dbias
layer_te.layernorm.weight.stop_gradient = no_wgrad
layer_te.layernorm.bias.stop_gradient = no_dbias
def calc_transformer_output_and_grad(layer, encoder_input, mask, dout):
_encoder_input = paddle.to_tensor(encoder_input, stop_gradient=False)
out = layer(_encoder_input, mask)
out.backward(dout)
return out, _encoder_input.grad
out_ref, grad_input_ref = calc_transformer_output_and_grad(layer_pd, encoder_input, attn_mask,
grad_out)
out, grad_input = calc_transformer_output_and_grad(layer_te, encoder_input, attn_mask, grad_out)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
assert_allclose(grad_input, grad_input_ref, rtol=rtol, atol=atol)
if not no_wgrad:
if output_layernorm:
assert_allclose(layer_te.self_attention.qkv.weight.grad,
layer_pd.self_attention.qkv.weight.grad.T,
rtol=rtol,
atol=atol)
else:
assert_allclose(layer_te.self_attention.layernorm_qkv.weight.grad,
layer_pd.self_attention.layernorm_qkv.weight.grad.T,
rtol=rtol,
atol=atol)
if not no_dbias:
if output_layernorm:
assert_allclose(layer_te.self_attention.qkv.bias.grad,
layer_pd.self_attention.qkv.bias.grad,
rtol=0.01,
atol=0.5)
else:
assert_allclose(layer_te.self_attention.layernorm_qkv.bias.grad,
layer_pd.self_attention.layernorm_qkv.bias.grad,
rtol=0.01,
atol=0.5)
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="cuDNN fMHA requires Ampere+ GPU")
@pytest.mark.parametrize('bs', [1, 2, 8])
@pytest.mark.parametrize('hidden_size, num_heads, ffn_hidden_size', [[1024, 16, 4096]])
@pytest.mark.parametrize('q_seqlen, kv_seqlen', [[128, 128], [512, 512]])
@pytest.mark.parametrize('has_bias, no_dbias', [[False, True], [True, True], [True, False]])
@pytest.mark.parametrize('no_wgrad', [True, False])
@pytest.mark.parametrize('mask_type', ['causal', 'padding'])
@pytest.mark.parametrize('math_dtype', ['bfloat16', 'float16'])
@pytest.mark.parametrize('output_layernorm', [True, False])
@pytest.mark.parametrize('return_layernorm_output', [True, False])
@pytest.mark.parametrize('recompute_core_attention', [True, False])
def test_transformer_decoder_layer(bs, hidden_size, num_heads, ffn_hidden_size, has_bias, no_dbias,
no_wgrad, q_seqlen, kv_seqlen, mask_type, math_dtype,
output_layernorm, return_layernorm_output,
recompute_core_attention):
"""
Test Transformer Decoder Layer
"""
paddle.set_default_dtype(math_dtype)
rtol = 5e-2
atol = 6e-2
eps = 1e-3
encoder_input = paddle.uniform(shape=(bs, q_seqlen, hidden_size), dtype=math_dtype)
encoder_output = paddle.uniform(shape=(bs, kv_seqlen, hidden_size), dtype=math_dtype)
q_actual_seqlen = paddle.ones(shape=(bs,), dtype='int32') * q_seqlen
kv_actual_seqlen = q_actual_seqlen
attn_mask = paddle.ones(shape=(bs, 1, q_seqlen, kv_seqlen), dtype='bool')
grad_out = paddle.normal(mean=0.0, std=0.2, shape=(bs, q_seqlen, hidden_size)).astype('float32')
for i in range(0, bs):
grad_out[i, q_actual_seqlen[i]:, :] = 0
grad_out = grad_out.astype(math_dtype)
for i in range(0, bs):
attn_mask[i, 0, 0:q_actual_seqlen[i], 0:kv_actual_seqlen[i]] = False
layer_te = te.TransformerLayer(hidden_size,
ffn_hidden_size,
num_heads,
layernorm_epsilon=eps,
hidden_dropout=0.0,
attention_dropout=0.0,
weight_attr=None,
bias_attr=None if has_bias else False,
self_attn_mask_type=mask_type,
apply_residual_connection_post_layernorm=return_layernorm_output,
output_layernorm=output_layernorm,
layer_type='decoder',
backend='transformer_engine')
layer_pd = te.TransformerLayer(hidden_size,
ffn_hidden_size,
num_heads,
layernorm_epsilon=eps,
hidden_dropout=0.0,
attention_dropout=0.0,
weight_attr=None,
bias_attr=None if has_bias else False,
self_attn_mask_type=mask_type,
apply_residual_connection_post_layernorm=return_layernorm_output,
output_layernorm=output_layernorm,
layer_type='decoder',
backend='paddle')
# MultiHeadAttention params - self attn
if output_layernorm:
layer_pd.self_attention.qkv.weight.copy_(layer_te.self_attention.qkv.weight.T, True)
layer_pd.self_attention.qkv.weight.stop_gradient = no_wgrad
layer_te.self_attention.qkv.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.qkv.bias.copy_(layer_te.self_attention.qkv.bias, True)
layer_pd.self_attention.qkv.bias.stop_gradient = no_dbias
layer_te.self_attention.qkv.bias.stop_gradient = no_dbias
else:
layer_pd.self_attention.layernorm_qkv.ln_weight.copy_(
layer_te.self_attention.layernorm_qkv.ln_weight, True)
layer_pd.self_attention.layernorm_qkv.ln_bias.copy_(
layer_te.self_attention.layernorm_qkv.ln_bias, True)
layer_pd.self_attention.layernorm_qkv.weight.copy_(
layer_te.self_attention.layernorm_qkv.weight.T, True)
layer_pd.self_attention.layernorm_qkv.ln_weight.stop_gradient = no_wgrad
layer_pd.self_attention.layernorm_qkv.ln_bias.stop_gradient = no_dbias
layer_pd.self_attention.layernorm_qkv.weight.stop_gradient = no_wgrad
layer_te.self_attention.layernorm_qkv.ln_weight.stop_gradient = no_wgrad
layer_te.self_attention.layernorm_qkv.ln_bias.stop_gradient = no_dbias
layer_te.self_attention.layernorm_qkv.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.layernorm_qkv.bias.copy_(
layer_te.self_attention.layernorm_qkv.bias, True)
layer_pd.self_attention.layernorm_qkv.bias.stop_gradient = no_dbias
layer_te.self_attention.layernorm_qkv.bias.stop_gradient = no_dbias
layer_pd.self_attention.proj.weight.copy_(layer_te.self_attention.proj.weight.T, True)
layer_pd.self_attention.proj.weight.stop_gradient = no_wgrad
layer_te.self_attention.proj.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.self_attention.proj.bias.copy_(layer_te.self_attention.proj.bias, True)
layer_pd.self_attention.proj.bias.stop_gradient = no_dbias
layer_te.self_attention.proj.bias.stop_gradient = no_dbias
# MultiHeadAttention params - cross attn
layer_pd.inter_attention.layernorm_query.ln_weight.copy_(
layer_te.inter_attention.layernorm_query.ln_weight, True)
layer_pd.inter_attention.layernorm_query.ln_bias.copy_(
layer_te.inter_attention.layernorm_query.ln_bias, True)
layer_pd.inter_attention.layernorm_query.weight.copy_(
layer_te.inter_attention.layernorm_query.weight.T, True)
layer_pd.inter_attention.layernorm_query.ln_weight.stop_gradient = no_wgrad
layer_pd.inter_attention.layernorm_query.ln_bias.stop_gradient = no_dbias
layer_pd.inter_attention.layernorm_query.weight.stop_gradient = no_wgrad
layer_te.inter_attention.layernorm_query.ln_weight.stop_gradient = no_wgrad
layer_te.inter_attention.layernorm_query.ln_bias.stop_gradient = no_dbias
layer_te.inter_attention.layernorm_query.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.inter_attention.layernorm_query.bias.copy_(
layer_te.inter_attention.layernorm_query.bias, True)
layer_pd.inter_attention.layernorm_query.bias.stop_gradient = no_dbias
layer_te.inter_attention.layernorm_query.bias.stop_gradient = no_dbias
layer_pd.inter_attention.key_value.weight.copy_(layer_te.inter_attention.key_value.weight.T,
True)
layer_pd.inter_attention.key_value.weight.stop_gradient = no_wgrad
layer_te.inter_attention.key_value.weight.stop_gradient = no_wgrad
layer_pd.inter_attention.proj.weight.copy_(layer_te.inter_attention.proj.weight.T, True)
layer_pd.inter_attention.proj.weight.stop_gradient = no_wgrad
layer_te.inter_attention.proj.weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.inter_attention.key_value.bias.copy_(layer_te.inter_attention.key_value.bias, True)
layer_pd.inter_attention.key_value.bias.stop_gradient = no_dbias
layer_te.inter_attention.key_value.bias.stop_gradient = no_dbias
layer_pd.inter_attention.proj.bias.copy_(layer_te.inter_attention.proj.bias, True)
layer_pd.inter_attention.proj.bias.stop_gradient = no_dbias
layer_te.inter_attention.proj.bias.stop_gradient = no_dbias
# LayerNorm MLP params
layer_pd.layernorm_mlp.ln_weight.copy_(layer_te.layernorm_mlp.ln_weight, True)
layer_pd.layernorm_mlp.ln_bias.copy_(layer_te.layernorm_mlp.ln_bias, True)
layer_pd.layernorm_mlp.fc1_weight.copy_(layer_te.layernorm_mlp.fc1_weight.T, True)
layer_pd.layernorm_mlp.fc2_weight.copy_(layer_te.layernorm_mlp.fc2_weight.T, True)
layer_pd.layernorm_mlp.ln_weight.stop_gradient = no_wgrad
layer_pd.layernorm_mlp.ln_bias.stop_gradient = no_dbias
layer_pd.layernorm_mlp.fc1_weight.stop_gradient = no_wgrad
layer_pd.layernorm_mlp.fc2_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.ln_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.ln_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc1_weight.stop_gradient = no_wgrad
layer_te.layernorm_mlp.fc2_weight.stop_gradient = no_wgrad
if has_bias:
layer_pd.layernorm_mlp.fc1_bias.copy_(layer_te.layernorm_mlp.fc1_bias, True)
layer_pd.layernorm_mlp.fc2_bias.copy_(layer_te.layernorm_mlp.fc2_bias, True)
layer_pd.layernorm_mlp.fc1_bias.stop_gradient = no_dbias
layer_pd.layernorm_mlp.fc2_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc1_bias.stop_gradient = no_dbias
layer_te.layernorm_mlp.fc2_bias.stop_gradient = no_dbias
if output_layernorm:
layer_pd.layernorm.weight.copy_(layer_te.layernorm.weight, True)
layer_pd.layernorm.bias.copy_(layer_te.layernorm.bias, True)
layer_pd.layernorm.weight.stop_gradient = no_wgrad
layer_pd.layernorm.bias.stop_gradient = no_dbias
layer_te.layernorm.weight.stop_gradient = no_wgrad
layer_te.layernorm.bias.stop_gradient = no_dbias
def calc_transformer_output_and_grad(layer,
encoder_input,
mask,
encoder_output,
enc_dec_attn_mask,
dout,
recompute_core_attention=False):
_encoder_input = paddle.to_tensor(encoder_input, stop_gradient=False)
_encoder_output = paddle.to_tensor(encoder_output, stop_gradient=False)
out = layer(_encoder_input,
mask,
_encoder_output,
enc_dec_attn_mask,
recompute_core_attention=recompute_core_attention)
out.backward(dout)
return out, _encoder_input.grad, _encoder_output.grad
out_ref, grad_encoder_input_ref, grad_encoder_output_ref = calc_transformer_output_and_grad(
layer_pd, encoder_input, attn_mask, encoder_output, attn_mask, grad_out)
out, grad_encoder_input, grad_encoder_output = calc_transformer_output_and_grad(
layer_te,
encoder_input,
attn_mask,
encoder_output,
attn_mask,
grad_out,
recompute_core_attention=recompute_core_attention)
assert_allclose(out, out_ref, rtol=rtol, atol=atol)
assert_allclose(grad_encoder_input, grad_encoder_input_ref, rtol=rtol, atol=atol)
assert_allclose(grad_encoder_output, grad_encoder_output_ref, rtol=rtol, atol=atol)
if not no_wgrad:
if output_layernorm:
assert_allclose(layer_te.self_attention.qkv.weight.grad,
layer_pd.self_attention.qkv.weight.grad.T,
rtol=rtol,
atol=atol)
else:
assert_allclose(layer_te.self_attention.layernorm_qkv.weight.grad,
layer_pd.self_attention.layernorm_qkv.weight.grad.T,
rtol=rtol,
atol=0.1)
assert_allclose(layer_te.inter_attention.layernorm_query.weight.grad,
layer_pd.inter_attention.layernorm_query.weight.grad.T,
rtol=rtol,
atol=atol)
if not no_dbias:
if output_layernorm:
assert_allclose(layer_te.self_attention.qkv.bias.grad,
layer_pd.self_attention.qkv.bias.grad,
rtol=0.01,
atol=0.6)
else:
assert_allclose(layer_te.self_attention.layernorm_qkv.bias.grad,
layer_pd.self_attention.layernorm_qkv.bias.grad,
rtol=0.01,
atol=0.5)
assert_allclose(layer_te.inter_attention.layernorm_query.bias.grad,
layer_pd.inter_attention.layernorm_query.bias.grad,
rtol=rtol,
atol=atol)
| TransformerEngine-main | tests/paddle/test_layers.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test TE operators"""
import struct
import numpy as np
import pytest
import paddle
import paddle.nn.functional as F
from utils import assert_allclose, create_fp8_meta
import transformer_engine # pylint: disable=unused-import
import transformer_engine_paddle as tex # pylint: disable=wrong-import-order
from transformer_engine.paddle.cpp_extensions import (
cast_to_fp8,
cast_from_fp8,
gemm,
fp8_gemm,
transpose,
cast_transpose,
cast_transpose_bgrad,
te_gelu,
gelu_fp8,
dgelu_cast_transpose_bgrad_fp8,
layernorm_fwd_fp8,
layernorm_fwd,
layernorm_bwd,
rmsnorm_fwd_fp8,
rmsnorm_fwd,
rmsnorm_bwd,
fused_attn_fwd_qkvpacked,
fused_attn_bwd_qkvpacked,
fused_attn_fwd_kvpacked,
fused_attn_bwd_kvpacked,
scaled_softmax_forward,
scaled_softmax_backward,
scaled_masked_softmax_forward,
scaled_masked_softmax_backward,
scaled_upper_triang_masked_softmax_forward,
scaled_upper_triang_masked_softmax_backward,
)
from transformer_engine.paddle.fp8 import is_fp8_available
from transformer_engine.paddle.constants import FP8FwdTensors
from transformer_engine.common.recipe import DelayedScaling
GEMM_CASES = [(256, 256, 512), (32, 32, 32), (16384, 1024, 2816), (16384, 2816, 1024),
(16384, 1024, 1024)]
is_fp8_supported, reason = is_fp8_available()
SELF_ATTN_CASES = [(32, 512, 16, 64), (32, 128, 16, 64)]
CROSS_ATTN_CASES = [(32, 128, 512, 16, 64)]
FLASH_ATTN_CASES = [(4, 1024, 16, 64), (2, 2048, 16, 128)]
ATTN_DTYPES = [tex.DType.kFloat16, tex.DType.kBFloat16]
@pytest.fixture(autouse=True)
def setup():
"""Setup random seed before each test"""
np.random.seed(10)
paddle.seed(11)
yield
def test_quantize_dequantize():
"""
Test cast_to_fp8 and cast_from_fp8
"""
a = paddle.rand(shape=(32, 32), dtype='float32')
# Init fp8_meta
fp8_meta = create_fp8_meta()
for fp8_dtype in [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2]:
a_fp8 = cast_to_fp8(a, fp8_meta, FP8FwdTensors.GEMM1_OUTPUT, otype=fp8_dtype)
b = cast_from_fp8(a_fp8,
fp8_meta,
FP8FwdTensors.GEMM1_OUTPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
assert_allclose(a, b, rtol=5e-2, atol=5e-2)
def copy_bits_from_float_to_uint16(f):
"""
    Reinterpret a float32 as its bit pattern and keep the upper 16 bits (bfloat16 truncation).
"""
return struct.unpack('<I', struct.pack('<f', f))[0] >> 16
def convert_float_to_uint16(float_list):
"""
    Convert a float32 array to the uint16 bit patterns of its bfloat16 (truncated) values.
"""
new_output = []
for x in np.nditer(float_list):
new_output.append(np.uint16(copy_bits_from_float_to_uint16(x)))
new_output = np.reshape(new_output, float_list.shape).view(np.uint16)
return new_output
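# Illustrative check: convert_float_to_uint16(np.ones((1,), dtype=np.float32))
# yields array([16256], dtype=uint16) -- 0x3F80, the bfloat16 bit pattern of 1.0.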
class TestTranspose:
"""
Test transpose operators
"""
@staticmethod
def test_transpose_bf16():
"""
Test BF16 transpose
"""
a = paddle.rand(shape=(16, 32), dtype='bfloat16')
a_transposed = transpose(a, otype=tex.DType.kBFloat16)
assert_allclose(a_transposed, a.T)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('fp8_dtype', [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2])
def test_transpose_fp8(fp8_dtype):
"""
Test FP8 transpose
"""
min_val = -8
max_val = 8
a = paddle.cast(paddle.randint(min_val, max_val, shape=(16, 32)), 'float32')
fp8_meta = create_fp8_meta()
a_fp8 = cast_to_fp8(a, fp8_meta, FP8FwdTensors.GEMM1_INPUT, otype=fp8_dtype)
a_fp8_transposed = transpose(a_fp8, otype=fp8_dtype)
a_transposed = cast_from_fp8(a_fp8_transposed,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
assert_allclose(a_transposed, a.T)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('fp8_dtype', [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2])
def test_cast_transpose(fp8_dtype):
"""
Test cast_transpose
"""
min_val = -8
max_val = 8
a = paddle.cast(paddle.randint(min_val, max_val, shape=(16, 32)), 'float32')
fp8_meta = create_fp8_meta()
a_fp8_casted, a_fp8_transposed = cast_transpose(a,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
otype=fp8_dtype)
a_transposed = cast_from_fp8(a_fp8_transposed,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
a_casted = cast_from_fp8(a_fp8_casted,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
assert_allclose(a_casted, a)
assert_allclose(a_transposed, a.T)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('fp8_dtype', [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2])
def test_cast_transpose_bgrad(fp8_dtype):
"""
Test cast_transpose_bgrad
"""
min_val = -8
max_val = 8
a = paddle.cast(paddle.randint(min_val, max_val, shape=(16, 32)), 'float32')
fp8_meta = create_fp8_meta()
bgrad, a_fp8_casted, a_fp8_transposed = cast_transpose_bgrad(a,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
otype=fp8_dtype)
a_transposed = cast_from_fp8(a_fp8_transposed,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
a_casted = cast_from_fp8(a_fp8_casted,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
assert_allclose(a_casted, a)
assert_allclose(a_transposed, a.T)
assert_allclose(bgrad, a.sum(axis=0))
class TestActivation:
"""
Test activation operators
"""
@staticmethod
def test_gelu_bf16():
"""
Test BF16 GELU Forward
"""
a = paddle.rand(shape=(16, 32), dtype='bfloat16') * 2 - 1
gelu_out = te_gelu(a, otype=tex.DType.kBFloat16)
gelu_ref = paddle.nn.GELU()(a)
assert_allclose(gelu_out, gelu_ref, rtol=1e-2)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('fp8_dtype', [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2])
def test_gelu_fp8(fp8_dtype):
"""
Test FP8 GELU Forward
"""
a = paddle.rand(shape=(16, 32), dtype='float32') * 2 - 1
fp8_meta = create_fp8_meta()
gelu_out_fp8 = gelu_fp8(a, fp8_meta, FP8FwdTensors.GEMM1_INPUT, otype=fp8_dtype)
gelu_out = cast_from_fp8(gelu_out_fp8,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
gelu_ref = paddle.nn.GELU()(a)
assert_allclose(gelu_out, gelu_ref, rtol=0.1, atol=0.01)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('fp8_dtype', [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2])
def test_gelu_bwd_fp8(fp8_dtype):
"""
Test FP8 GELU Backward
"""
# y = GELU(x), calculate ref
x = paddle.rand(shape=(16, 32), dtype='float32') * 2 - 1
x.stop_gradient = False
y = paddle.nn.GELU()(x)
y_grad = paddle.rand(shape=(16, 32), dtype='float32') * 2 - 1
paddle.autograd.backward([y], [y_grad], True)
# calculate fp8
fp8_meta = create_fp8_meta()
x_grad_fp8, x_grad_t_fp8, dbias = dgelu_cast_transpose_bgrad_fp8(y_grad,
x,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
otype=fp8_dtype)
x_grad = cast_from_fp8(x_grad_fp8,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
x_grad_t = cast_from_fp8(x_grad_t_fp8,
fp8_meta,
FP8FwdTensors.GEMM1_INPUT,
itype=fp8_dtype,
otype=tex.DType.kFloat32)
assert_allclose(x_grad, x.grad, rtol=0.1, atol=0.01)
assert_allclose(x_grad_t, x.grad.T, rtol=0.1, atol=0.01)
assert_allclose(dbias, x.grad.sum(axis=0), rtol=0.1, atol=0.01)
class TestGemm:
"""
Tests for gemm(cuBLASLt) operator
"""
@staticmethod
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="BF16 GEMM requires Ampere+ GPU")
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
def test_bf16(m, n, k):
"""
Test "TN" BF16 GEMM
"""
a = paddle.rand(shape=(m, k), dtype='bfloat16')
b = paddle.rand(shape=(n, k), dtype='bfloat16')
workspace = paddle.zeros(shape=[33_554_432], dtype='uint8')
ref_out = paddle.matmul(a, b.T)
# CublasLt inside tex.te_gemm assumes inputs are column major.
# Mathematically, A@B=C is equivalent to B^T@A^T=C^T, where X^T is the
# transpose of X.
# Here we perform "TN" GEMM in column major, i.e., b@a^T = C^T,
# which is equivalent to a@b^T = C in row major.
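        # Concretely: a has shape (m, k), b has shape (n, k), and the call below
        # produces an (m, n) result equal to a @ b.T, matching ref_out above.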
actual_out, _, _ = gemm(b, a, paddle.bfloat16, workspace, False, None, False, False, "TN",
None, None, False)
assert_allclose(actual_out, ref_out)
@staticmethod
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="BF16 GEMM requires Ampere+ GPU")
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
def test_bf16_inplace(m, n, k):
"""
Test "TN" BF16 GEMM, with accumulate=True
"""
min_val = -16
max_val = 16
a = paddle.rand(shape=(m, k), dtype='bfloat16')
b = paddle.rand(shape=(n, k), dtype='bfloat16')
c = paddle.cast(paddle.randint(min_val, max_val, shape=(m, n)), 'bfloat16')
workspace = paddle.zeros(shape=[33_554_432], dtype='uint8')
ref_out = c + paddle.matmul(a, b.T)
actual_out = paddle.clone(c)
_, _, _ = gemm(b, a, paddle.bfloat16, workspace, False, None, False, True, "TN", actual_out,
None, False)
assert_allclose(actual_out, ref_out, rtol=5e-2, atol=5e-2)
@staticmethod
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
def test_fp8_randint(m, n, k):
"""
Test "TN" FP8 GEMM
"""
min_val = -8
max_val = 8
fp8_dtype = tex.DType.kFloat8E4M3
out_dtype = paddle.float32
fp8_meta = create_fp8_meta(num_gemms=1)
a = paddle.cast(paddle.randint(min_val, max_val, shape=(m, k)), 'float32')
a_casted = cast_to_fp8(a, fp8_meta, FP8FwdTensors.GEMM1_INPUT, otype=fp8_dtype)
b = paddle.cast(paddle.randint(min_val, max_val, shape=(n, k)), 'float32')
b_casted = cast_to_fp8(b, fp8_meta, FP8FwdTensors.GEMM1_WEIGHT, otype=fp8_dtype)
workspace = paddle.zeros(shape=[33_554_432], dtype='uint8')
ref_out = paddle.matmul(a, b.T)
actual_out = fp8_gemm(b_casted, fp8_meta.scale_inv, FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype,
a_casted, fp8_meta.scale_inv, FP8FwdTensors.GEMM1_INPUT, fp8_dtype,
out_dtype, workspace)
assert_allclose(actual_out, ref_out)
class TestLayerNorm:
"""
Test layernorm operators
"""
@staticmethod
def calc_fwd_ref(x, eps, gamma, beta):
"""
Calculate reference using paddle layer_norm op
"""
y = paddle.nn.functional.layer_norm(x=x,
normalized_shape=x.shape[1:],
weight=gamma,
bias=beta,
epsilon=eps)
mean = paddle.mean(x, axis=-1)
var = paddle.var(x, axis=-1)
inv_var = paddle.sqrt(1. / var)
return y, mean, inv_var
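    # Note: this reference omits eps and relies on Paddle's default (unbiased) variance,
    # so mean / inv_var only approximate the fused kernel's statistics; the forward test
    # below therefore compares them with relatively loose tolerances.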
@staticmethod
def calc_bwd_ref(x, eps, gamma, beta, dy):
"""
Calculate reference using paddle layer_norm op
"""
x.stop_gradient = False
gamma.stop_gradient = False
beta.stop_gradient = False
y = paddle.nn.functional.layer_norm(x=x,
normalized_shape=x.shape[1:],
weight=gamma,
bias=beta,
epsilon=eps)
paddle.autograd.backward([y], [dy], True)
return x.grad, gamma.grad, beta.grad
def test_layernorm_fwd(self):
"""
Test BF16 LayerNorm Forward
"""
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='bfloat16')
gamma = paddle.uniform(shape=(H,), dtype='bfloat16')
beta = paddle.uniform(shape=(H,), dtype='bfloat16')
y, mu, rsigma = layernorm_fwd(x, gamma, beta, eps, tex.DType.kBFloat16)
y_ref, mu_ref, rsigma_ref = self.calc_fwd_ref(x, eps, gamma, beta)
assert_allclose(y, y_ref, rtol=1e-4, atol=1e-4)
assert_allclose(mu, mu_ref, rtol=1e-3, atol=1e-3)
assert_allclose(rsigma, rsigma_ref, rtol=5e-2, atol=5e-2)
@staticmethod
def test_layernorm_fwd_fp8():
"""
Test FP8 LayerNorm Forward
"""
fp8_dtype = tex.DType.kFloat8E4M3
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='float32')
gamma = paddle.uniform(shape=(H,), dtype='float32')
beta = paddle.uniform(shape=(H,), dtype='float32')
fp8_tensor = FP8FwdTensors.GEMM1_INPUT
fp8_meta = create_fp8_meta()
y_ref, mu_ref, rsigma_ref = layernorm_fwd(x, gamma, beta, eps, tex.DType.kFloat32)
y_fp8, mu, rsigma = layernorm_fwd_fp8(x, gamma, beta, eps, fp8_meta, fp8_tensor, fp8_dtype)
y = cast_from_fp8(y_fp8, fp8_meta, fp8_tensor, itype=fp8_dtype, otype=tex.DType.kFloat32)
assert_allclose(y, y_ref, rtol=0.1, atol=0.01)
assert_allclose(mu, mu_ref)
assert_allclose(rsigma, rsigma_ref)
def test_layernorm_bwd(self):
"""
Test BF16 LayerNorm Backward
"""
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='bfloat16')
dy = paddle.uniform(shape=(N, H), dtype='bfloat16')
gamma = paddle.uniform(shape=(H,), dtype='bfloat16')
beta = paddle.uniform(shape=(H,), dtype='bfloat16')
dx_ref, dgamma_ref, dbeta_ref = self.calc_bwd_ref(x, eps, gamma, beta, dy)
_, mu, rsigma = layernorm_fwd(x, gamma, beta, eps, tex.DType.kBFloat16)
dx, dgamma, dbeta = layernorm_bwd(dy, x, mu, rsigma, gamma)
assert_allclose(dx, dx_ref, rtol=1e-5, atol=1e-5)
assert_allclose(dgamma, dgamma_ref, rtol=1e-5, atol=1e-5)
assert_allclose(dbeta, dbeta_ref, rtol=1e-5, atol=1e-5)
class TestRMSNorm:
"""
Test rmsnorm operators
"""
@staticmethod
def calc_fwd_ref(x, eps, gamma):
"""
Calculate rmsnorm reference using paddle op
"""
norm = paddle.rsqrt(paddle.mean(x**2, axis=-1, keepdim=True) + eps)
y = x * norm * gamma
return y
def calc_bwd_ref(self, x, eps, gamma, dy):
"""
Calculate rmsnorm bwd reference using paddle op
"""
x.stop_gradient = False
gamma.stop_gradient = False
y = self.calc_fwd_ref(x, eps, gamma)
paddle.autograd.backward([y], [dy], True)
return x.grad, gamma.grad
def test_rmsnorm_fwd(self):
"""
Test BF16 RMSNorm Forward
"""
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='bfloat16')
gamma = paddle.uniform(shape=(H,), dtype='bfloat16')
y, _ = rmsnorm_fwd(x, gamma, eps, tex.DType.kBFloat16)
y_ref = self.calc_fwd_ref(x, eps, gamma)
assert_allclose(y, y_ref, rtol=1e-2, atol=1e-2)
@staticmethod
def test_rmsnorm_fwd_fp8():
"""
Test FP8 RMSNorm Forward
"""
fp8_dtype = tex.DType.kFloat8E4M3
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='float32')
gamma = paddle.uniform(shape=(H,), dtype='float32')
fp8_tensor = FP8FwdTensors.GEMM1_INPUT
fp8_meta = create_fp8_meta()
y_ref, rsigma_ref = rmsnorm_fwd(x, gamma, eps, tex.DType.kFloat32)
y_fp8, rsigma = rmsnorm_fwd_fp8(x, gamma, eps, fp8_meta, fp8_tensor, fp8_dtype)
y = cast_from_fp8(y_fp8, fp8_meta, fp8_tensor, itype=fp8_dtype, otype=tex.DType.kFloat32)
assert_allclose(y, y_ref, rtol=0.1, atol=0.01)
assert_allclose(rsigma, rsigma_ref)
def test_rmsnorm_bwd(self):
"""
Test BF16 RMSNorm Backward
"""
N, H = (16, 32)
eps = 1e-3
x = paddle.uniform(shape=(N, H), dtype='bfloat16')
dy = paddle.uniform(shape=(N, H), dtype='bfloat16')
gamma = paddle.uniform(shape=(H,), dtype='bfloat16')
dx_ref, dgamma_ref = self.calc_bwd_ref(x, eps, gamma, dy)
_, rsigma = rmsnorm_fwd(x, gamma, eps, tex.DType.kBFloat16)
dx, dgamma = rmsnorm_bwd(dy, x, rsigma, gamma)
assert_allclose(dx, dx_ref, rtol=1e-2, atol=1e-2)
assert_allclose(dgamma, dgamma_ref, rtol=1e-2, atol=5e-2)
class TestFusedAttn:
"""
Test fused attention operators
"""
def set_input(self, b, s_q, s_kv, h, d, dtype, attn_mode='self_attn', is_causal_masking=False):
"""
set test input
"""
def _random(shape):
if self.dtype == "bfloat16":
data = np.random.normal(loc=0.0, scale=0.02, size=shape).astype("float32")
return convert_float_to_uint16(data)
return np.random.normal(loc=0.0, scale=0.02, size=shape).astype(self.dtype)
self.batch_size = b
self.q_seqlen = s_q
self.kv_seqlen = s_kv
self.num_heads = h
self.head_size = d
self.dropout_prob = 0.0
self.scaling_factor = 1.0 / np.sqrt(d)
self.q_shape = (b, s_q, h, d)
self.kv_shape = (b, s_kv, h, d)
self.fuse_qkv_shape = (b, s_q, 3, h, d)
self.fuse_kv_shape = (b, s_kv, 2, h, d)
self.bias_shape = (1, h, s_q, s_kv)
self.attn_mode = attn_mode
self.dtype = dtype
self.is_causal_masking = is_causal_masking
self.q = _random(self.q_shape)
if self.attn_mode == "self_attn":
self.kv = self.q
else:
self.kv = _random(self.kv_shape)
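# Randomize the per-sample valid sequence lengths and build the cumulative-length arrays
# (prefixed with 0) that are passed to the fused attention kernels.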
self.q_actual_seqlen = np.random.randint(
low=20,
high=self.q_seqlen,
size=(self.batch_size,),
dtype=np.int32,
)
self.kv_actual_seqlen = self.q_actual_seqlen
self.q_cu_seqlen = np.cumsum(self.q_actual_seqlen)
self.q_cu_seqlen = np.insert(self.q_cu_seqlen, 0, 0)
self.kv_cu_seqlen = np.cumsum(self.kv_actual_seqlen)
self.kv_cu_seqlen = np.insert(self.kv_cu_seqlen, 0, 0)
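# Build the padding mask: 1 marks valid positions; for causal masking, additionally zero
# out everything strictly above the diagonal, row by row.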
self.attn_mask = np.zeros(
shape=(self.batch_size, 1, self.q_seqlen, self.kv_seqlen),
dtype=np.int32,
)
for i in range(0, self.batch_size):
self.attn_mask[i, 0, 0:self.q_actual_seqlen[i], 0:self.kv_actual_seqlen[i]] = 1
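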
if self.is_causal_masking:
assert attn_mode == "self_attn", "causal masking is only supported for self attention"
col_beg, col_end = 1, self.q_actual_seqlen[i]
for row in range(0, self.q_actual_seqlen[i]):
self.attn_mask[i, 0, row, col_beg:col_end] = 0
col_beg += 1
dout = _random((self.batch_size, self.q_seqlen, self.num_heads, self.head_size))
self.dout = paddle.to_tensor(dout, dtype=self.dtype)
def _get_reference_out(self):
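# Unfused reference: scaled QK^T, additive mask, softmax, optional dropout, then matmul
# with V; the input gradients are obtained via autograd on this graph.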
paddle.disable_static(place=paddle.CUDAPlace(0))
q_tensor = paddle.to_tensor(self.q, stop_gradient=False)
k_tensor = paddle.to_tensor(self.kv, stop_gradient=False)
v_tensor = paddle.to_tensor(self.kv, stop_gradient=False)
q_out = paddle.transpose(x=q_tensor, perm=[0, 2, 1, 3]) # [b, s, h, d] -> [b, h, s, d]
k_out = paddle.transpose(x=k_tensor, perm=[0, 2, 1, 3]) # [b, s, h, d] -> [b, h, s, d]
v_out = paddle.transpose(x=v_tensor, perm=[0, 2, 1, 3]) # [b, s, h, d] -> [b, h, s, d]
qk_out = paddle.matmul(
x=q_out * self.scaling_factor,
y=k_out,
transpose_x=False,
transpose_y=True,
)
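# Convert the 0/1 mask into an additive bias: valid positions get 0, padded positions get -1e4.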
attn_mask = paddle.to_tensor(self.attn_mask, stop_gradient=True)
attn_mask = (paddle.cast(attn_mask, self.dtype) - 1.0) * 1e4
attn_mask_out = qk_out + attn_mask
softmax_out = F.softmax(attn_mask_out)
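# dropout_prob is fixed to 0.0 in set_input, so this branch is effectively never taken;
# note that self.training is not set anywhere, so it would need to be defined before
# enabling dropout in these tests.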
if self.dropout_prob:
dropout_out = F.dropout(
softmax_out,
self.dropout_prob,
training=self.training,
mode="upscale_in_train",
)
qkv_out = paddle.matmul(dropout_out, v_out)
else:
qkv_out = paddle.matmul(softmax_out, v_out)
out = paddle.transpose(qkv_out, perm=[0, 2, 1, 3]) # [b, h, s, d] -> [b, s, h, d]
paddle.autograd.backward(
[out],
[self.dout],
retain_graph=True,
)
return out, q_tensor.grad, k_tensor.grad, v_tensor.grad
def _get_fused_attention_out(self):
paddle.disable_static(place=paddle.CUDAPlace(0))
if self.attn_mode == "self_attn":
qkv = np.stack([self.q, self.kv, self.kv], axis=2) # [b, s, 3, h, d]
qkv_tensor = paddle.to_tensor(qkv, stop_gradient=False)
else:
q_tensor = paddle.to_tensor(self.q, stop_gradient=False)
kv = np.stack([self.kv, self.kv], axis=2) # [b, s, 2, h, d]
kv_tensor = paddle.to_tensor(kv, stop_gradient=False)
q_cu_seqlen_tensor = paddle.to_tensor(self.q_cu_seqlen, dtype="int32", stop_gradient=True)
kv_cu_seqlen_tensor = paddle.to_tensor(self.kv_cu_seqlen, dtype="int32", stop_gradient=True)
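# rng_state is required by the fused attention API; since dropout is disabled in these
# tests, a zero placeholder suffices.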
rng_state = paddle.zeros((2,), dtype=np.int64)
qkv_dtype = tex.DType.kBFloat16 if self.dtype == "bfloat16" else tex.DType.kFloat16
out, softmax_aux_tensor, q_grad, k_grad, v_grad = None, None, None, None, None
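# Self-attention uses the packed-QKV fused kernels and splits dqkv back into per-tensor
# gradients; cross-attention uses the packed-KV variants below.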
if self.attn_mode == 'self_attn':
out, softmax_aux_tensor = fused_attn_fwd_qkvpacked(
qkv_tensor,
q_cu_seqlen_tensor,
rng_state,
is_training=True,
max_seqlen=self.q_seqlen,
qkv_dtype=qkv_dtype,
Bias=None,
attn_scale=self.scaling_factor,
dropout=self.dropout_prob,
set_zero=False,
attn_mask_type="causal" if self.is_causal_masking else "padding")
dqkv, _ = fused_attn_bwd_qkvpacked(
qkv_tensor,
q_cu_seqlen_tensor,
rng_state,
out,
self.dout,
softmax_aux_tensor,
max_seqlen=self.q_seqlen,
qkv_dtype=qkv_dtype,
attn_scale=self.scaling_factor,
dropout=self.dropout_prob,
set_zero=False,
attn_mask_type="causal" if self.is_causal_masking else "padding")
q_grad = dqkv[:, :, 0, :, :]
k_grad = dqkv[:, :, 1, :, :]
v_grad = dqkv[:, :, 2, :, :]
else: # attn_mode == 'cross_attn'
out, softmax_aux_tensor = fused_attn_fwd_kvpacked(q_tensor,
kv_tensor,
q_cu_seqlen_tensor,
kv_cu_seqlen_tensor,
rng_state,
is_training=True,
max_seqlen_q=self.q_seqlen,
max_seqlen_kv=self.kv_seqlen,
qkv_dtype=qkv_dtype,
Bias=None,
attn_scale=self.scaling_factor,
dropout=self.dropout_prob,
set_zero=False)
dq, dkv, _ = fused_attn_bwd_kvpacked(q_tensor,
kv_tensor,
q_cu_seqlen_tensor,
kv_cu_seqlen_tensor,
rng_state,
out,
self.dout,
softmax_aux_tensor,
max_seqlen_q=self.q_seqlen,
max_seqlen_kv=self.kv_seqlen,
qkv_dtype=qkv_dtype,
attn_scale=self.scaling_factor,
dropout=self.dropout_prob,
set_zero=False)
q_grad = dq
k_grad = dkv[:, :, 0, :, :]
v_grad = dkv[:, :, 1, :, :]
return out, q_grad, k_grad, v_grad
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() not in ((8, 0), (9, 0)),
reason="cuDNN fMHA requires Ampere and Hopper GPU")
@pytest.mark.parametrize('b, s, h, d', SELF_ATTN_CASES)
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
@pytest.mark.parametrize('is_causal_masking', [True, False])
def test_self_attn_forward_backward(self, b, s, h, d, dtype, is_causal_masking):
"""
test self attention forward + backward
"""
self.set_input(b, s, s, h, d, dtype, "self_attn", is_causal_masking)
reference_out, q_grad_ref, k_grad_ref, v_grad_ref = self._get_reference_out()
fused_attention_out, q_grad, k_grad, v_grad = self._get_fused_attention_out()
assert_allclose(reference_out, fused_attention_out, rtol=1e-3, atol=1e-2)
assert_allclose(q_grad_ref, q_grad, rtol=1e-3, atol=1e-2)
assert_allclose(k_grad_ref, k_grad, rtol=1e-3, atol=1e-2)
assert_allclose(v_grad_ref, v_grad, rtol=1e-3, atol=1e-2)
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() not in ((8, 0), (9, 0)),
reason="cuDNN fMHA requires Ampere and Hopper GPU")
@pytest.mark.parametrize('b, s_q, s_kv, h, d', CROSS_ATTN_CASES)
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
def test_cross_attn_forward_backward(self, b, s_q, s_kv, h, d, dtype):
"""
test cross attention forward + backward
"""
self.set_input(b, s_q, s_kv, h, d, dtype, "cross_attn")
reference_out, q_grad_ref, k_grad_ref, v_grad_ref = self._get_reference_out()
fused_attention_out, q_grad, k_grad, v_grad = self._get_fused_attention_out()
assert_allclose(reference_out, fused_attention_out, rtol=1e-3, atol=1e-2)
assert_allclose(q_grad_ref, q_grad, rtol=1e-3, atol=1e-2)
assert_allclose(k_grad_ref, k_grad, rtol=1e-3, atol=1e-2)
assert_allclose(v_grad_ref, v_grad, rtol=1e-3, atol=1e-2)
@pytest.mark.skipif(paddle.device.cuda.get_device_capability() < (8, 0),
reason="cuDNN fMHA requires Ampere+ GPU")
@pytest.mark.parametrize('b, s, h, d', FLASH_ATTN_CASES)
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
@pytest.mark.parametrize('is_causal_masking', [True])
def test_flash_attn_forward_backward(self, b, s, h, d, dtype, is_causal_masking):
"""
test flash attention forward + backward
"""
self.set_input(b, s, s, h, d, dtype, "self_attn", is_causal_masking)
reference_out, q_grad_ref, k_grad_ref, v_grad_ref = self._get_reference_out()
fused_attention_out, q_grad, k_grad, v_grad = self._get_fused_attention_out()
assert_allclose(reference_out, fused_attention_out, rtol=1e-3, atol=1e-2)
assert_allclose(q_grad_ref, q_grad, rtol=1e-3, atol=1e-2)
assert_allclose(k_grad_ref, k_grad, rtol=1e-3, atol=1e-2)
assert_allclose(v_grad_ref, v_grad, rtol=1e-3, atol=1e-2)
class TestSoftmax:
"""
Test softmax operators
"""
@staticmethod
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
def test_scaled_softmax_fwd_bwd(dtype):
"""test scaled softmax"""
B, H, S = (16, 4, 32)
scale = 0.8
x = paddle.uniform(shape=(B, H, S, S), dtype=dtype)
x.stop_gradient = False
dy = paddle.uniform(shape=(B, H, S, S), dtype=dtype)
y_ref = F.softmax(scale * x)
y = scaled_softmax_forward(x, scale)
paddle.autograd.backward([y_ref], [dy], True)
dx_ref = x.grad
dx = scaled_softmax_backward(dy, y, scale)
assert_allclose(y_ref, y, rtol=1e-4, atol=1e-3)
assert_allclose(dx_ref, dx, rtol=1e-4, atol=1e-3)
@staticmethod
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
def test_scaled_masked_softmax_fwd_bwd(dtype):
"""test scaled masked softmax"""
B, H, S = (16, 4, 32)
scale = 0.8
x = paddle.uniform(shape=(B, H, S, S), dtype=dtype)
x.stop_gradient = False
dy = paddle.uniform(shape=(B, H, S, S), dtype=dtype)
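# The mask selects the positions the fused kernel should mask out (x > 0.3); the reference
# reproduces the same effect with a -1e4 additive bias at exactly those positions.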
mask = paddle.reshape(x[0, 0] > 0.3, shape=(1, 1, S, S))
mask_flipped = x[0, 0] <= 0.3
mask_ref = (mask_flipped.astype(dtype) - 1.0) * 1e4
y_ref = F.softmax(scale * x + mask_ref)
y = scaled_masked_softmax_forward(x, mask, scale)
paddle.autograd.backward([y_ref], [dy], True)
dx_ref = x.grad
dx = scaled_masked_softmax_backward(dy, y, scale)
assert_allclose(y_ref, y, rtol=1e-4, atol=1e-3)
assert_allclose(dx_ref, dx, rtol=1e-4, atol=1e-3)
@staticmethod
@pytest.mark.parametrize('dtype', ['float16', 'bfloat16'])
def test_scaled_upper_triang_masked_softmax_fwd_bwd(dtype):
"""test scaled upper triang masked softmax"""
B, S = (16, 32)
scale = 0.8
x = paddle.uniform(shape=(B, S, S), dtype=dtype)
x.stop_gradient = False
dy = paddle.uniform(shape=(B, S, S), dtype=dtype)
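# Reference causal mask: positions strictly above the diagonal receive a -1e4 additive
# bias; the fused upper-triangular kernel applies the equivalent masking internally.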
mask = paddle.ones((S, S), dtype='int32')
col_beg, col_end = 1, S
for row in range(0, S):
mask[row, col_beg:col_end] = 0
col_beg += 1
mask_ref = (mask.astype(dtype) - 1.0) * 1e4
y_ref = F.softmax(scale * x + mask_ref)
y = scaled_upper_triang_masked_softmax_forward(x, scale)
paddle.autograd.backward([y_ref], [dy], True)
dx_ref = x.grad
dx = scaled_upper_triang_masked_softmax_backward(dy, y, scale)
assert_allclose(y_ref, y, rtol=1e-4, atol=5e-3)
assert_allclose(dx_ref, dx, rtol=1e-4, atol=5e-3)
def test_update_scale():
"""Test update_scale"""
num_gemm = 6
recipe = DelayedScaling()
fp8_max = recipe.fp8_format.value.max_fwd
amax_tensor = paddle.rand(shape=[num_gemm], dtype='float32') * fp8_max
scale_tensor = paddle.ones(shape=[num_gemm], dtype='float32')
def calc_ref(amax, scale, fp8_max, margin=0):
"""Calculate reference scale"""
exp = paddle.floor(paddle.log2(fp8_max / amax)) - margin
sf = paddle.round(2**paddle.abs(exp))
sf = paddle.where(amax > 0.0, sf, scale)
sf = paddle.where(paddle.isfinite(amax), sf, scale)
sf = paddle.where(exp < 0, 1 / sf, sf)
return sf
scale_ref = calc_ref(amax_tensor, scale_tensor, fp8_max, 0.)
scale_actual = tex.update_scale(amax_tensor, scale_tensor, fp8_max, 0.)
assert_allclose(scale_ref, scale_actual, rtol=1e-5, atol=1e-5)
| TransformerEngine-main | tests/paddle/test_operators.py |