version (stringclasses, 25 values) | code (stringlengths, 75 to 178k) | apis (sequence) | full_version (stringlengths, 1 to 6) | repo_name (stringlengths, 9 to 78) | hexsha (stringlengths, 40)
---|---|---|---|---|---
1.10 | import os
import torch
import torch.nn as nn
import torch.quantization
from torch.utils.mobile_optimizer import optimize_for_mobile
from openunmix import utils
from openunmix import model
from openunmix.model import OpenUnmix
target_urls_umxhq = {
"bass": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/bass-8d85a5bd.pth",
"drums": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/drums-9619578f.pth",
"other": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/other-b52fbbf7.pth",
"vocals": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/vocals-b62c91ce.pth",
}
target_urls_umxl = {
"bass": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/bass-2ca1ce51.pth",
"drums": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/drums-69e0ebd4.pth",
"other": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/other-c8c5b3e6.pth",
"vocals": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/vocals-bccbd9aa.pth",
}
def get_umx_models(
target_urls, hidden_size=512, targets=None, device="cpu", pretrained=True
):
"""Download openunmix pretrained models
Args:
target_urls: dict with the link to download the model for bass, drums, other and vocals
hidden_size: size for bottleneck layer
targets: list of stems
device: the device on which the model will be used
pretrained: boolean for pretrained weights
Returns:
target_models: list with all the models
"""
if targets is None:
targets = ["vocals", "drums", "bass", "other"]
# determine the maximum frequency bin for a 16 kHz bandwidth model at a 44.1 kHz sample rate
max_bin = int(utils.bandwidth_to_max_bin(rate=44100.0, n_fft=4096, bandwidth=16000))
target_models = {}
for target in targets:
# load open unmix model
target_unmix = OpenUnmix(
nb_bins=4096 // 2 + 1,
nb_channels=2,
hidden_size=hidden_size,
max_bin=max_bin,
)
# load the pretrained weights when requested
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
target_urls[target], map_location=device
)
target_unmix.load_state_dict(state_dict, strict=False)
target_unmix.eval()
target_unmix.to(device)
target_models[target] = target_unmix
return target_models
def create_separator(target_models, device="cpu"):
"""Create separator class which contains all models
Args:
target_models: list of all models
device: the device on which the model will be used
Returns:
separator: separator class which contains all models
"""
separator = (
model.Separator(
target_models=target_models,
niter=1,
residual=False,
n_fft=4096,
n_hop=1024,
nb_channels=2,
sample_rate=44100.0,
filterbank="asteroid",
)
.eval()
.to(device)
)
return separator
def quantize_model(model):
"""Quantize model dynamically
Args:
model: model corresponding to the separator
"""
model = torch.quantization.quantize_dynamic(
model, {nn.LSTM, nn.Linear}, dtype=torch.qint8
)
return model
def create_script(model_name, separator):
"""Create the torchscript model from a separator
Args:
model_name: name of the torchscript file to create
separator: separator class which contains all models
"""
jit_script = torch.jit.script(separator)
torchscript_model_opti = optimize_for_mobile(jit_script)
torchscript_model_opti._save_for_lite_interpreter(f"dist/{model_name}.ptl")
def main():
device = "cpu"
separator_umxhq = create_separator(get_umx_models(target_urls_umxhq), device=device)
separator_umxl = create_separator(
get_umx_models(target_urls_umxl, hidden_size=1024), device=device
)
if not os.path.exists("dist"):
os.mkdir("dist")
separator_umxhq = quantize_model(separator_umxhq)
separator_umxl = quantize_model(separator_umxl)
create_script("umxhq", separator_umxhq)
create_script("umxl", separator_umxl)
if __name__ == "__main__":
main()
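def smoke_test_umxhq():
    """Hedged smoke test, not called by main(): run the unquantized UMX-HQ separator on
    one second of random stereo audio at 44.1 kHz. The shape convention, (batch, channels,
    samples) in and (batch, targets, channels, samples) out, follows the open-unmix
    Separator and is an assumption rather than something this script defines."""
    separator = create_separator(get_umx_models(target_urls_umxhq))
    with torch.no_grad():
        estimates = separator(torch.rand(1, 2, 44100))
    print(estimates.shape)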
| [
"torch.jit.script",
"torch.quantization.quantize_dynamic",
"torch.utils.mobile_optimizer.optimize_for_mobile",
"torch.hub.load_state_dict_from_url"
] | 1.10.1 | demixr/openunmix-torchscript | e0ccb812b6a6e16151e54cb2101372f61eb12c60 |
1.7 | """Pytorch Dataset object that loads 27x27 patches that contain single cells."""
import os
import random
import numpy as np
import scipy.io
import torch
import torch.utils.data as data_utils
import torchvision.transforms as transforms
from skimage import io, color
from sklearn.model_selection import KFold
import utils_augemntation
class ColonCancerBagsCross(data_utils.Dataset):
def __init__(self, path, train=True, test=False, shuffle_bag=False,
data_augmentation=False, loc_info=False, push=False,
nucleus_type=None, folds=10, fold_id=1, random_state=3, all_labels=False):
self.path = path
self.train = train
self.test = test
self.shuffle_bag = shuffle_bag
self.data_augmentation = data_augmentation
self.location_info = loc_info
self.push = push
self.nucleus_type = nucleus_type
self.folds = folds
self.fold_id = fold_id
self.random_state = random_state
self.all_labels = all_labels
self.r = np.random.RandomState(random_state)
tr = [utils_augemntation.RandomHEStain(),
utils_augemntation.HistoNormalize(),
utils_augemntation.RandomRotate(),
transforms.RandomVerticalFlip(),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(27, padding=(2, 2), padding_mode='reflect'),
#transforms.RandomRotation(15),
transforms.ToTensor(),
#transforms.RandomApply([utils_augemntation.GaussianNoise()], p=0.5)
]
tst = [utils_augemntation.HistoNormalize(),
transforms.ToTensor()
]
psh = [transforms.ToTensor()]
self.data_augmentation_img_transform = transforms.Compose(tr)
self.normalize_to_tensor_transform = transforms.Compose(tst)
self.to_tensor_transform = transforms.Compose(psh)
self.dir_list = self.get_dir_list(self.path)
folds = list(KFold(n_splits=self.folds, shuffle=True, random_state=self.random_state).split(self.dir_list))
if self.test:
indices = set(folds[self.fold_id][1])
else:
if self.train:
val_indices = self.r.choice(folds[self.fold_id][0], len(folds[self.fold_id][1]))
indices = set(folds[self.fold_id][0]) - set(val_indices)
else: # valid
indices = self.r.choice(folds[self.fold_id][0], len(folds[self.fold_id][1]))
if nucleus_type:
self.bag_list, self.labels_list = self.create_bags_one_type(np.asarray(self.dir_list)[list(indices)])
else:
self.bag_list, self.labels_list = self.create_bags(np.asarray(self.dir_list)[list(indices)])
@staticmethod
def get_dir_list(path):
dirs = [x[0] for x in os.walk(path)]
dirs.pop(0)
dirs.sort()
return dirs
def create_bags_one_type(self, dir_list):
"""Create bags containing only one type of nucleus."""
bag_list = []
labels_list = []
for dir in dir_list:
# Get image name
img_name = os.path.basename(dir)
# read the bmp image
img_dir = os.path.join(dir, img_name + '.bmp')
img = io.imread(img_dir)
if img.shape[2] == 4:
img = color.rgba2rgb(img)
if self.location_info:
xs = np.arange(0, 500)
xs = np.asarray([xs for i in range(500)])
ys = xs.transpose()
img = np.dstack((img, xs, ys))
# crop nucleus_type cells
dir_nucleus_type = os.path.join(dir, img_name + '_' + self.nucleus_type + '.mat')
with open(dir_nucleus_type, 'rb') as f:
mat_nucleus_type = scipy.io.loadmat(f)
cropped_cells = []
for (x, y) in mat_nucleus_type['detection']:
x = np.round(x)
y = np.round(y)
if self.data_augmentation:
x = x + np.round(np.random.normal(0, 3, 1))
y = y + np.round(np.random.normal(0, 3, 1))
if x < 13:
x_start = 0
x_end = 27
elif x > 500 - 14:
x_start = 500 - 27
x_end = 500
else:
x_start = x - 13
x_end = x + 14
if y < 13:
y_start = 0
y_end = 27
elif y > 500 - 14:
y_start = 500 - 27
y_end = 500
else:
y_start = y - 13
y_end = y + 14
cropped_cells.append(img[int(y_start):int(y_end), int(x_start):int(x_end)])
# if the image doesn't contain any nucleus of the requested type, move on to the next image
if not cropped_cells:
continue
# generate bag
bag = cropped_cells
# store single cell labels
if self.nucleus_type == 'epithelial':
labels = np.ones(len(cropped_cells))
else:
labels = np.zeros(len(cropped_cells))
# shuffle
if self.shuffle_bag:
zip_bag_labels = list(zip(bag, labels))
random.shuffle(zip_bag_labels)
bag, labels = zip(*zip_bag_labels)
# append every bag twice when training
if self.train:
for _ in [0, 1]:
bag_list.append(bag)
labels_list.append(labels)
else:
bag_list.append(bag)
labels_list.append(labels)
# bag_list.append(bag)
# labels_list.append(labels)
return bag_list, labels_list
def create_bags(self, dir_list):
bag_list = []
labels_list = []
for dir in dir_list:
# Get image name
img_name = os.path.basename(dir)
# read the bmp image
img_dir = os.path.join(dir, img_name + '.bmp')
img = io.imread(img_dir)
if img.shape[2] == 4:
img = color.rgba2rgb(img)
if self.location_info:
xs = np.arange(0, 500)
xs = np.asarray([xs for i in range(500)])
ys = xs.transpose()
img = np.dstack((img, xs, ys))
# crop epithelial cells (positive instances)
dir_epithelial = os.path.join(dir, img_name + '_epithelial.mat')
with open(dir_epithelial, 'rb') as f:
mat_epithelial = scipy.io.loadmat(f)
cropped_cells_epithelial = []
for (x, y) in mat_epithelial['detection']:
x = np.round(x)
y = np.round(y)
if self.data_augmentation:
x = x + np.round(np.random.normal(0, 3, 1))
y = y + np.round(np.random.normal(0, 3, 1))
if x < 13:
x_start = 0
x_end = 27
elif x > 500 - 14:
x_start = 500 - 27
x_end = 500
else:
x_start = x - 13
x_end = x + 14
if y < 13:
y_start = 0
y_end = 27
elif y > 500 - 14:
y_start = 500 - 27
y_end = 500
else:
y_start = y - 13
y_end = y + 14
cropped_cells_epithelial.append(img[int(y_start):int(y_end), int(x_start):int(x_end)])
# crop all other cells
dir_inflammatory = os.path.join(dir, img_name + '_inflammatory.mat')
dir_fibroblast = os.path.join(dir, img_name + '_fibroblast.mat')
dir_others = os.path.join(dir, img_name + '_others.mat')
with open(dir_inflammatory, 'rb') as f:
mat_inflammatory = scipy.io.loadmat(f)
with open(dir_fibroblast, 'rb') as f:
mat_fibroblast = scipy.io.loadmat(f)
with open(dir_others, 'rb') as f:
mat_others = scipy.io.loadmat(f)
all_coordinates = np.concatenate(
(mat_inflammatory['detection'], mat_fibroblast['detection'], mat_others['detection']), axis=0)
cropped_cells_others = []
for (x, y) in all_coordinates:
x = np.round(x)
y = np.round(y)
if self.data_augmentation:
x = x + np.round(np.random.normal(0, 3, 1))
y = y + np.round(np.random.normal(0, 3, 1))
if x < 13:
x_start = 0
x_end = 27
elif x > 500 - 14:
x_start = 500 - 27
x_end = 500
else:
x_start = x - 13
x_end = x + 14
if y < 13:
y_start = 0
y_end = 27
elif y > 500 - 14:
y_start = 500 - 27
y_end = 500
else:
y_start = y - 13
y_end = y + 14
cropped_cells_others.append(img[int(y_start):int(y_end), int(x_start):int(x_end)])
# generate bag
bag = cropped_cells_epithelial + cropped_cells_others
# store single cell labels
labels = np.concatenate((np.ones(len(cropped_cells_epithelial)), np.zeros(len(cropped_cells_others))),
axis=0)
# shuffle
if self.shuffle_bag:
zip_bag_labels = list(zip(bag, labels))
random.shuffle(zip_bag_labels)
bag, labels = zip(*zip_bag_labels)
# append every bag twice when training
if self.train:
for _ in [0, 1]:
bag_list.append(bag)
labels_list.append(labels)
else:
bag_list.append(bag)
labels_list.append(labels)
return bag_list, labels_list
def transform_and_data_augmentation(self, bag, raw=False):
if raw:
img_transform = self.to_tensor_transform
elif not raw and self.data_augmentation:
img_transform = self.data_augmentation_img_transform
else:
img_transform = self.normalize_to_tensor_transform
bag_tensors = []
for img in bag:
if self.location_info:
bag_tensors.append(torch.cat(
(img_transform(img[:, :, :3].astype('uint8')),
torch.from_numpy(img[:, :, 3:].astype(float).transpose((2, 0, 1))).float(),
)))
else:
bag_tensors.append(img_transform(img))
return torch.stack(bag_tensors)
def __len__(self):
return len(self.labels_list)
def __getitem__(self, index):
bag = self.bag_list[index]
if self.all_labels:
label = torch.LongTensor(self.labels_list[index])
else:
label = torch.LongTensor(self.labels_list[index]).max().unsqueeze(0)
if self.push:
return self.transform_and_data_augmentation(bag, raw=True), self.transform_and_data_augmentation(
bag), label
else:
return self.transform_and_data_augmentation(bag), label
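if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the dataset root below is a
    # placeholder and must contain one folder per 500x500 ColonCancer image, each holding
    # the .bmp plus the *_epithelial/_fibroblast/_inflammatory/_others.mat detections.
    ds = ColonCancerBagsCross('./ColonCancer', train=True, fold_id=0)
    bag, label = ds[0]  # bag: (num_cells, 3, 27, 27) float tensor, label: bag-level LongTensor
    print(bag.shape, label)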
| [
"torch.stack",
"torch.LongTensor"
] | 1.7.1 | apardyl/ProtoPNet | b2bbd7284bfc84a37385c0e975408c68cdf64205 |
1.0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 12:32:53 2017
@author: wroscoe
"""
import os
import sys
import time
import json
import datetime
import random
import tarfile
import numpy as np
import pandas as pd
from PIL import Image
from donkeycar import util
from ..log import get_logger
logger = get_logger(__name__)
class Tub(object):
"""
A datastore to store sensor data in a key, value format.
Accepts str, int, float, image_array, image, and array data types.
For example:
#Create a tub to store speed values.
>>> path = '~/mydonkey/test_tub'
>>> inputs = ['user/speed', 'cam/image']
>>> types = ['float', 'image']
>>> t=Tub(path=path, inputs=inputs, types=types)
"""
def __init__(self, path, inputs=None, types=None):
self.path = os.path.expanduser(path)
logger.info('path_in_tub: {}'.format(self.path))
self.meta_path = os.path.join(self.path, 'meta.json')
self.df = None
exists = os.path.exists(self.path)
if exists:
# load log and meta
logger.info('Tub exists: {}'.format(self.path))
with open(self.meta_path, 'r') as f:
self.meta = json.load(f)
self.current_ix = self.get_last_ix() + 1
elif not exists and inputs:
logger.info('Tub does NOT exist. Creating new tub...')
# create log and save meta
os.makedirs(self.path)
self.meta = {'inputs': inputs, 'types': types}
with open(self.meta_path, 'w') as f:
json.dump(self.meta, f)
self.current_ix = 0
logger.info('New tub created at: {}'.format(self.path))
else:
msg = "The tub path you provided doesn't exist and you didnt pass any meta info (inputs & types)" + \
"to create a new tub. Please check your tub path or provide meta info to create a new tub."
raise AttributeError(msg)
self.start_time = time.time()
def get_last_ix(self):
index = self.get_index()
if len(index) >= 1:
return max(index)
return -1
def update_df(self):
df = pd.DataFrame([self.get_json_record(i) for i in self.get_index(shuffled=False)])
self.df = df
def get_df(self):
if self.df is None:
self.update_df()
return self.df
def get_index(self, shuffled=True):
files = next(os.walk(self.path))[2]
record_files = [f for f in files if f[:6] == 'record']
def get_file_ix(file_name):
try:
name = file_name.split('.')[0]
num = int(name.split('_')[1])
except:
num = 0
return num
nums = [get_file_ix(f) for f in record_files]
if shuffled:
random.shuffle(nums)
else:
nums = sorted(nums)
return nums
@property
def inputs(self):
return list(self.meta['inputs'])
@property
def types(self):
return list(self.meta['types'])
def get_input_type(self, key):
input_types = dict(zip(self.inputs, self.types))
return input_types.get(key)
def write_json_record(self, json_data):
path = self.get_json_record_path(self.current_ix)
try:
with open(path, 'w') as fp:
json.dump(json_data, fp)
except TypeError:
logger.warning('troubles with record: {}'.format(json_data))
except FileNotFoundError:
raise
except:
logger.error('Unexpected error: {}'.format(sys.exc_info()[0]))
raise
def get_num_records(self):
import glob
files = glob.glob(os.path.join(self.path, 'record_*.json'))
return len(files)
def make_record_paths_absolute(self, record_dict):
d = {}
for k, v in record_dict.items():
if isinstance(v, str):  # filename
if '.' in v:
v = os.path.join(self.path, v)
d[k] = v
return d
def check(self, fix=False):
"""
Iterate over all records and make sure we can load them.
Optionally remove records that cause a problem.
"""
logger.info('Checking tub: {}'.format(self.path))
logger.info('Found: {} records'.format(self.get_num_records()))
problems = False
for ix in self.get_index(shuffled=False):
try:
self.get_record(ix)
except:
problems = True
if fix is False:
logger.warning('problems with record {} : {}'.format(ix, self.path))
else:
logger.warning('problems with record {}, removing: {}'.format(ix, self.path))
self.remove_record(ix)
if not problems:
logger.info('No problems found.')
def remove_record(self, ix):
"""
remove data associated with a record
"""
record = self.get_json_record_path(ix)
os.unlink(record)
def put_record(self, data):
"""
Save values like images that can't be saved in the csv log and
return a record with references to the saved values that can
be saved in a csv.
"""
json_data = {}
for key, val in data.items():
typ = self.get_input_type(key)
if typ in ['str', 'float', 'int', 'boolean']:
json_data[key] = val
elif typ == 'image':
name = self.make_file_name(key, ext='.jpg')
val.save(os.path.join(self.path, name))
json_data[key] = name
elif typ == 'image_array':
img = Image.fromarray(np.uint8(val))
name = self.make_file_name(key, ext='.jpg')
img.save(os.path.join(self.path, name))
json_data[key] = name
else:
msg = 'Tub does not know what to do with this type {}'.format(typ)
raise TypeError(msg)
self.write_json_record(json_data)
self.current_ix += 1
return self.current_ix
def get_json_record_path(self, ix):
# fill zeros
# return os.path.join(self.path, 'record_'+str(ix).zfill(6)+'.json')
# don't fill zeros
return os.path.join(self.path, 'record_' + str(ix) + '.json')
def get_json_record(self, ix):
path = self.get_json_record_path(ix)
try:
with open(path, 'r') as fp:
json_data = json.load(fp)
except UnicodeDecodeError:
raise Exception('bad record: %d. You may want to run `python manage.py check --fix`' % ix)
except FileNotFoundError:
raise
except:
logger.error('Unexpected error: {}'.format(sys.exc_info()[0]))
raise
record_dict = self.make_record_paths_absolute(json_data)
return record_dict
def get_record(self, ix):
json_data = self.get_json_record(ix)
data = self.read_record(json_data)
return data
def read_record(self, record_dict):
data = {}
for key, val in record_dict.items():
typ = self.get_input_type(key)
# load objects that were saved as separate files
if typ == 'image_array':
img = Image.open((val))
val = np.array(img)
data[key] = val
return data
def make_file_name(self, key, ext='.png'):
# name = '_'.join([str(self.current_ix).zfill(6), key, ext])
name = '_'.join([str(self.current_ix), key, ext]) # don't fill zeros
name = name.replace('/', '-')
return name
def delete(self):
""" Delete the folder and files for this tub. """
import shutil
shutil.rmtree(self.path)
def shutdown(self):
""" Required by the Part interface """
pass
def get_record_gen(self, record_transform=None, shuffle=True, df=None):
"""
Returns records.
Parameters
----------
record_transform : function
The mapping function should handle records in dict format
shuffle : bool
Shuffle records
df : pandas.DataFrame
If df is specified, the generator will use the records specified in that DataFrame. If None,
the internal DataFrame will be used by calling get_df()
Returns
-------
A generator yielding one record dict at a time (after read_record and the optional record_transform).
See Also
--------
get_df
"""
if df is None:
df = self.get_df()
while True:
for _, row in df.iterrows():
if shuffle:
record_dict = df.sample(n=1).to_dict(orient='records')[0]
else:
record_dict = row.to_dict()
record_dict = self.read_record(record_dict)
if record_transform:
record_dict = record_transform(record_dict)
yield record_dict
def get_batch_gen(self, keys=None, batch_size=128, record_transform=None, shuffle=True, df=None):
"""
Returns batches of records.
Additionally, the records in a batch are collated into a dict mapping each input key to a list of values. By specifying keys as a subset of the inputs, you can filter out unnecessary data.
Parameters
----------
keys : list of strings
List of keys to filter out. If None, all inputs are included.
batch_size : int
The number of records in one batch.
Returns
-------
A generator yielding dicts that map each of the specified keys to a numpy array of batch_size values.
See Also
--------
get_record_gen
"""
record_gen = self.get_record_gen(record_transform=record_transform, shuffle=shuffle, df=df)
if df is None:
df = self.get_df()
if keys is None:
keys = list(df.columns)
while True:
record_list = [ next(record_gen) for _ in range(batch_size) ]
batch_arrays = {}
for i, k in enumerate(keys):
arr = np.array([r[k] for r in record_list])
batch_arrays[k] = arr
yield batch_arrays
def get_train_gen(self, X_keys, Y_keys,
batch_size=128,
record_transform=None,
df=None):
"""
Returns a generator of training/validation batches.
The records are always shuffled.
Parameters
----------
X_keys : list of strings
List of the feature(s) to use. Must be included in Tub.inputs.
Y_keys : list of strings
List of the label(s) to use. Must be included in Tub.inputs.
Returns
-------
A generator yielding tuples (X, Y), where X is a list of len(X_keys) arrays with batch_size samples each and Y is a list of len(Y_keys) arrays with batch_size samples each.
See Also
--------
get_batch_gen
"""
batch_gen = self.get_batch_gen(X_keys + Y_keys,
batch_size=batch_size,
record_transform=record_transform,
df=df)
while True:
batch = next(batch_gen)
X = [batch[k] for k in X_keys]
Y = [batch[k] for k in Y_keys]
yield X, Y
def get_train_val_gen(self, X_keys, Y_keys, batch_size=128, train_frac=.8,
train_record_transform=None, val_record_transform=None):
"""
Create generators for training and validation set.
Parameters
----------
train_frac : float
Training/validation set split.
train_record_transform : function
Transform function for the training set. Used internally by Tub.get_record_gen().
val_record_transform : function
Transform function for the validation set. Used internally by Tub.get_record_gen().
Returns
-------
A tuple (train_gen, val_gen), where train_gen is the training set generator, and
val_gen the validation set generator.
See Also
--------
get_train_gen
get_record_gen
"""
if self.df is None:
self.update_df()
train_df = self.df.sample(frac=train_frac, random_state=200)
val_df = self.df.drop(train_df.index)
train_gen = self.get_train_gen(X_keys=X_keys, Y_keys=Y_keys, batch_size=batch_size,
record_transform=train_record_transform, df=train_df)
val_gen = self.get_train_gen(X_keys=X_keys, Y_keys=Y_keys, batch_size=batch_size,
record_transform=val_record_transform, df=val_df)
return train_gen, val_gen
def tar_records(self, file_path, start_ix=None, end_ix=None):
"""
Create a tarfile of the records and metadata from a tub.
Compress using gzip.
Parameters
----------
file_path : string
The destination path of the created tar archive
start_ix : int
Start index. Defaults to 0.
end_ix : int
End index. Defaults to last index.
Returns
-------
Path to the tar archive
"""
if not start_ix:
start_ix = 0
if not end_ix:
end_ix = self.get_last_ix() + 1
with tarfile.open(name=file_path, mode='w:gz') as f:
for ix in range(start_ix, end_ix):
record_path = self.get_json_record_path(ix)
f.add(record_path)
f.add(self.meta_path)
return file_path
class TubWriter(Tub):
def __init__(self, *args, **kwargs):
super(TubWriter, self).__init__(*args, **kwargs)
def run(self, *args):
"""
Accepts values, pairs them with their input keys and saves them
to disk.
"""
assert len(self.inputs) == len(args)
record = dict(zip(self.inputs, args))
self.put_record(record)
class TubReader(Tub):
def __init__(self, *args, **kwargs):
super(TubReader, self).__init__(*args, **kwargs)
self.read_ix = 0
def run(self, *args):
"""
Accepts keys to read from the tub and retrieves them sequentially.
"""
if self.read_ix >= self.current_ix:
return None
record_dict = self.get_record(self.read_ix)
self.read_ix += 1
record = [record_dict[key] for key in args ]
return record
class TubHandler():
def __init__(self, path):
self.path = os.path.expanduser(path)
def get_tub_list(self):
folders = next(os.walk(self.path))[1]
return folders
def next_tub_number(self):
def get_tub_num(tub_name):
try:
num = int(tub_name.split('_')[1])
except:
num = 0
return num
folders = self.get_tub_list()
numbers = [get_tub_num(x) for x in folders]
next_number = max(numbers+[0]) + 1
return next_number
def create_tub_path(self):
tub_num = self.next_tub_number()
date = datetime.datetime.now().strftime('%y-%m-%d')
name = '_'.join(['tub', str(tub_num).zfill(2), date])
tub_path = os.path.join(self.path, name)
return tub_path
def new_tub_writer(self, inputs, types):
tub_path = self.create_tub_path()
tw = TubWriter(path=tub_path, inputs=inputs, types=types)
return tw
class TubImageStacker(Tub):
"""
A Tub for training a NN with images that are the last three records stacked
together as 3 channels of a single image. The idea is to give a simple feedforward
NN some chance of building a model based on motion.
If you drive with the ImageFIFO part, then you don't need this.
Just make sure your inference pass uses the ImageFIFO that the NN will now expect.
"""
def rgb2gray(self, rgb):
"""
take a numpy rgb image and return a new single channel image converted to greyscale
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def stack3Images(self, img_a, img_b, img_c):
"""
convert 3 rgb images into grayscale and put them into the 3 channels of
a single output image
"""
width, height, _ = img_a.shape
gray_a = self.rgb2gray(img_a)
gray_b = self.rgb2gray(img_b)
gray_c = self.rgb2gray(img_c)
img_arr = np.zeros([width, height, 3], dtype=np.dtype('B'))
img_arr[...,0] = np.reshape(gray_a, (width, height))
img_arr[...,1] = np.reshape(gray_b, (width, height))
img_arr[...,2] = np.reshape(gray_c, (width, height))
return img_arr
def get_record(self, ix):
"""
get the current record and two previous.
stack the 3 images into a single image.
"""
data = super(TubImageStacker, self).get_record(ix)
if ix > 1:
data_ch1 = super(TubImageStacker, self).get_record(ix - 1)
data_ch0 = super(TubImageStacker, self).get_record(ix - 2)
json_data = self.get_json_record(ix)
for key, val in json_data.items():
typ = self.get_input_type(key)
#load objects that were saved as separate files
if typ == 'image':
val = self.stack3Images(data_ch0[key], data_ch1[key], data[key])
data[key] = val
elif typ == 'image_array':
img = self.stack3Images(data_ch0[key], data_ch1[key], data[key])
data[key] = np.array(img)
return data
class TubTimeStacker(TubImageStacker):
"""
A Tub for training a NN with records stacked through time.
The idea here is to force the network to learn to look ahead in time.
Init with an array of time offsets from the current time.
"""
def __init__(self, frame_list, *args, **kwargs):
"""
frame_list of [0, 10] would stack the current record and the record 10 frames from now together in a single record
with just the current image returned.
[5, 90, 200] would return 3 frames of records, offset 5, 90, and 200 frames in the future.
"""
super(TubTimeStacker, self).__init__(*args, **kwargs)
self.frame_list = frame_list
def get_record(self, ix):
"""
stack the N records into a single record.
Each key value has the record index with a suffix of _N where N is
the frame offset into the data.
"""
data = {}
for i, iOffset in enumerate(self.frame_list):
iRec = ix + iOffset
try:
json_data = self.get_json_record(iRec)
except FileNotFoundError:
continue
except:
continue
for key, val in json_data.items():
typ = self.get_input_type(key)
# load only the first image saved as separate files
if typ == 'image' and i == 0:
val = Image.open(os.path.join(self.path, val))
data[key] = val
elif typ == 'image_array' and i == 0:
d = super(TubTimeStacker, self).get_record(ix)
data[key] = d[key]
else:
"""
we append an _offset to the key
so user/angle now becomes user/angle_0
"""
new_key = key + "_" + str(iOffset)
data[new_key] = val
return data
class TubGroup(Tub):
def __init__(self, tub_paths_arg):
tub_paths = util.files.expand_path_arg(tub_paths_arg)
logger.info('TubGroup:tubpaths: {}'.format(tub_paths))
self.tubs = [Tub(path) for path in tub_paths]
self.input_types = {}
record_count = 0
for t in self.tubs:
t.update_df()
record_count += len(t.df)
self.input_types.update(dict(zip(t.inputs, t.types)))
logger.info('joining the tubs {} records together. This could take {} minutes.'.format(record_count,
int(record_count / 300000)))
self.meta = {'inputs': list(self.input_types.keys()),
'types': list(self.input_types.values())}
self.df = pd.concat([t.df for t in self.tubs], axis=0, join='inner')
@property
def inputs(self):
return list(self.meta['inputs'])
@property
def types(self):
return list(self.meta['types'])
def get_num_tubs(self):
return len(self.tubs)
def get_num_records(self):
return len(self.df)
class TorchTubGroup(Tub):
def __init__(self, tub_paths_arg):
tub_paths = util.files.expand_path_arg(tub_paths_arg)
logger.info('TubGroup:tubpaths: {}'.format(tub_paths))
self.tubs = [Tub(path) for path in tub_paths]
self.input_types = {}
record_count = 0
for t in self.tubs:
t.update_df()
record_count += len(t.df)
self.input_types.update(dict(zip(t.inputs, t.types)))
logger.info('joining the tubs {} records together. This could take {} minutes.'.format(record_count,
int(record_count / 300000)))
self.meta = {'inputs': list(self.input_types.keys()),
'types': list(self.input_types.values())}
self.df = pd.concat([t.df for t in self.tubs], axis=0, join='inner')
@property
def inputs(self):
return list(self.meta['inputs'])
@property
def types(self):
return list(self.meta['types'])
def get_num_tubs(self):
return len(self.tubs)
def get_num_records(self):
return len(self.df)
def get_train_val_gen(self, X_keys, Y_keys, batch_size=128, train_frac=.8,
train_record_transform=None, val_record_transform=None,
sequential=False):
import torch
if self.df is None:
self.update_df()
if sequential:
# sequential split: keep record order and use the first train_frac rows for training
split_ix = int(len(self.df) * train_frac)
train_df = self.df.iloc[:split_ix]
val_df = self.df.iloc[split_ix:]
else:
# random sampling
train_df = self.df.sample(frac=train_frac, random_state=200)
val_df = self.df.drop(train_df.index)
train_gen = torch.utils.data.DataLoader(Dataset(train_df, X_keys, Y_keys), batch_size)
val_gen = torch.utils.data.DataLoader(Dataset(val_df, X_keys, Y_keys), batch_size)
return train_gen, val_gen
class Dataset(object):
def __init__(self, df_data, X_keys, Y_keys):
self.data = df_data
self.X_keys = X_keys
self.Y_keys = Y_keys
def __getitem__(self, idx):
import torch
try:
img = Image.open((self.data[self.X_keys].iloc[idx, 0]))
val = np.array(img, dtype=np.float32) / 255
steering = self.data[self.Y_keys].iloc[idx, 0].astype(np.float32)
throttle = self.data[self.Y_keys].iloc[idx, 1].astype(np.float32)
except IndexError as e:
print(idx)
print(self.data.size)
raise e
y = [steering, throttle]
y = torch.FloatTensor(y)
val = torch.FloatTensor(val).permute(2, 0, 1)  # move channel dimension from last to first
return val, y
def __len__(self):
return self.data.shape[0]
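def _demo_tub_roundtrip(path='./demo_tub'):
    """Hedged usage sketch, not part of the original module: write one synthetic record
    to a placeholder tub path with TubWriter, then read it back by key with TubReader."""
    writer = TubWriter(path=path, inputs=['cam/image_array', 'user/angle'],
                       types=['image_array', 'float'])
    writer.run(np.zeros((120, 160, 3), dtype=np.uint8), 0.0)
    reader = TubReader(path=path)
    return reader.run('cam/image_array', 'user/angle')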
| [
"torch.FloatTensor"
] | 1.0.0 | 0h-n0/donkeycar_pytorch | ed19404ad274ff0228cfa290a8b9d318f8781aad |
1.4 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Union
import torch
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from .state import AcceleratorState, DistributedType, is_tpu_available
from .utils import (
RNGType,
broadcast,
broadcast_object_list,
concatenate,
find_batch_size,
get_data_structure,
initialize_tensors,
is_torch_version,
send_to_device,
slice_tensors,
synchronize_rng_states,
)
if is_tpu_available():
import torch_xla.core.xla_model as xm
# kwargs accepted by DataLoader in the minimum supported version (1.4.0).
_PYTORCH_DATALOADER_KWARGS = {
"batch_size": 1,
"shuffle": False,
"sampler": None,
"batch_sampler": None,
"num_workers": 0,
"collate_fn": None,
"pin_memory": False,
"drop_last": False,
"timeout": 0,
"worker_init_fn": None,
"multiprocessing_context": None,
}
# kwargs added in later versions
_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {
"1.6.0": {"generator": None},
"1.7.0": {"prefetch_factor": 2, "persistent_workers": False},
}
for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
if is_torch_version(">=", v):
_PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
class BatchSamplerShard(BatchSampler):
"""
Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
Args:
batch_sampler (`torch.utils.data.sampler.BatchSampler`):
The batch sampler to split in several shards.
num_processes (`int`, *optional*, defaults to 1):
The number of processes running concurrently.
process_index (`int`, *optional*, defaults to 0):
The index of the current process.
split_batches (`bool`, *optional*, defaults to `False`):
Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
yielding different full batches on each process.
On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
- the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
this argument is set to `False`.
- the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
then `[6, 7]` if this argument is set to `True`.
<Tip warning={true}>
This does not support `BatchSampler` with varying batch size yet.
</Tip>"""
def __init__(
self,
batch_sampler: BatchSampler,
num_processes: int = 1,
process_index: int = 0,
split_batches: bool = False,
):
if split_batches and batch_sampler.batch_size % num_processes != 0:
raise ValueError(
f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
f"needs to be a round multiple of the number of processes ({num_processes})."
)
self.batch_sampler = batch_sampler
self.num_processes = num_processes
self.process_index = process_index
self.split_batches = split_batches
self.batch_size = batch_sampler.batch_size
self.drop_last = batch_sampler.drop_last
def __len__(self):
if self.split_batches:
return len(self.batch_sampler)
if len(self.batch_sampler) % self.num_processes == 0:
return len(self.batch_sampler) // self.num_processes
length = len(self.batch_sampler) // self.num_processes
return length if self.drop_last else length + 1
def __iter__(self):
return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
def _iter_with_split(self):
initial_data = []
batch_length = self.batch_sampler.batch_size // self.num_processes
for idx, batch in enumerate(self.batch_sampler):
if idx == 0:
initial_data = batch
if len(batch) == self.batch_size:
# If the batch is full, we yield the part of it this process is responsible of.
yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
# If drop_last is True or the last batch was full, iteration is over, otherwise...
if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
# For degenerate cases where the dataset has less than num_processes * batch_size samples
while len(initial_data) < self.batch_size:
initial_data += initial_data
batch = batch + initial_data
yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
def _iter_with_no_split(self):
initial_data = []
batch_to_yield = []
for idx, batch in enumerate(self.batch_sampler):
# We gather the initial indices in case we need to circle back at the end.
if not self.drop_last and idx < self.num_processes:
initial_data += batch
# We identify the batch to yield but wait until we are sure every process gets a full batch before actually
# yielding it.
if idx % self.num_processes == self.process_index:
batch_to_yield = batch
if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:
yield batch_to_yield
batch_to_yield = []
# If drop_last is True, iteration is over, otherwise...
if not self.drop_last and len(initial_data) > 0:
# ... we yield the complete batch we had saved before if it has the proper length
if len(batch_to_yield) == self.batch_size:
yield batch_to_yield
# For degenerate cases where the dataset has less than num_processes * batch_size samples
while len(initial_data) < self.num_processes * self.batch_size:
initial_data += initial_data
# If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
if len(batch) == self.batch_size:
batch = []
idx += 1
# Make sure we yield a multiple of self.num_processes batches
cycle_index = 0
while idx % self.num_processes != 0 or len(batch) > 0:
end_index = cycle_index + self.batch_size - len(batch)
batch += initial_data[cycle_index:end_index]
if idx % self.num_processes == self.process_index:
yield batch
cycle_index = end_index
batch = []
idx += 1
class IterableDatasetShard(IterableDataset):
"""
Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
always yield a number of samples that is a round multiple of the actual batch size (depending of the value of
`split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
`drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
be too small or loop with indices from the beginning.
Args:
dataset (`torch.utils.data.dataset.IterableDataset`):
The iterable dataset to split in several shards.
batch_size (`int`, *optional*, defaults to 1):
The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
`split_batches=True`).
drop_last (`bool`, *optional*, defaults to `False`):
Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
beginning.
num_processes (`int`, *optional*, defaults to 1):
The number of processes running concurrently.
process_index (`int`, *optional*, defaults to 0):
The index of the current process.
split_batches (`bool`, *optional*, defaults to `False`):
Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
yielding different full batches on each process.
On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
- the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
argument is set to `False`.
- the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
this argument is set to `True`.
"""
def __init__(
self,
dataset: IterableDataset,
batch_size: int = 1,
drop_last: bool = False,
num_processes: int = 1,
process_index: int = 0,
split_batches: bool = False,
):
if split_batches and batch_size > 1 and batch_size % num_processes != 0:
raise ValueError(
f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
f"needs to be a round multiple of the number of processes ({num_processes})."
)
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.num_processes = num_processes
self.process_index = process_index
self.split_batches = split_batches
def __iter__(self):
real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
first_batch = None
current_batch = []
for element in self.dataset:
current_batch.append(element)
# Wait to have a full batch before yielding elements.
if len(current_batch) == real_batch_size:
for i in process_slice:
yield current_batch[i]
if first_batch is None:
first_batch = current_batch.copy()
current_batch = []
# Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
if not self.drop_last and len(current_batch) > 0:
if first_batch is None:
first_batch = current_batch.copy()
while len(current_batch) < real_batch_size:
current_batch += first_batch
for i in process_slice:
yield current_batch[i]
class DataLoaderShard(DataLoader):
"""
Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
Args:
dataset (`torch.utils.data.dataset.Dataset`):
The dataset to use to build this dataloader.
device (`torch.device`, *optional*):
If passed, the device to put all batches on.
rng_types (list of `str` or [`~utils.RNGType`]):
The list of random number generators to synchronize at the beginning of each iteration. Should be one or
several of:
- `"torch"`: the base torch random number generator
- `"cuda"`: the CUDA random number generator (GPU only)
- `"xla"`: the XLA random number generator (TPU only)
- `"generator"`: an optional `torch.Generator`
generator (`torch.Generator`, *optional*):
A random number generator to keep synchronized across processes.
kwargs:
All other keyword arguments to pass to the regular `DataLoader` initialization.
"""
def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):
super().__init__(dataset, **kwargs)
self.device = device
self.rng_types = rng_types
self.generator = generator
def __iter__(self):
if self.rng_types is not None:
synchronize_rng_states(self.rng_types, self.generator)
state = AcceleratorState()
for batch in super().__iter__():
if state.distributed_type == DistributedType.TPU:
xm.mark_step()
yield batch if self.device is None else send_to_device(batch, self.device)
class DataLoaderDispatcher(DataLoader):
"""
Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
process their part of the batch.
Args:
split_batches (`bool`, *optional*, defaults to `False`):
Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
`num_processes` batches at each iteration).
Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
otherwise.
Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
`batch_size`.
"""
def __init__(self, dataset, split_batches: bool = False, **kwargs):
shuffle = False
if is_torch_version(">=", "1.11.0"):
from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
# We need to save the shuffling state of the DataPipe
if isinstance(dataset, ShufflerIterDataPipe):
shuffle = dataset._shuffle_enabled
super().__init__(dataset, **kwargs)
self.split_batches = split_batches
if is_torch_version("<", "1.8.0"):
raise ImportError(
"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}."
)
if shuffle:
torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
def __iter__(self):
state = AcceleratorState()
if state.process_index == 0:
# We only iterate through the DataLoader on process 0.
main_iterator = super().__iter__()
stop_iteration = False
first_batch = None
while not stop_iteration:
# On process 0, we gather the batch to dispatch.
if state.process_index == 0:
try:
if self.split_batches:
# One batch of the main iterator is dispatched and split.
batch = next(main_iterator)
else:
# num_processes batches of the main iterator are concatenated then dispatched and split.
# We add the batches one by one so we have the remainder available when drop_last=False.
batches = []
for _ in range(state.num_processes):
batches.append(next(main_iterator))
batch = concatenate(batches, dim=0)
# In both cases, we need to get the structure of the batch that we will broadcast on other
# processes to initialize the tensors with the right shape.
# data_structure, stop_iteration
batch_info = [get_data_structure(batch), False]
except StopIteration:
batch_info = [None, True]
else:
batch_info = [None, stop_iteration]
# This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
broadcast_object_list(batch_info)
stop_iteration = batch_info[1]
if stop_iteration:
# If drop_last is False and split_batches is False, we may have a remainder to take care of.
if not self.split_batches and not self.drop_last:
if state.process_index == 0 and len(batches) > 0:
batch = concatenate(batches, dim=0)
batch_info = [get_data_structure(batch), False]
else:
batch_info = [None, True]
broadcast_object_list(batch_info)
if batch_info[1]:
continue
else:
continue
if state.process_index != 0:
# Initialize tensors on other processes than process 0.
batch = initialize_tensors(batch_info[0])
batch = send_to_device(batch, state.device)
# Broadcast the batch before splitting it.
batch = broadcast(batch, from_process=0)
if not self.drop_last and first_batch is None:
# We keep at least num processes elements of the first batch to be able to complete the last batch
first_batch = slice_tensors(batch, slice(0, state.num_processes))
observed_batch_size = find_batch_size(batch)
batch_size = observed_batch_size // state.num_processes
if not self.drop_last and stop_iteration and observed_batch_size % state.num_processes != 0:
# If the last batch is not complete, let's add the first batch to it.
batch = concatenate([batch, first_batch], dim=0)
batch_size += 1
data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)
if state.distributed_type == DistributedType.TPU:
xm.mark_step()
yield slice_tensors(batch, data_slice)
def __len__(self):
state = AcceleratorState()
whole_length = super().__len__()
if self.drop_last:
return whole_length // state.num_processes
else:
return math.ceil(whole_length / state.num_processes)
def prepare_data_loader(
dataloader: DataLoader,
device: Optional[torch.device] = None,
num_processes: Optional[int] = None,
process_index: Optional[int] = None,
split_batches: bool = False,
put_on_device: bool = False,
rng_types: Optional[List[Union[str, RNGType]]] = None,
dispatch_batches: Optional[bool] = None,
) -> DataLoader:
"""
Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
Args:
dataloader (`torch.utils.data.dataloader.DataLoader`):
The data loader to split across several devices.
device (`torch.device`):
The target device for the returned `DataLoader`.
num_processes (`int`, *optional*):
The number of processes running concurrently. Will default to the value given by
[`~state.AcceleratorState`].
process_index (`int`, *optional*):
The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
split_batches (`bool`, *optional*, defaults to `False`):
Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
`num_processes` batches at each iteration).
Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
otherwise.
Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
`batch_size`.
put_on_device (`bool`, *optional*, defaults to `False`):
Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or
dictionaries of tensors).
rng_types (list of `str` or [`~utils.RNGType`]):
The list of random number generators to synchronize at the beginning of each iteration. Should be one or
several of:
- `"torch"`: the base torch random number generator
- `"cuda"`: the CUDA random number generator (GPU only)
- `"xla"`: the XLA random number generator (TPU only)
- `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
dispatch_batches (`bool`, *optional*):
If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
are split and broadcast to each process. Will default to `True` when the underlying dataset is an
`IterableDataset`, `False` otherwise.
Returns:
`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches
<Tip warning={true}>
This does not support `BatchSampler` with varying batch size yet.
</Tip>"""
if dispatch_batches is None:
if is_torch_version("<", "1.8.0") or not put_on_device:
dispatch_batches = False
else:
dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
if dispatch_batches and not put_on_device:
raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
# Grab defaults from AcceleratorState
state = AcceleratorState()
if num_processes is None:
num_processes = state.num_processes
if process_index is None:
process_index = state.process_index
# Sanity check
if split_batches and dataloader.batch_size > 1 and dataloader.batch_size % num_processes != 0:
raise ValueError(
f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
f"needs to be a round multiple of the number of processes ({num_processes})."
)
new_dataset = dataloader.dataset
# Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
generator = getattr(dataloader, "generator", None)
# No change if no multiprocess
if num_processes != 1 and not dispatch_batches:
if isinstance(new_dataset, IterableDataset):
if getattr(dataloader.dataset, "generator", None) is not None:
generator = dataloader.dataset.generator
new_dataset = IterableDatasetShard(
new_dataset,
batch_size=dataloader.batch_size,
drop_last=dataloader.drop_last,
num_processes=num_processes,
process_index=process_index,
split_batches=split_batches,
)
else:
# New batch sampler for the current process.
if hasattr(dataloader.sampler, "generator"):
if dataloader.sampler.generator is None:
dataloader.sampler.generator = torch.Generator()
generator = dataloader.sampler.generator
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
elif getattr(dataloader.batch_sampler, "generator", None) is not None:
generator = dataloader.batch_sampler.generator
new_batch_sampler = BatchSamplerShard(
dataloader.batch_sampler,
num_processes=num_processes,
process_index=process_index,
split_batches=split_batches,
)
# We ignore all of those since they are all dealt with by our new_batch_sampler
ignore_kwargs = [
"batch_size",
"shuffle",
"sampler",
"batch_sampler",
"drop_last",
"generator",
]
if rng_types is not None and generator is None and "generator" in rng_types:
rng_types.remove("generator")
kwargs = {
k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
for k in _PYTORCH_DATALOADER_KWARGS
if k not in ignore_kwargs
}
# Need to provide batch_size as batch_sampler is None for Iterable dataset
if new_batch_sampler is None:
kwargs["drop_last"] = dataloader.drop_last
kwargs["batch_size"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size
if dispatch_batches:
return DataLoaderDispatcher(
new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs
)
return DataLoaderShard(
new_dataset,
device=device if put_on_device else None,
batch_sampler=new_batch_sampler,
rng_types=rng_types,
generator=generator,
**kwargs,
)
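def _demo_batch_sampler_shard():
    """Hedged illustration, not part of the library: shard eight indices across two
    simulated processes with BatchSamplerShard, matching the behaviour described in its
    docstring (process 0 yields [0, 1, 2, 3] and process 1 yields [4, 5, 6, 7])."""
    from torch.utils.data import SequentialSampler
    base = BatchSampler(SequentialSampler(range(8)), batch_size=4, drop_last=False)
    return {rank: list(BatchSamplerShard(base, num_processes=2, process_index=rank)) for rank in (0, 1)}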
| [
"torch.utils.data.graph_settings.apply_shuffle_settings",
"torch.Generator",
"torch.empty"
] | 1.4.0 | yuxinyuan/accelerate | 450d51ce0191020408bd3481bde85fe1dabaf289 |
1.8 | import warnings
from math import sqrt
from typing import Iterator, List, Optional, Union, Any
import e3nn
import torch
import torch.fx
from e3nn import o3
from e3nn.util import prod
from e3nn.util.codegen import CodeGenMixin
from e3nn.util.jit import compile_mode
from torch import fx
from ._codegen import codegen_tensor_product_left_right, codegen_tensor_product_right
from ._instruction import Instruction
@compile_mode('script')
class TensorProduct(CodeGenMixin, torch.nn.Module):
r"""Tensor product with parametrized paths.
Parameters
----------
irreps_in1 : `e3nn.o3.Irreps`
Irreps for the first input.
irreps_in2 : `e3nn.o3.Irreps`
Irreps for the second input.
irreps_out : `e3nn.o3.Irreps`
Irreps for the output.
instructions : list of tuple
List of instructions ``(i_1, i_2, i_out, mode, train[, path_weight])``.
Each instruction puts ``in1[i_1]`` :math:`\otimes` ``in2[i_2]`` into ``out[i_out]``.
* ``mode``: `str`. Determines the way the multiplicities are treated, ``"uvw"`` is fully connected. Other valid options are: ``'uvw'``, ``'uvu'``, ``'uvv'``, ``'uuw'``, ``'uuu'``, and ``'uvuv'``.
* ``train``: `bool`. `True` if this path should have learnable weights, otherwise `False`.
* ``path_weight``: `float`. A fixed multiplicative weight to apply to the output of this path. Defaults to 1. Note that setting ``path_weight`` breaks the normalization derived from ``in1_var``/``in2_var``/``out_var``.
in1_var : list of float, Tensor, or None
Variance for each irrep in ``irreps_in1``. If ``None``, all default to ``1.0``.
in2_var : list of float, Tensor, or None
Variance for each irrep in ``irreps_in2``. If ``None``, all default to ``1.0``.
out_var : list of float, Tensor, or None
Variance for each irrep in ``irreps_out``. If ``None``, all default to ``1.0``.
irrep_normalization : {'component', 'norm'}
The assumed normalization of the input and output representations. If it is set to "norm":
.. math::
\| x \| = \| y \| = 1 \Longrightarrow \| x \otimes y \| = 1
path_normalization : {'element', 'path'}
If set to ``element``, each output is normalized by the total number of elements (independently of their paths).
If it is set to ``path``, each path is normalized by the total number of elements in the path, then each output is normalized by the number of paths.
internal_weights : bool
whether the `e3nn.o3.TensorProduct` contains its learnable weights as a parameter
shared_weights : bool
whether the learnable weights are shared among the input's extra dimensions
* `True` :math:`z_i = w x_i \otimes y_i`
* `False` :math:`z_i = w_i x_i \otimes y_i`
where here :math:`i` denotes a *batch-like* index.
``shared_weights`` cannot be `False` if ``internal_weights`` is `True`.
compile_left_right : bool
whether to compile the forward function, true by default
compile_right : bool
whether to compile the ``.right`` function, false by default
Examples
--------
Create a module that computes elementwise the cross-product of 16 vectors with 16 vectors :math:`z_u = x_u \wedge y_u`
>>> module = TensorProduct(
... "16x1o", "16x1o", "16x1e",
... [
... (0, 0, 0, "uuu", False)
... ]
... )
Now mix all 16 vectors with all 16 vectors to makes 16 pseudo-vectors :math:`z_w = \sum_{u,v} w_{uvw} x_u \wedge y_v`
>>> module = TensorProduct(
... [(16, (1, -1))],
... [(16, (1, -1))],
... [(16, (1, 1))],
... [
... (0, 0, 0, "uvw", True)
... ]
... )
With custom input variance and custom path weights:
>>> module = TensorProduct(
... "8x0o + 8x1o",
... "16x1o",
... "16x1e",
... [
... (0, 0, 0, "uvw", True, 3),
... (1, 0, 0, "uvw", True, 1),
... ],
... in2_var=[1/16]
... )
Example of a dot product:
>>> irreps = o3.Irreps("3x0e + 4x0o + 1e + 2o + 3o")
>>> module = TensorProduct(irreps, irreps, "0e", [
... (i, i, 0, 'uuw', False)
... for i, (mul, ir) in enumerate(irreps)
... ])
Implement :math:`z_u = x_u \otimes (\sum_v w_{uv} y_v)`
>>> module = TensorProduct(
... "8x0o + 7x1o + 3x2e",
... "10x0e + 10x1e + 10x2e",
... "8x0o + 7x1o + 3x2e",
... [
... # paths for the l=0:
... (0, 0, 0, "uvu", True), # 0x0->0
... # paths for the l=1:
... (1, 0, 1, "uvu", True), # 1x0->1
... (1, 1, 1, "uvu", True), # 1x1->1
... (1, 2, 1, "uvu", True), # 1x2->1
... # paths for the l=2:
... (2, 0, 2, "uvu", True), # 2x0->2
... (2, 1, 2, "uvu", True), # 2x1->2
... (2, 2, 2, "uvu", True), # 2x2->2
... ]
... )
Tensor Product using the xavier uniform initialization:
>>> irreps_1 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_2 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_out = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> # create a Fully Connected Tensor Product
>>> module = o3.TensorProduct(
... irreps_1,
... irreps_2,
... irreps_out,
... [
... (i_1, i_2, i_out, "uvw", True, mul_1 * mul_2)
... for i_1, (mul_1, ir_1) in enumerate(irreps_1)
... for i_2, (mul_2, ir_2) in enumerate(irreps_2)
... for i_out, (mul_out, ir_out) in enumerate(irreps_out)
... if ir_out in ir_1 * ir_2
... ]
... )
>>> with torch.no_grad():
... for weight in module.weight_views():
... mul_1, mul_2, mul_out = weight.shape
... # formula from torch.nn.init.xavier_uniform_
... a = (6 / (mul_1 * mul_2 + mul_out))**0.5
... new_weight = torch.empty_like(weight)
... new_weight.uniform_(-a, a)
... weight[:] = new_weight
tensor(...)
>>> n = 1_000
>>> vars = module(irreps_1.randn(n, -1), irreps_2.randn(n, -1)).var(0)
>>> assert vars.min() > 1 / 3
>>> assert vars.max() < 3
"""
instructions: List[Any]
shared_weights: bool
internal_weights: bool
weight_numel: int
_specialized_code: bool
_optimize_einsums: bool
_profiling_str: str
_in1_dim: int
_in2_dim: int
def __init__(
self,
irreps_in1: o3.Irreps,
irreps_in2: o3.Irreps,
irreps_out: o3.Irreps,
instructions: List[tuple],
in1_var: Optional[Union[List[float], torch.Tensor]] = None,
in2_var: Optional[Union[List[float], torch.Tensor]] = None,
out_var: Optional[Union[List[float], torch.Tensor]] = None,
irrep_normalization: str = None,
path_normalization: str = None,
internal_weights: Optional[bool] = None,
shared_weights: Optional[bool] = None,
compile_left_right: bool = True,
compile_right: bool = False,
normalization=None, # for backward compatibility
_specialized_code: Optional[bool] = None,
_optimize_einsums: Optional[bool] = None
):
# === Setup ===
super().__init__()
if normalization is not None:
warnings.warn(
"`normalization` is deprecated. Use `irrep_normalization` instead.",
DeprecationWarning
)
irrep_normalization = normalization
if irrep_normalization is None:
irrep_normalization = 'component'
if path_normalization is None:
path_normalization = 'element'
assert irrep_normalization in ['component', 'norm']
assert path_normalization in ['element', 'path']
self.irreps_in1 = o3.Irreps(irreps_in1)
self.irreps_in2 = o3.Irreps(irreps_in2)
self.irreps_out = o3.Irreps(irreps_out)
del irreps_in1, irreps_in2, irreps_out
instructions = [x if len(x) == 6 else x + (1.0,) for x in instructions]
instructions = [
Instruction(
i_in1, i_in2, i_out, connection_mode, has_weight, path_weight,
{
'uvw': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul, self.irreps_out[i_out].mul),
'uvu': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uvv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uuw': (self.irreps_in1[i_in1].mul, self.irreps_out[i_out].mul),
'uuu': (self.irreps_in1[i_in1].mul,),
'uvuv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
}[connection_mode],
)
for i_in1, i_in2, i_out, connection_mode, has_weight, path_weight in instructions
]
if in1_var is None:
in1_var = [1.0 for _ in range(len(self.irreps_in1))]
else:
in1_var = [float(var) for var in in1_var]
assert len(in1_var) == len(self.irreps_in1), "Len of in1_var must be equal to len(irreps_in1)"
if in2_var is None:
in2_var = [1.0 for _ in range(len(self.irreps_in2))]
else:
in2_var = [float(var) for var in in2_var]
assert len(in2_var) == len(self.irreps_in2), "Len of in2_var must be equal to len(irreps_in2)"
if out_var is None:
out_var = [1.0 for _ in range(len(self.irreps_out))]
else:
out_var = [float(var) for var in out_var]
assert len(out_var) == len(self.irreps_out), "Len of out_var must be equal to len(irreps_out)"
def num_elements(ins):
return {
'uvw': (self.irreps_in1[ins.i_in1].mul * self.irreps_in2[ins.i_in2].mul),
'uvu': self.irreps_in2[ins.i_in2].mul,
'uvv': self.irreps_in1[ins.i_in1].mul,
'uuw': self.irreps_in1[ins.i_in1].mul,
'uuu': 1,
'uvuv': 1,
}[ins.connection_mode]
normalization_coefficients = []
for ins in instructions:
mul_ir_in1 = self.irreps_in1[ins.i_in1]
mul_ir_in2 = self.irreps_in2[ins.i_in2]
mul_ir_out = self.irreps_out[ins.i_out]
assert mul_ir_in1.ir.p * mul_ir_in2.ir.p == mul_ir_out.ir.p
assert abs(mul_ir_in1.ir.l - mul_ir_in2.ir.l) <= mul_ir_out.ir.l <= mul_ir_in1.ir.l + mul_ir_in2.ir.l
assert ins.connection_mode in ['uvw', 'uvu', 'uvv', 'uuw', 'uuu', 'uvuv']
alpha = 1
if irrep_normalization == 'component':
alpha *= mul_ir_out.ir.dim
if irrep_normalization == 'norm':
alpha *= mul_ir_in1.ir.dim * mul_ir_in2.ir.dim
if path_normalization == 'element':
x = sum(
in1_var[i.i_in1] * in2_var[i.i_in2] * num_elements(i)
for i in instructions
if i.i_out == ins.i_out
)
if path_normalization == 'path':
x = in1_var[ins.i_in1] * in2_var[ins.i_in2] * num_elements(ins)
x *= len([i for i in instructions if i.i_out == ins.i_out])
if x > 0.0:
alpha /= x
alpha *= out_var[ins.i_out]
alpha *= ins.path_weight
normalization_coefficients += [sqrt(alpha)]
self.instructions = [
Instruction(ins.i_in1, ins.i_in2, ins.i_out, ins.connection_mode, ins.has_weight, alpha, ins.path_shape)
for ins, alpha in zip(instructions, normalization_coefficients)
]
self._in1_dim = self.irreps_in1.dim
self._in2_dim = self.irreps_in2.dim
if shared_weights is False and internal_weights is None:
internal_weights = False
if shared_weights is None:
shared_weights = True
if internal_weights is None:
internal_weights = shared_weights and any(i.has_weight for i in self.instructions)
assert shared_weights or not internal_weights
self.internal_weights = internal_weights
self.shared_weights = shared_weights
opt_defaults = e3nn.get_optimization_defaults()
self._specialized_code = _specialized_code if _specialized_code is not None else opt_defaults['specialized_code']
self._optimize_einsums = _optimize_einsums if _optimize_einsums is not None else opt_defaults['optimize_einsums']
del opt_defaults
# Generate the actual tensor product code
if compile_left_right:
graphmod_left_right = codegen_tensor_product_left_right(
self.irreps_in1,
self.irreps_in2,
self.irreps_out,
self.instructions,
self.shared_weights,
self._specialized_code,
self._optimize_einsums
)
else:
graphmod_left_right = fx.Graph()
graphmod_left_right.placeholder('x1', torch.Tensor)
graphmod_left_right.placeholder('x2', torch.Tensor)
graphmod_left_right.placeholder('w', torch.Tensor)
graphmod_left_right.call_function(
torch._assert,
args=(False, "`left_right` method is not compiled, set `compile_left_right` to True when creating the TensorProduct")
)
graphmod_left_right = fx.GraphModule(torch.nn.Module(), graphmod_left_right, class_name="tp_forward")
if compile_right:
graphmod_right = codegen_tensor_product_right(
self.irreps_in1,
self.irreps_in2,
self.irreps_out,
self.instructions,
self.shared_weights,
self._specialized_code,
self._optimize_einsums
)
else:
graphmod_right = fx.Graph()
graphmod_right.placeholder('x2', torch.Tensor)
graphmod_right.placeholder('w', torch.Tensor)
graphmod_right.call_function(
torch._assert,
args=(False, "`right` method is not compiled, set `compile_right` to True when creating the TensorProduct")
)
graphmod_right = fx.GraphModule(torch.nn.Module(), graphmod_right, class_name="tp_forward")
self._codegen_register({
"_compiled_main_left_right": graphmod_left_right,
"_compiled_main_right": graphmod_right
})
# === Determine weights ===
self.weight_numel = sum(prod(ins.path_shape) for ins in self.instructions if ins.has_weight)
if internal_weights and self.weight_numel > 0:
assert self.shared_weights, "Having internal weights requires shared weights"
self.weight = torch.nn.Parameter(torch.randn(self.weight_numel))
else:
# For TorchScript, there always has to be some kind of defined .weight
self.register_buffer('weight', torch.Tensor())
if self.irreps_out.dim > 0:
output_mask = torch.cat([
torch.ones(mul * ir.dim)
if any(
(ins.i_out == i_out) and (ins.path_weight != 0) and (0 not in ins.path_shape)
for ins in self.instructions
)
else torch.zeros(mul * ir.dim)
for i_out, (mul, ir) in enumerate(self.irreps_out)
])
else:
output_mask = torch.ones(0)
self.register_buffer('output_mask', output_mask)
# For TorchScript, this needs to be done in advance:
self._profiling_str = str(self)
def __repr__(self):
npath = sum(prod(i.path_shape) for i in self.instructions)
return (
f"{self.__class__.__name__}"
f"({self.irreps_in1.simplify()} x {self.irreps_in2.simplify()} "
f"-> {self.irreps_out.simplify()} | {npath} paths | {self.weight_numel} weights)"
)
@torch.jit.unused
def _prep_weights_python(self, weight: Optional[Union[torch.Tensor, List[torch.Tensor]]]) -> Optional[torch.Tensor]:
if isinstance(weight, list):
weight_shapes = [ins.path_shape for ins in self.instructions if ins.has_weight]
if not self.shared_weights:
weight = [w.reshape(-1, prod(shape)) for w, shape in zip(weight, weight_shapes)]
else:
weight = [w.reshape(prod(shape)) for w, shape in zip(weight, weight_shapes)]
return torch.cat(weight, dim=-1)
else:
return weight
def _get_weights(self, weight: Optional[torch.Tensor]) -> torch.Tensor:
if not torch.jit.is_scripting():
# If we're not scripting, then we're in Python and `weight` could be a List[Tensor]
# deal with that:
weight = self._prep_weights_python(weight)
if weight is None:
if self.weight_numel > 0 and not self.internal_weights:
raise RuntimeError("Weights must be provided when the TensorProduct does not have `internal_weights`")
return self.weight
else:
if self.shared_weights:
assert weight.shape == (self.weight_numel,), "Invalid weight shape"
else:
assert weight.shape[-1] == self.weight_numel, "Invalid weight shape"
assert weight.ndim > 1, "When shared_weights is False, weights must have a batch dimension"
return weight
@torch.jit.export
def right(self, y, weight: Optional[torch.Tensor] = None):
r"""Partially evaluate :math:`w x \otimes y`.
It returns an operator in the form of a tensor that can act on an arbitrary :math:`x`.
For example, if the tensor product above is expressed as
.. math::
w_{ijk} x_i y_j \rightarrow z_k
then the right method returns a tensor :math:`b_{ik}` such that
.. math::
w_{ijk} y_j \rightarrow b_{ik}
x_i b_{ik} \rightarrow z_k
The result of this method can be applied with a tensor contraction:
.. code-block:: python
torch.einsum("...ik,...i->...k", right, input)
Parameters
----------
y : `torch.Tensor`
tensor of shape ``(..., irreps_in2.dim)``
weight : `torch.Tensor` or list of `torch.Tensor`, optional
required if ``internal_weights`` is ``False``
tensor of shape ``(self.weight_numel,)`` if ``shared_weights`` is ``True``
tensor of shape ``(..., self.weight_numel)`` if ``shared_weights`` is ``False``
or list of tensors of shapes ``weight_shape`` / ``(...) + weight_shape``.
Use ``self.instructions`` to know what the weights are used for.
Returns
-------
`torch.Tensor`
tensor of shape ``(..., irreps_in1.dim, irreps_out.dim)``
"""
assert y.shape[-1] == self._in2_dim, "Incorrect last dimension for y"
# - PROFILER - with torch.autograd.profiler.record_function(self._profiling_str):
real_weight = self._get_weights(weight)
return self._compiled_main_right(y, real_weight)
def forward(self, x, y, weight: Optional[torch.Tensor] = None):
r"""Evaluate :math:`w x \otimes y`.
Parameters
----------
x : `torch.Tensor`
tensor of shape ``(..., irreps_in1.dim)``
y : `torch.Tensor`
tensor of shape ``(..., irreps_in2.dim)``
weight : `torch.Tensor` or list of `torch.Tensor`, optional
required if ``internal_weights`` is ``False``
tensor of shape ``(self.weight_numel,)`` if ``shared_weights`` is ``True``
tensor of shape ``(..., self.weight_numel)`` if ``shared_weights`` is ``False``
or list of tensors of shapes ``weight_shape`` / ``(...) + weight_shape``.
Use ``self.instructions`` to know what the weights are used for.
Returns
-------
`torch.Tensor`
tensor of shape ``(..., irreps_out.dim)``
"""
assert x.shape[-1] == self._in1_dim, "Incorrect last dimension for x"
assert y.shape[-1] == self._in2_dim, "Incorrect last dimension for y"
# - PROFILER - with torch.autograd.profiler.record_function(self._profiling_str):
real_weight = self._get_weights(weight)
return self._compiled_main_left_right(x, y, real_weight)
def weight_view_for_instruction(
self,
instruction: int,
weight: Optional[torch.Tensor] = None
) -> torch.Tensor:
r"""View of weights corresponding to ``instruction``.
Parameters
----------
instruction : int
The index of the instruction to get a view on the weights for. ``self.instructions[instruction].has_weight`` must be ``True``.
weight : `torch.Tensor`, optional
like ``weight`` argument to ``forward()``
Returns
-------
`torch.Tensor`
A view on ``weight`` or this object's internal weights for the weights corresponding to the ``instruction`` th instruction.
"""
if not self.instructions[instruction].has_weight:
raise ValueError(f"Instruction {instruction} has no weights.")
offset = sum(prod(ins.path_shape) for ins in self.instructions[:instruction])
ins = self.instructions[instruction]
weight = self._get_weights(weight)
batchshape = weight.shape[:-1]
return weight.narrow(-1, offset, prod(ins.path_shape)).view(batchshape + ins.path_shape)
def weight_views(
self,
weight: Optional[torch.Tensor] = None,
yield_instruction: bool = False
):
r"""Iterator over weight views for each weighted instruction.
Parameters
----------
weight : `torch.Tensor`, optional
like ``weight`` argument to ``forward()``
yield_instruction : `bool`, default False
Whether to also yield the corresponding instruction.
Yields
------
If ``yield_instruction`` is ``True``, yields ``(instruction_index, instruction, weight_view)``.
Otherwise, yields ``weight_view``.
"""
weight = self._get_weights(weight)
batchshape = weight.shape[:-1]
offset = 0
for ins_i, ins in enumerate(self.instructions):
if ins.has_weight:
flatsize = prod(ins.path_shape)
this_weight = weight.narrow(-1, offset, flatsize).view(batchshape + ins.path_shape)
offset += flatsize
if yield_instruction:
yield ins_i, ins, this_weight
else:
yield this_weight
def visualize(
self,
weight: Optional[torch.Tensor] = None,
plot_weight: bool = True,
aspect_ratio=1,
ax=None
): # pragma: no cover
r"""Visualize the connectivity of this `e3nn.o3.TensorProduct`
Parameters
----------
weight : `torch.Tensor`, optional
like ``weight`` argument to ``forward()``
plot_weight : `bool`, default True
Whether to color paths by the mean squared magnitude of their weights.
ax : ``matplotlib.Axes``, default None
The axes to plot on. If ``None``, a new figure will be created.
Returns
-------
(fig, ax)
The figure and axes on which the plot was drawn.
"""
import numpy as np
def _intersection(x, u, y, v):
u2 = np.sum(u**2)
v2 = np.sum(v**2)
uv = np.sum(u * v)
det = u2 * v2 - uv**2
mu = np.sum((u * uv - v * u2) * (y - x)) / det
return y + mu * v
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.path import Path
if ax is None:
ax = plt.gca()
fig = ax.get_figure()
# hexagon
verts = [
np.array([np.cos(a * 2 * np.pi / 6), np.sin(a * 2 * np.pi / 6)])
for a in range(6)
]
verts = np.asarray(verts)
# scale it
assert aspect_ratio in ['auto'] or isinstance(aspect_ratio, (float, int))
if aspect_ratio == 'auto':
factor = 0.2 / 2
min_aspect = 1 / 2
h_factor = max(len(self.irreps_in2), len(self.irreps_in1))
w_factor = len(self.irreps_out)
if h_factor / w_factor < min_aspect:
h_factor = min_aspect * w_factor
verts[:, 1] *= h_factor * factor
verts[:, 0] *= w_factor * factor
if isinstance(aspect_ratio, (float, int)):
factor = 0.1 * max(len(self.irreps_in2), len(self.irreps_in1), len(self.irreps_out))
verts[:, 1] *= factor
verts[:, 0] *= aspect_ratio * factor
codes = [
Path.MOVETO,
Path.LINETO,
Path.MOVETO,
Path.LINETO,
Path.MOVETO,
Path.LINETO,
]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=1, zorder=2)
ax.add_patch(patch)
n = len(self.irreps_in1)
b, a = verts[2:4]
c_in1 = (a + b) / 2
s_in1 = [a + (i + 1) / (n + 1) * (b - a) for i in range(n)]
n = len(self.irreps_in2)
b, a = verts[:2]
c_in2 = (a + b) / 2
s_in2 = [a + (i + 1) / (n + 1) * (b - a) for i in range(n)]
n = len(self.irreps_out)
a, b = verts[4:6]
s_out = [a + (i + 1) / (n + 1) * (b - a) for i in range(n)]
# get weights
if weight is None and not self.internal_weights:
plot_weight = False
elif plot_weight:
with torch.no_grad():
path_weight = []
for ins_i, ins in enumerate(self.instructions):
if ins.has_weight:
this_weight = self.weight_view_for_instruction(ins_i, weight=weight)
path_weight.append(this_weight.pow(2).mean())
else:
path_weight.append(0)
path_weight = np.asarray(path_weight)
path_weight /= np.abs(path_weight).max()
cmap = matplotlib.cm.get_cmap('Blues')
for ins_index, ins in enumerate(self.instructions):
y = _intersection(s_in1[ins.i_in1], c_in1, s_in2[ins.i_in2], c_in2)
verts = []
codes = []
verts += [s_out[ins.i_out], y]
codes += [Path.MOVETO, Path.LINETO]
verts += [s_in1[ins.i_in1], y]
codes += [Path.MOVETO, Path.LINETO]
verts += [s_in2[ins.i_in2], y]
codes += [Path.MOVETO, Path.LINETO]
if plot_weight:
color = cmap(path_weight[ins_index]) if ins.has_weight else 'black'
else:
color = 'green' if ins.has_weight else 'black'
ax.add_patch(patches.PathPatch(
Path(verts, codes),
facecolor='none',
edgecolor=color,
alpha=0.5,
ls='-',
lw=1.5 * ins.path_weight / min(i.path_weight for i in self.instructions),
))
# add labels
padding = 3
fontsize = 10
def format_ir(mul_ir):
if mul_ir.mul == 1:
return f"${mul_ir.ir}$"
return f"${mul_ir.mul} \\times {mul_ir.ir}$"
for i, mul_ir in enumerate(self.irreps_in1):
ax.annotate(
format_ir(mul_ir),
s_in1[i],
horizontalalignment='right',
textcoords='offset points',
xytext=(-padding, 0),
fontsize=fontsize
)
for i, mul_ir in enumerate(self.irreps_in2):
ax.annotate(
format_ir(mul_ir),
s_in2[i],
horizontalalignment='left',
textcoords='offset points',
xytext=(padding, 0),
fontsize=fontsize
)
for i, mul_ir in enumerate(self.irreps_out):
ax.annotate(
format_ir(mul_ir),
s_out[i],
horizontalalignment='center',
verticalalignment='top',
rotation=90,
textcoords='offset points',
xytext=(0, -padding),
fontsize=fontsize
)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.axis('equal')
ax.axis('off')
return fig, ax
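# Hedged usage sketch (illustrative only, not part of the original file): the operator
# returned by `.right()` can be contracted against `x` with the einsum pattern described in
# the `right` docstring to reproduce `forward(x, y)`. Note that `compile_right=True` must be
# passed, since the `.right` code path is not compiled by default.
def _example_right_contraction():
    tp = TensorProduct(
        "16x1o", "16x1o", "16x1e",
        [(0, 0, 0, "uuu", False)],
        compile_right=True,
    )
    x = tp.irreps_in1.randn(10, -1)
    y = tp.irreps_in2.randn(10, -1)
    b = tp.right(y)                             # shape (10, irreps_in1.dim, irreps_out.dim)
    z = torch.einsum("...ik,...i->...k", b, x)  # same result as tp(x, y)
    assert torch.allclose(z, tp(x, y), atol=1e-5)
    return z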
class FullyConnectedTensorProduct(TensorProduct):
r"""Fully-connected weighted tensor product
All the possible path allowed by :math:`|l_1 - l_2| \leq l_{out} \leq l_1 + l_2` are made.
The output is a sum on different paths:
.. math::
z_w = \sum_{u,v} w_{uvw} x_u \otimes y_v + \cdots \text{other paths}
where :math:`u,v,w` are the indices of the multiplicities.
Parameters
----------
irreps_in1 : `e3nn.o3.Irreps`
representation of the first input
irreps_in2 : `e3nn.o3.Irreps`
representation of the second input
irreps_out : `e3nn.o3.Irreps`
representation of the output
irrep_normalization : {'component', 'norm'}
see `e3nn.o3.TensorProduct`
path_normalization : {'element', 'path'}
see `e3nn.o3.TensorProduct`
internal_weights : bool
see `e3nn.o3.TensorProduct`
shared_weights : bool
see `e3nn.o3.TensorProduct`
"""
def __init__(
self,
irreps_in1,
irreps_in2,
irreps_out,
irrep_normalization: str = None,
path_normalization: str = None,
**kwargs
):
irreps_in1 = o3.Irreps(irreps_in1)
irreps_in2 = o3.Irreps(irreps_in2)
irreps_out = o3.Irreps(irreps_out)
instr = [
(i_1, i_2, i_out, 'uvw', True, 1.0)
for i_1, (_, ir_1) in enumerate(irreps_in1)
for i_2, (_, ir_2) in enumerate(irreps_in2)
for i_out, (_, ir_out) in enumerate(irreps_out)
if ir_out in ir_1 * ir_2
]
super().__init__(
irreps_in1,
irreps_in2,
irreps_out,
instr,
irrep_normalization=irrep_normalization,
path_normalization=path_normalization,
**kwargs
)
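# Hedged usage sketch (illustrative only, not part of the original file): the fully
# connected product enumerates every allowed path automatically, so the call below is
# equivalent to spelling out the `(i_1, i_2, i_out, 'uvw', True, 1.0)` instructions by hand.
def _example_fully_connected():
    tp = FullyConnectedTensorProduct("8x0e + 8x1o", "1x0e + 1x1o", "8x0e + 8x1o + 8x1e")
    x = tp.irreps_in1.randn(4, -1)
    y = tp.irreps_in2.randn(4, -1)
    return tp(x, y)  # shape (4, tp.irreps_out.dim); uses the module's internal weights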
class ElementwiseTensorProduct(TensorProduct):
r"""Elementwise connected tensor product.
.. math::
z_u = x_u \otimes y_u
where :math:`u` runs over the irreps. Note that there are no weights.
The output representation is determined by the two input representations.
Parameters
----------
irreps_in1 : `e3nn.o3.Irreps`
representation of the first input
irreps_in2 : `e3nn.o3.Irreps`
representation of the second input
filter_ir_out : iterator of `e3nn.o3.Irrep`, optional
filter to select only specific `e3nn.o3.Irrep` of the output
irrep_normalization : {'component', 'norm'}
see `e3nn.o3.TensorProduct`
Examples
--------
Elementwise scalar product
>>> ElementwiseTensorProduct("5x1o + 5x1e", "10x1e", ["0e", "0o"])
ElementwiseTensorProduct(5x1o+5x1e x 10x1e -> 5x0o+5x0e | 10 paths | 0 weights)
"""
def __init__(
self,
irreps_in1,
irreps_in2,
filter_ir_out=None,
irrep_normalization: str = None,
**kwargs
):
irreps_in1 = o3.Irreps(irreps_in1).simplify()
irreps_in2 = o3.Irreps(irreps_in2).simplify()
if filter_ir_out is not None:
try:
filter_ir_out = [o3.Irrep(ir) for ir in filter_ir_out]
except ValueError:
raise ValueError(f"filter_ir_out (={filter_ir_out}) must be an iterable of e3nn.o3.Irrep")
assert irreps_in1.num_irreps == irreps_in2.num_irreps
irreps_in1 = list(irreps_in1)
irreps_in2 = list(irreps_in2)
i = 0
while i < len(irreps_in1):
mul_1, ir_1 = irreps_in1[i]
mul_2, ir_2 = irreps_in2[i]
if mul_1 < mul_2:
irreps_in2[i] = (mul_1, ir_2)
irreps_in2.insert(i + 1, (mul_2 - mul_1, ir_2))
if mul_2 < mul_1:
irreps_in1[i] = (mul_2, ir_1)
irreps_in1.insert(i + 1, (mul_1 - mul_2, ir_1))
i += 1
out = []
instr = []
for i, ((mul, ir_1), (mul_2, ir_2)) in enumerate(zip(irreps_in1, irreps_in2)):
assert mul == mul_2
for ir in ir_1 * ir_2:
if filter_ir_out is not None and ir not in filter_ir_out:
continue
i_out = len(out)
out.append((mul, ir))
instr += [
(i, i, i_out, 'uuu', False)
]
super().__init__(
irreps_in1,
irreps_in2,
out,
instr,
irrep_normalization=irrep_normalization,
**kwargs
)
class FullTensorProduct(TensorProduct):
r"""Full tensor product between two irreps.
.. math::
z_{uv} = x_u \otimes y_v
where :math:`u` and :math:`v` run over the irreps. Note that there are no weights.
The output representation is determined by the two input representations.
Parameters
----------
irreps_in1 : `e3nn.o3.Irreps`
representation of the first input
irreps_in2 : `e3nn.o3.Irreps`
representation of the second input
filter_ir_out : iterator of `e3nn.o3.Irrep`, optional
filter to select only specific `e3nn.o3.Irrep` of the output
irrep_normalization : {'component', 'norm'}
see `e3nn.o3.TensorProduct`
"""
def __init__(
self,
irreps_in1: o3.Irreps,
irreps_in2: o3.Irreps,
filter_ir_out: Iterator[o3.Irrep] = None,
irrep_normalization: str = None,
**kwargs
):
irreps_in1 = o3.Irreps(irreps_in1).simplify()
irreps_in2 = o3.Irreps(irreps_in2).simplify()
if filter_ir_out is not None:
try:
filter_ir_out = [o3.Irrep(ir) for ir in filter_ir_out]
except ValueError:
raise ValueError(f"filter_ir_out (={filter_ir_out}) must be an iterable of e3nn.o3.Irrep")
out = []
instr = []
for i_1, (mul_1, ir_1) in enumerate(irreps_in1):
for i_2, (mul_2, ir_2) in enumerate(irreps_in2):
for ir_out in ir_1 * ir_2:
if filter_ir_out is not None and ir_out not in filter_ir_out:
continue
i_out = len(out)
out.append((mul_1 * mul_2, ir_out))
instr += [
(i_1, i_2, i_out, 'uvuv', False)
]
out = o3.Irreps(out)
out, p, _ = out.sort()
instr = [
(i_1, i_2, p[i_out], mode, train)
for i_1, i_2, i_out, mode, train in instr
]
super().__init__(
irreps_in1,
irreps_in2,
out,
instr,
irrep_normalization=irrep_normalization,
**kwargs
)
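# Hedged usage sketch (illustrative only, not part of the original file): the full tensor
# product keeps every (u, v) pair via the 'uvuv' mode and has no weights; its output
# irreps are the sorted products of the inputs.
def _example_full_tensor_product():
    tp = FullTensorProduct("2x0e + 1x1o", "1x1o")
    x = tp.irreps_in1.randn(3, -1)
    y = tp.irreps_in2.randn(3, -1)
    return tp(x, y)  # shape (3, tp.irreps_out.dim); tp.weight_numel == 0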
| [
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.ones",
"torch.fx.Graph",
"torch.jit.is_scripting",
"torch.nn.Module",
"torch.Tensor",
"torch.randn"
] | 1.8.0 | dmadisetti/e3nn | 224ac5a4a4911626a7a04cf408d3f1872e5ff239 |
1.0 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_albert import AlbertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "albert-base-v2"
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
# See all ALBERT models at https://huggingface.co/models?filter=albert
]
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
# If saved from the TF HUB module
name = name.replace("module/", "")
# Renaming and simplifying
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
# The feed forward layer had an 'intermediate' step which has been abstracted away
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
# ALBERT attention was split between self and output which have been abstracted away
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
# The pooler is a linear layer
name = name.replace("pooler/dense", "pooler")
# The classifier was simplified to predictions from cls/predictions
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
# Naming was changed to be more explicit
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
# Classifier
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
# No ALBERT model currently handles the next sentence prediction task
if "seq_relationship" in name:
name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
name = name.replace("weights", "weight")
name = name.split("/")
# Ignore the gradients applied by the LAMB/ADAM optimizers.
if (
"adam_m" in name
or "adam_v" in name
or "AdamWeightDecayOptimizer" in name
or "AdamWeightDecayOptimizer_1" in name
or "global_step" in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
except ValueError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name} from {original_name}")
pointer.data = torch.from_numpy(array)
return model
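# Hedged usage sketch (illustrative only, not part of the original file): converting a
# TensorFlow ALBERT checkpoint into this PyTorch model. The checkpoint path below is a
# placeholder, and TensorFlow plus an AlbertConfig matching the checkpoint are assumed.
def _example_load_tf_checkpoint(tf_checkpoint_path="/path/to/albert/model.ckpt"):
    config = AlbertConfig()  # should match the architecture of the TF checkpoint
    model = AlbertForPreTraining(config)
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    return model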
class AlbertEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# If token_type_ids is not provided (which usually happens when it is auto-generated), fall back to the
# registered buffer from the constructor, which is all zeros. The registered buffer helps users trace the
# model without passing token_type_ids and solves issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
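# Hedged shape sketch (illustrative only, not part of the original file): for input ids of
# shape (batch_size, seq_length), the embeddings are the sum of word, token-type and
# (absolute) position embeddings, layer-normed and dropped out, giving a tensor of shape
# (batch_size, seq_length, config.embedding_size).
def _example_embedding_shapes():
    config = AlbertConfig(embedding_size=128)
    embeddings = AlbertEmbeddings(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))
    return embeddings(input_ids)  # shape (2, 8, 128)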
class AlbertAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads}"
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(2, 1).flatten(2)
projected_context_layer = self.dense(context_layer)
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
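# Hedged shape sketch (illustrative only, not part of the original file): attention scores
# are query @ key^T / sqrt(attention_head_size), computed per head after
# `transpose_for_scores` reshapes (batch, seq, hidden) into (batch, heads, seq, head_size);
# the module returns the layer-normed residual of shape (batch, seq, hidden).
def _example_attention_shapes():
    config = AlbertConfig(hidden_size=64, num_attention_heads=4)
    attention = AlbertAttention(config)
    hidden_states = torch.randn(2, 8, config.hidden_size)
    (context,) = attention(hidden_states)  # shape (2, 8, 64)
    return context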
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
ffn_output = apply_chunking_to_forward(
self.ff_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[0],
)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
def ff_chunk(self, attention_output):
ffn_output = self.ffn(attention_output)
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
return ffn_output
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
output_attentions,
output_hidden_states,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
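# Hedged illustration (not part of the original file) of the group indexing used in the
# loop above: with num_hidden_layers=12 and num_hidden_groups=2, layers 0-5 reuse layer
# group 0 and layers 6-11 reuse layer group 1, each group applying its inner AlbertLayers.
def _example_group_indexing(num_hidden_layers=12, num_hidden_groups=2):
    layers_per_group = num_hidden_layers // num_hidden_groups
    # e.g. [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] for the defaults above
    return [layer // layers_per_group for layer in range(num_hidden_layers)]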
class AlbertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class AlbertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.AlbertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the sentence order prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
sop_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ALBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
base_model_prefix = "albert"
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
if add_pooling_layer:
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
else:
self.pooler = None
self.pooler_activation = None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT has
a different architecture in that its layers are shared across groups, which in turn have inner groups. If an ALBERT
model has 12 hidden layers and 2 hidden groups, with two inner groups, there are a total of 4 different layers.
These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
while [2,3] correspond to the two inner groups of the second hidden layer.
Any layer with an index other than [0,1,2,3] will result in an error. See the base class PreTrainedModel for more
information about head pruning.
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
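# Hedged usage sketch (illustrative only, not part of the original file): running the bare
# encoder on a tokenized sentence, assuming the "albert-base-v2" checkpoint and tokenizer
# can be loaded via `transformers`.
def _example_albert_model():
    from transformers import AlbertTokenizer, AlbertModel as _AlbertModel
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    model = _AlbertModel.from_pretrained("albert-base-v2")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state  # shape (1, seq_length, hidden_size)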
@add_start_docstrings(
"""
Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`sentence order prediction (classification)` head.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForPreTraining(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.sop_classifier = AlbertSOPHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.predictions.decoder = new_embeddings
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
sentence_order_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the sentence order prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence
A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A).
Returns:
Example::
>>> from transformers import AlbertTokenizer, AlbertForPreTraining
>>> import torch
>>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
>>> model = AlbertForPreTraining.from_pretrained('albert-base-v2')
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
sop_scores = self.sop_classifier(pooled_output)
total_loss = None
if labels is not None and sentence_order_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss
if not return_dict:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return AlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states
return prediction_scores
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
class AlbertSOPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, pooled_output):
dropout_pooled_output = self.dropout(pooled_output)
logits = self.classifier(dropout_pooled_output)
return logits
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config, add_pooling_layer=False)
self.predictions = AlbertMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.predictions.decoder = new_embeddings
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
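# A minimal masked-LM usage sketch for the head above (added for illustration,
# not part of the original file); the checkpoint name and example sentence are assumptions:
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     mlm = AlbertForMaskedLM.from_pretrained("albert-base-v2")
#     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
#     labels = inputs["input_ids"].clone()
#     labels[inputs["input_ids"] != tokenizer.mask_token_id] = -100  # ignore everything but the masked position
#     outputs = mlm(**inputs, labels=labels)
#     outputs.loss, outputs.logits.shape  # scalar loss, (1, seq_len, vocab_size)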
@add_start_docstrings(
"""
Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
            config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss);
            if ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForTokenClassification(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
classifier_dropout_prob = (
config.classifier_dropout_prob
if config.classifier_dropout_prob is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
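# Note on the attention_mask branch in AlbertForTokenClassification.forward above
# (added comment, not in the original file): positions where attention_mask == 0
# have their labels replaced with loss_fct.ignore_index (-100 for CrossEntropyLoss),
# so padded tokens never contribute to the loss. For example, with
# attention_mask = [1, 1, 0] and labels = [2, 5, 7], the labels actually scored
# are [2, 5, -100].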
@add_start_docstrings(
"""
    Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForMultipleChoice(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors (see
            `input_ids` above).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
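# A minimal multiple-choice usage sketch for the head above (added for illustration,
# not part of the original file); the checkpoint name and toy shapes are assumptions:
#
#     mc = AlbertForMultipleChoice.from_pretrained("albert-base-v2")
#     # input_ids has shape (batch_size, num_choices, sequence_length); forward
#     # flattens the choice dimension, scores each choice with a single linear unit,
#     # and reshapes the logits back to (batch_size, num_choices).
#     input_ids = torch.randint(0, mc.config.vocab_size, (2, 4, 16))
#     labels = torch.tensor([1, 3])  # index of the correct choice for each example
#     outputs = mc(input_ids=input_ids, labels=labels)
#     outputs.logits.shape  # torch.Size([2, 4])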
| [
"torch.nn.Linear",
"torch.einsum",
"torch.ones",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.tensor",
"torch.zeros",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.from_numpy",
"torch.nn.Embedding"
] | 1.0 | luyug/transformers | a59e7c1ed4ef1568ca0ba4140c9af641b17fa37e |
1.6 | import albumentations as albu
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def get_training_augmentation():
train_transform = [
albu.HorizontalFlip(p=0.5),
albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
albu.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
albu.RandomCrop(height=320, width=320, always_apply=True),
albu.IAAAdditiveGaussianNoise(p=0.2),
albu.IAAPerspective(p=0.5),
albu.OneOf(
[
albu.CLAHE(p=1),
albu.RandomBrightness(p=1),
albu.RandomGamma(p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.IAASharpen(p=1),
albu.Blur(blur_limit=3, p=1),
albu.MotionBlur(blur_limit=3, p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.RandomContrast(p=1),
albu.HueSaturationValue(p=1),
],
p=0.9,
),
]
return albu.Compose(train_transform)
def get_validation_augmentation():
"""Add paddings to make image shape divisible by 32"""
test_transform = [albu.PadIfNeeded(384, 480)]
return albu.Compose(test_transform)
def to_tensor(x, **kwargs):
return x.transpose(2, 0, 1).astype("float32")
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Returns:
transform: albumentations.Compose
"""
_transform = [
# albu.Lambda(image=preprocessing_fn),
albu.Lambda(image=to_tensor, mask=to_tensor),
]
return albu.Compose(_transform)
def ToTensor(pic):
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
nchannel = 3
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
def de_norm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
def to_var(x, requires_grad=True):
if not requires_grad:
return Variable(x, requires_grad=requires_grad)
else:
return Variable(x)
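# A minimal usage sketch for the helpers above (added for illustration, not part of
# the original file). It assumes an albumentations version that still ships the
# IAA* transforms used in get_training_augmentation (they need the imgaug extra and
# were removed in later releases), and it passes None for the unused, commented-out
# preprocessing_fn:
#
#     image = np.random.randint(0, 255, (384, 480, 3), dtype=np.uint8)
#     mask = np.random.randint(0, 2, (384, 480, 1), dtype=np.uint8)
#     augmented = get_training_augmentation()(image=image, mask=mask)
#     sample = get_preprocessing(None)(image=augmented["image"], mask=augmented["mask"])
#     image_tensor = torch.from_numpy(sample["image"])  # CHW float32 via to_tensor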
| [
"torch.autograd.Variable"
] | 1.6.0 | loayghawji/CPM | 8d1c1d0e15bba04c0ef06997411a09765f736cfa |
1.5 | from metric.recall_at_k import RecallAtK
from loss.vae_loss import VAELoss
from dataloader.mamo_dataset import MamoDataset
from validator import Validator
from torch.utils.data import DataLoader
from models.multi_VAE import MultiVAE
import os
import numpy as np
import pytest
import yaml
# Packages needed to run test:
# os
# numpy
# torch
# pytest
# yaml
with open('yaml_files/data_info.yaml', 'r') as stream:
try:
data_info = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# create temporary directories
if not os.path.isdir('test_data_val'):
os.mkdir('test_data_val')
# generate random data
np.random.seed(42)
dir_path = 'test_data_val/'
test_input_data_path = os.path.join(
dir_path, 'movielens_small_test_input.npy')
test_output_data_path = os.path.join(
dir_path, 'movielens_small_test_test.npy')
np.save(test_input_data_path, np.random.rand(2000, 8936).astype('float32'))
np.save(test_output_data_path, np.random.rand(2000, 8936).astype('float32'))
# Variables
dataset = MamoDataset(np.load(test_input_data_path),
np.load(test_output_data_path))
model = MultiVAE(params='yaml_files/params_multi_VAE.yaml')
model.initialize_model()
dataloader = DataLoader(dataset, batch_size=data_info['batch_size'],
shuffle=True, drop_last=True)
metrics = [RecallAtK(10)]
objectives = [VAELoss()]
obj_results = [0.4, 0.5, 0.7]
alphas = [0.5, 0.2, 0.3]
max_normalization = [1, 0.5, 2]
# A Validator object cannot be created without a model.
def test_validator_init_no_model():
with pytest.raises(TypeError, match='Argument: model must be set.'):
Validator(None, dataloader, metrics, objectives)
# A Validator object cannot be created without a dataloader.
def test_validator_init_no_dataloader():
with pytest.raises(TypeError, match='Argument: dataloader must be set.'):
Validator(model, None, metrics, objectives)
# A Validator object cannot be created without metrics and objectives.
def test_validator_init_no_metrics_and_objectives():
with pytest.raises(TypeError, match='Either argument: metrics or argument:'
+ ' objectives must be set.'):
Validator(model, dataloader, None, None)
# A Validator object cannot be created with an incorrect model.
def test_validator_init_bad_model():
with pytest.raises(TypeError, match='Argument: model must be derived'
+ ' from nn.Module.'):
Validator('model', dataloader, metrics, objectives)
# A Validator object cannot be created with an incorrect dataloader.
def test_validator_init_bad_dataloader():
with pytest.raises(TypeError, match='Argument: dataloader must be a'
+ ' pytorch DataLoader.'):
Validator(model, 'dataloader', metrics, objectives)
# A Validator object cannot be created with an incorrect metrics argument.
def test_validator_init_bad_metrics():
with pytest.raises(TypeError, match='Argument: metrics must be a list.'):
Validator(model, dataloader, 'metrics', objectives)
with pytest.raises(TypeError, match='All elements of argument: metrics'
+ ' must be of type MetricAtK.'):
Validator(model, dataloader, ['metric1', RecallAtK(2)], objectives)
# A Validator object cannot be created with an incorrect objectives argument.
def test_validator_init_bad_objectives():
with pytest.raises(TypeError, match='Argument: objectives must'
+ ' be a list.'):
Validator(model, dataloader, metrics, 'objectives')
with pytest.raises(TypeError, match='All elements of argument: objectives'
+ ' must be of type Loss.'):
Validator(model, dataloader, metrics, ['objective', VAELoss()])
# Testing the combine_objectives method
# combine_objectives cannot run if missing obj_results or in incorrect format
def test_validator_combine_objectives_bad_obj_results():
v = Validator(model, dataloader, metrics, objectives)
with pytest.raises(TypeError, match='Argument: obj_results must be set.'):
v.combine_objectives(None, alphas, max_normalization)
with pytest.raises(TypeError, match='Argument:'
+ ' obj_results must be a list.'):
v.combine_objectives('Results', alphas, max_normalization)
with pytest.raises(TypeError, match='All elements of argument: obj_results'
+ ' must be of type int or float.'):
v.combine_objectives([1, 2.5, 'number'], alphas, max_normalization)
# combine_objectives cannot run if alphas is in incorrect format
def test_validator_combine_objectives_bad_alphas():
v = Validator(model, dataloader, metrics, objectives)
with pytest.raises(TypeError, match='Argument:'
+ ' alphas must be a list.'):
v.combine_objectives(obj_results, 'alphas', max_normalization)
with pytest.raises(TypeError, match='All elements of argument: alphas'
+ ' must be of type int or float.'):
v.combine_objectives(obj_results, [1, 2.5, 'number'],
max_normalization)
with pytest.raises(ValueError, match='The length of alphas must be equal'
+ ' to that of obj_results'):
v.combine_objectives(obj_results, [1, 2.5], max_normalization)
# combine_objectives cannot run if max_normalization is in incorrect format
def test_validator_combine_objectives_bad_max_normalization():
v = Validator(model, dataloader, metrics, objectives)
with pytest.raises(TypeError, match='Argument:'
+ ' max_normalization must be a list.'):
v.combine_objectives(obj_results, alphas, 'max_normalization')
with pytest.raises(TypeError, match='All elements of argument:'
+ ' max_normalization must be of type int or float.'):
v.combine_objectives(obj_results, alphas, [1, 2.5, 'number'])
with pytest.raises(ValueError, match='The length of max_normalization must'
+ ' be equal to that of obj_results'):
v.combine_objectives(obj_results, alphas, [1, 2.5])
# Correct runs of combine_objectives
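# The expected values in the test below are consistent with combine_objectives
# computing a weighted sum of the objective results, each divided by its
# max_normalization entry (explanatory comment, not in the original file):
#   with alphas and max_normalization: 0.5*0.4/1 + 0.2*0.5/0.5 + 0.3*0.7/2 = 0.505
#   without alphas:                    0.4/1 + 0.5/0.5 + 0.7/2             = 1.75
#   without max_normalization:         0.5*0.4 + 0.2*0.5 + 0.3*0.7         = 0.51
#   with neither:                      0.4 + 0.5 + 0.7                     = 1.6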
def test_validator_combine_objectives_no_problem():
v = Validator(model, dataloader, metrics, objectives)
assert(v.combine_objectives(obj_results, alphas, max_normalization)
== 0.505)
assert(v.combine_objectives(obj_results, None, max_normalization) == 1.75)
assert(v.combine_objectives(obj_results, alphas, None) == 0.51)
assert(v.combine_objectives(obj_results) == sum(obj_results))
def test_validator_evaluate_bad_inputs():
v = Validator(model, dataloader, metrics, objectives)
with pytest.raises(TypeError, match='Argument: disable_anneal'
+ ' must be a bool.'):
v.evaluate(disable_anneal='True')
with pytest.raises(TypeError, match='Argument: verbose must be a bool.'):
v.evaluate(verbose='True')
# Small test just to show it works
def test_validator_evaluate_no_problem():
v = Validator(model, dataloader, metrics, objectives)
results = v.evaluate()
assert isinstance(results, tuple)
assert isinstance(results[0], list)
assert isinstance(results[1], list)
# removing generated data
def test_cleanup():
os.remove(test_input_data_path)
os.remove(test_output_data_path)
os.rmdir('test_data_val')
| [
"torch.utils.data.DataLoader"
] | 1.5.0 | blagojce95/ai-research-mamo-framework | 7f3b5a5a9fb8b19c9eef453b81b03b6046a33bf2 |
1.9 | ######################################
## Name of the LRW folder under the data directory. ##
######################################
LRW1000_DATA_PATH_NAME = '/data/zhangyk/data/CAS-VSR-W1k/audio/LRW1000_Public/audio'
LRW1000_AUDIO_DATA_PATH_NAME = '/data/zhangyk/data/lrw1000_audio_pkl'
######################################
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import glob
import time
import os
import os.path as osp
import sys
sys.path.append("../")
from models.metrics import ROOT_PATH
from models.utils import mkdir, save_pickle, parse_dataloader_split_csv, nan_assert
from tqdm.contrib import tzip
import librosa
import warnings
warnings.filterwarnings('ignore')
import torchaudio
data_root_path = osp.join('/data/zhangyk/data', LRW1000_AUDIO_DATA_PATH_NAME)
source_l, target_l = [], []
for stype in ['train', 'val', 'test', 'aux_val', 'aux_test']:
csv_path = osp.join(ROOT_PATH, f'data/lrw1000/split/{stype}.csv')
with open(csv_path, 'r') as f:
csv_tmp = f.readlines()
target_l.extend([osp.join(data_root_path, i.strip().split(',')[0]) for i in csv_tmp[1:]])
for i in csv_tmp[1:]:
i_split = i.strip().split(',')[0]
source_l.append(f'{i_split[i_split.rfind("_") + 1 : i_split.find(".pkl")]}.wav')
seq_len = 26880
for i, j in tzip(source_l, target_l):
waveform, sample_rate = torchaudio.load(osp.join(LRW1000_DATA_PATH_NAME, i))
assert sample_rate == 16000
waveform = waveform.squeeze(0)
if waveform.shape[0] > seq_len:
beg = int((waveform.shape[0] - seq_len) / 2)
waveform = waveform[beg : beg + seq_len]
elif waveform.shape[0] < seq_len:
waveform = torch.cat([waveform, torch.zeros(seq_len - waveform.shape[0])])
assert waveform.shape[0] == seq_len
waveform = waveform.cpu().detach().numpy()
save_pickle(j, waveform)
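# Note (added comment, not in the original script): seq_len = 26880 samples at the
# asserted 16 kHz sample rate is a fixed clip length of 1.68 s; longer waveforms are
# center-cropped to that length and shorter ones are zero-padded at the end, so every
# saved pickle has the same shape.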
| [
"torch.zeros"
] | 1.9.0 | ZhangYikaii/Proto-CAT | 57bb2c7fd88a9489faa88e3b904218bf5fb01b4e |
1.0 | import os
import math
import numpy as np
from PIL import Image
import skimage.transform as trans
import cv2
import torch
from data import dataset_info
from data.base_dataset import BaseDataset
import util.util as util
dataset_info = dataset_info()
class CasiaDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--no_pairing_check', action='store_true',
help='If specified, skip sanity check of correct label-image file pairing')
return parser
def cv2_loader(self, img_str):
img_array = np.frombuffer(img_str, dtype=np.uint8)
return cv2.imdecode(img_array, cv2.IMREAD_COLOR)
def fill_list(self, tmp_list):
length = len(tmp_list)
if length % self.opt.batchSize != 0:
end = math.ceil(length / self.opt.batchSize) * self.opt.batchSize
tmp_list = tmp_list + tmp_list[-1 * (end - length) :]
return tmp_list
def initialize(self, opt):
self.opt = opt
dataset_num = dataset_info.get_dataset(opt)
self.prefix = [dataset_info.prefix[num] for num in dataset_num]
file_list = [dataset_info.file_list[num] for num in dataset_num]
land_mark_list = [dataset_info.land_mark_list[num] for num in dataset_num]
self.params_dir = [dataset_info.params_dir[num] for num in dataset_num]
self.folder_level = [dataset_info.folder_level[num] for num in dataset_num]
self.num_datasets = len(file_list)
assert len(land_mark_list) == self.num_datasets, \
'num of landmk dir should be the num of datasets'
assert len(self.params_dir) == self.num_datasets, \
'num of params_dir should be the num of datasets'
self.dataset_lists = []
self.landmark_paths = []
self.sizes = []
for n in range(self.num_datasets):
with open(file_list[n]) as f:
img_lists = f.readlines()
img_lists = self.fill_list(img_lists)
self.sizes.append(len(img_lists))
self.dataset_lists.append(sorted(img_lists))
with open(land_mark_list[n]) as f:
landmarks = f.readlines()
landmarks = self.fill_list(landmarks)
self.landmark_paths.append(sorted(landmarks))
self.dataset_size = min(self.sizes)
self.initialized = False
def get_landmarks(self, landmark, img_list):
landmark_split = landmark.strip().split(' ')
filename1_without_ext = os.path.basename(img_list.strip())
filename2_without_ext = os.path.basename(landmark_split[0])
assert (filename1_without_ext == filename2_without_ext), \
"The image_path %s and params_path %s don't match." % \
(img_list, landmark_split[0])
label = landmark_split[1]
landmarks = landmark_split[2:]
landmarks = list(map(float, landmarks))
landmarks_array = np.array(landmarks).reshape(5, 2)
return landmarks_array, label
def get_param_file(self, img_list, dataset_num):
img_name = os.path.splitext(img_list)[0]
name_split = img_name.split("/")
folder_level = self.folder_level[dataset_num]
param_folder = os.path.join(self.params_dir[dataset_num],
"/".join([name_split[i] for i in range(len(name_split) - folder_level, len(name_split))]) + ".txt")
# params = np.loadtxt(param_folder)
return param_folder
def paths_match(self, path1, path2):
filename1_without_ext = os.path.splitext(os.path.basename(path1)[-10:])[0]
filename2_without_ext = os.path.splitext(os.path.basename(path2)[-10:])[0]
return filename1_without_ext == filename2_without_ext
def affine_align(self, img, landmark=None, **kwargs):
M = None
h, w, c = img.shape
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041]], dtype=np.float32)
src = src * 290 / 112
src[:, 0] += 50
src[:, 1] += 60
src = src / 400 * self.opt.crop_size
dst = landmark
# dst = landmark.astype(np.float32)
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
warped = cv2.warpAffine(img, M, (self.opt.crop_size, self.opt.crop_size), borderValue=0.0)
return warped, M
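    # Note on affine_align above (added comment, not in the original file): `src` is
    # a canonical 5-point landmark template (eye centers, nose tip, mouth corners) of
    # the kind used by common 112x112 face-alignment pipelines, rescaled and offset
    # to the working resolution and then to opt.crop_size; a similarity transform
    # mapping the detected landmarks onto that template is estimated and applied
    # with cv2.warpAffine.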
def __getitem__(self, index):
# Label Image
randnum = np.random.randint(sum(self.sizes))
dataset_num = np.random.randint(self.num_datasets)
image_path = self.dataset_lists[dataset_num][index].strip()
image_path = os.path.join(self.prefix[dataset_num], image_path)
print(image_path)
img = cv2.imread(image_path)
if img is None:
raise Exception('None Image')
param_path = self.get_param_file(image_path, dataset_num)
#mesh_path = image_path.replace('CASIA-WebFace', 'CASIA_RR_new/input')
mesh_path = image_path
mesh = cv2.imread(mesh_path)
if mesh is None:
raise Exception('None mesh Image')
mesh = cv2.cvtColor(mesh, cv2.COLOR_BGR2RGB)
# Load mask
#mask_path = image_path.replace('CASIA-WebFace', 'CASIA_RR_new/mask')
mask_path = image_path.replace('input', 'mask')
mask = cv2.imread(mask_path)
        if mask is None:
raise Exception('None mask Image')
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mesh[mask[:, :, 0] < 125] = [255, 255, 255]
cv2.imwrite('/mnt2/download/test/test_masked.jpg', mesh)
#cv2.imwrite('/mnt2/download/test/test.jpg', mesh)
mesh = cv2.resize(mesh, (self.opt.crop_size, self.opt.crop_size))
mesh = mesh.transpose(2, 0, 1) / 255.0
mesh = torch.from_numpy(mesh).float()
# img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (self.opt.crop_size, self.opt.crop_size))
M = None
landmark_path = self.landmark_paths[dataset_num][index].strip()
landmarks, label = self.get_landmarks(landmark_path, image_path)
wrapped_img, M = self.affine_align(img, landmarks)
M = torch.from_numpy(M).float()
wrapped_img = img.transpose(2, 0, 1) / 255.0
wrapped_img = torch.from_numpy(wrapped_img).float()
#print('loaded image', img.shape)
#print(wrapped_img[:3, :3, :3])
input_dict = {
'image': wrapped_img,
'mesh': mesh,
'param_path': param_path,
'M': M,
'path': image_path
}
# Give subclasses a chance to modify the final output
self.postprocess(input_dict)
return input_dict
def postprocess(self, input_dict):
return input_dict
def __len__(self):
return self.dataset_size
| [
"torch.from_numpy"
] | 1.0.0 | kangzhiq/Rotate-and-Render | e4b0946260f9ece8af7066f2668ee889a1ee9f23 |
1.4 | # coding: utf-8
from itertools import chain, starmap
from collections import Counter
import torch
from torchtext.data import Dataset as TorchtextDataset
from torchtext.data import Example
from torchtext.vocab import Vocab
def _join_dicts(*args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args]))
def _dynamic_dict(example, src_field, sim_field, tgt_field): #20201209 tmr add sim
"""Create copy-vocab and numericalize with it.
In-place adds ``"src_map"`` to ``example``. That is the copy-vocab
numericalization of the tokenized ``example["src"]``. If ``example``
has a ``"tgt"`` key, adds ``"alignment"`` to example. That is the
copy-vocab numericalization of the tokenized ``example["tgt"]``. The
alignment has an initial and final UNK token to match the BOS and EOS
tokens.
Args:
example (dict): An example dictionary with a ``"src"`` key and
maybe a ``"tgt"`` key. (This argument changes in place!)
src_field (torchtext.data.Field): Field object.
tgt_field (torchtext.data.Field): Field object.
Returns:
torchtext.data.Vocab and ``example``, changed as described.
"""
src = src_field.tokenize(example["src"])
sim = sim_field.tokenize(example["sim"]) #20201209 tme add sim
# make a small vocab containing just the tokens in the source sequence
unk = src_field.unk_token
pad = src_field.pad_token
src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])
sim_ex_vocab = Vocab(Counter(sim), specials=[unk, pad]) #20201209 tmr add sim
unk_idx = src_ex_vocab.stoi[unk]
# Map source tokens to indices in the dynamic dict.
src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])
example["src_map"] = src_map
example["src_ex_vocab"] = src_ex_vocab
    # Map sim tokens to indices in the dynamic dict. #20201209 tmr add sim
sim_map = torch.LongTensor([sim_ex_vocab.stoi[w] for w in sim])
example["sim_map"] = sim_map
example["sim_ex_vocab"] = sim_ex_vocab
if "tgt" in example:
tgt = tgt_field.tokenize(example["tgt"])
mask = torch.LongTensor(
[unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])
example["alignment"] = mask
return src_ex_vocab, sim_ex_vocab, example
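# A minimal sketch of the copy-vocab behaviour above (added for illustration, not
# part of the original file); it assumes plain torchtext Fields whose default
# tokenizer splits on whitespace, so the exact indices depend on torchtext's
# frequency-then-alphabetical vocab ordering:
#
#     >>> from torchtext.data import Field
#     >>> ex = {"src": "a b a c", "sim": "a b", "tgt": "b a"}
#     >>> src_vocab, sim_vocab, ex = _dynamic_dict(ex, Field(), Field(), Field())
#     >>> ex["src_map"]    # specials first (<unk>, <pad>), then "a", "b", "c"
#     tensor([2, 3, 2, 4])
#     >>> ex["alignment"]  # tgt tokens in the src copy-vocab, wrapped in unk indices
#     tensor([0, 3, 2, 0])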
class Dataset(TorchtextDataset):
"""Contain data and process it.
A dataset is an object that accepts sequences of raw data (sentence pairs
in the case of machine translation) and fields which describe how this
raw data should be processed to produce tensors. When a dataset is
instantiated, it applies the fields' preprocessing pipeline (but not
the bit that numericalizes it or turns it into batch tensors) to the raw
data, producing a list of :class:`torchtext.data.Example` objects.
torchtext's iterators then know how to use these examples to make batches.
Args:
fields (dict[str, Field]): a dict with the structure
returned by :func:`onmt.inputters.get_fields()`. Usually
that means the dataset side, ``"src"`` or ``"tgt"``. Keys match
the keys of items yielded by the ``readers``, while values
are lists of (name, Field) pairs. An attribute with this
name will be created for each :class:`torchtext.data.Example`
object and its value will be the result of applying the Field
to the data that matches the key. The advantage of having
sequences of fields for each piece of raw input is that it allows
the dataset to store multiple "views" of each input, which allows
for easy implementation of token-level features, mixed word-
and character-level models, and so on. (See also
:class:`onmt.inputters.TextMultiField`.)
readers (Iterable[onmt.inputters.DataReaderBase]): Reader objects
for disk-to-dict. The yielded dicts are then processed
according to ``fields``.
data (Iterable[Tuple[str, Any]]): (name, ``data_arg``) pairs
where ``data_arg`` is passed to the ``read()`` method of the
reader in ``readers`` at that position. (See the reader object for
details on the ``Any`` type.)
dirs (Iterable[str or NoneType]): A list of directories where
data is contained. See the reader object for more details.
sort_key (Callable[[torchtext.data.Example], Any]): A function
for determining the value on which data is sorted (i.e. length).
filter_pred (Callable[[torchtext.data.Example], bool]): A function
that accepts Example objects and returns a boolean value
indicating whether to include that example in the dataset.
Attributes:
src_vocabs (List[torchtext.data.Vocab]): Used with dynamic dict/copy
attention. There is a very short vocab for each src example.
It contains just the source words, e.g. so that the generator can
predict to copy them.
"""
def __init__(self, fields, readers, data, dirs, sort_key,
filter_pred=None, corpus_id=None):
self.sort_key = sort_key
can_copy = 'src_map' in fields and 'alignment' in fields
read_iters = [r.read(dat[1], dat[0], dir_) for r, dat, dir_
in zip(readers, data, dirs)]
# self.src_vocabs is used in collapse_copy_scores and Translator.py
self.src_vocabs = []
self.sim_vocabs = [] #20201209 tmr add sim
examples = []
for ex_dict in starmap(_join_dicts, zip(*read_iters)):
if corpus_id is not None:
ex_dict["corpus_id"] = corpus_id
else:
ex_dict["corpus_id"] = "train"
if can_copy:
src_field = fields['src']
sim_field = fields['sim'] #29291129 tmr add sim
tgt_field = fields['tgt']
# this assumes src_field and tgt_field are both text
                src_ex_vocab, sim_ex_vocab, ex_dict = _dynamic_dict(ex_dict, src_field.base_field, sim_field.base_field, tgt_field.base_field) #20201209 tmr add sim
self.src_vocabs.append(src_ex_vocab)
self.sim_vocabs.append(sim_ex_vocab) #20201209 tmr add sim
ex_fields = {k: [(k, v)] for k, v in fields.items() if
k in ex_dict}
ex = Example.fromdict(ex_dict, ex_fields)
examples.append(ex)
# fields needs to have only keys that examples have as attrs
fields = []
for _, nf_list in ex_fields.items():
assert len(nf_list) == 1
fields.append(nf_list[0])
super(Dataset, self).__init__(examples, fields, filter_pred)
def __getattr__(self, attr):
# avoid infinite recursion when fields isn't defined
if 'fields' not in vars(self):
raise AttributeError
if attr in self.fields:
return (getattr(x, attr) for x in self.examples)
else:
raise AttributeError
def save(self, path, remove_fields=True):
if remove_fields:
self.fields = []
torch.save(self, path)
@staticmethod
def config(fields):
readers, data, dirs = [], [], []
for name, field in fields:
if field["data"] is not None:
readers.append(field["reader"])
data.append((name, field["data"]))
dirs.append(field["dir"])
return readers, data, dirs
| [
"torch.save",
"torch.LongTensor"
] | 1.4.0 | futuran/OpenNMT-py | 0fef19aff6ea839e0684314792d41f7e08d1232d |
1.4 | import itertools as it
from collections import namedtuple
from typing import Dict, List, Optional, Union, NamedTuple
import torch
from torchaudio._torchaudio_decoder import (
_CriterionType,
_LM,
_KenLM,
_LexiconDecoder,
_LexiconDecoderOptions,
_SmearingMode,
_Trie,
_Dictionary,
_create_word_dict,
_load_words,
_ZeroLM,
)
from torchaudio.utils import download_asset
__all__ = ["Hypothesis", "LexiconDecoder", "lexicon_decoder"]
_PretrainedFiles = namedtuple("PretrainedFiles", ["lexicon", "tokens", "lm"])
class Hypothesis(NamedTuple):
r"""Represents hypothesis generated by CTC beam search decoder :py:func`LexiconDecoder`.
:ivar torch.LongTensor tokens: Predicted sequence of token IDs. Shape `(L, )`, where
`L` is the length of the output sequence
:ivar List[str] words: List of predicted words
:ivar float score: Score corresponding to hypothesis
:ivar torch.IntTensor timesteps: Timesteps corresponding to the tokens. Shape `(L, )`,
where `L` is the length of the output sequence
"""
tokens: torch.LongTensor
words: List[str]
score: float
timesteps: torch.IntTensor
class LexiconDecoder:
"""torchaudio.prototype.ctc_decoder.LexiconDecoder()
    Lexically constrained CTC beam search decoder from *Flashlight* [:footcite:`kahn2022flashlight`].
Note:
To build the decoder, please use factory function
:py:func:`lexicon_decoder`.
Args:
nbest (int): number of best decodings to return
lexicon (Dict): lexicon mapping of words to spellings
word_dict (_Dictionary): dictionary of words
tokens_dict (_Dictionary): dictionary of tokens
lm (_LM): language model
decoder_options (_LexiconDecoderOptions): parameters used for beam search decoding
        blank_token (str): token corresponding to blank
sil_token (str): token corresponding to silence
unk_word (str): word corresponding to unknown
"""
def __init__(
self,
nbest: int,
lexicon: Dict,
word_dict: _Dictionary,
tokens_dict: _Dictionary,
lm: _LM,
decoder_options: _LexiconDecoderOptions,
blank_token: str,
sil_token: str,
unk_word: str,
) -> None:
self.nbest = nbest
self.word_dict = word_dict
self.tokens_dict = tokens_dict
unk_word = word_dict.get_index(unk_word)
self.blank = self.tokens_dict.get_index(blank_token)
silence = self.tokens_dict.get_index(sil_token)
vocab_size = self.tokens_dict.index_size()
trie = _Trie(vocab_size, silence)
start_state = lm.start(False)
for word, spellings in lexicon.items():
word_idx = self.word_dict.get_index(word)
_, score = lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idx = [self.tokens_dict.get_index(token) for token in spelling]
trie.insert(spelling_idx, word_idx, score)
trie.smear(_SmearingMode.MAX)
self.decoder = _LexiconDecoder(
decoder_options,
trie,
lm,
silence,
self.blank,
unk_word,
[],
False, # word level LM
)
def _get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
def _get_timesteps(self, idxs: torch.IntTensor) -> torch.IntTensor:
"""Returns frame numbers corresponding to non-blank tokens."""
timesteps = []
for i, idx in enumerate(idxs):
if idx == self.blank:
continue
if i == 0 or idx != idxs[i - 1]:
timesteps.append(i)
return torch.IntTensor(timesteps)
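    # Example for the two helpers above (added comment, not in the original file):
    # with self.blank = 0 and frame-level indices [0, 0, 5, 5, 0, 3],
    # _get_tokens collapses repeats and drops blanks -> tensor([5, 3]), while
    # _get_timesteps returns the frame where each surviving token starts -> [2, 5].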
def __call__(self, emissions: torch.FloatTensor, lengths: Optional[torch.Tensor] = None) -> List[List[Hypothesis]]:
# Overriding the signature so that the return type is correct on Sphinx
"""__call__(self, emissions: torch.FloatTensor, lengths: Optional[torch.Tensor] = None) -> \
List[List[torchaudio.prototype.ctc_decoder.Hypothesis]]
Args:
emissions (torch.FloatTensor): tensor of shape `(batch, frame, num_tokens)` storing sequences of
probability distribution over labels; output of acoustic model
lengths (Tensor or None, optional): tensor of shape `(batch, )` storing the valid length of
in time axis of the output Tensor in each batch
Returns:
List[List[Hypothesis]]:
List of sorted best hypotheses for each audio sequence in the batch.
"""
assert emissions.dtype == torch.float32
B, T, N = emissions.size()
if lengths is None:
lengths = torch.full((B,), T)
float_bytes = 4
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + float_bytes * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, lengths[b], N)
nbest_results = results[: self.nbest]
hypos.append(
[
Hypothesis(
tokens=self._get_tokens(result.tokens),
words=[self.word_dict.get_entry(x) for x in result.words if x >= 0],
score=result.score,
timesteps=self._get_timesteps(result.tokens),
)
for result in nbest_results
]
)
return hypos
def idxs_to_tokens(self, idxs: torch.LongTensor) -> List:
"""
Map raw token IDs into corresponding tokens
Args:
idxs (LongTensor): raw token IDs generated from decoder
Returns:
List: tokens corresponding to the input IDs
"""
return [self.tokens_dict.get_entry(idx.item()) for idx in idxs]
def lexicon_decoder(
lexicon: str,
tokens: Union[str, List[str]],
lm: Optional[str] = None,
nbest: int = 1,
beam_size: int = 50,
beam_size_token: Optional[int] = None,
beam_threshold: float = 50,
lm_weight: float = 2,
word_score: float = 0,
unk_score: float = float("-inf"),
sil_score: float = 0,
log_add: bool = False,
blank_token: str = "-",
sil_token: str = "|",
unk_word: str = "<unk>",
) -> LexiconDecoder:
"""
Builds lexically constrained CTC beam search decoder from
*Flashlight* [:footcite:`kahn2022flashlight`].
Args:
lexicon (str): lexicon file containing the possible words and corresponding spellings.
Each line consists of a word and its space separated spelling
tokens (str or List[str]): file or list containing valid tokens. If using a file, the expected
format is for tokens mapping to the same index to be on the same line
lm (str or None, optional): file containing language model, or `None` if not using a language model
nbest (int, optional): number of best decodings to return (Default: 1)
beam_size (int, optional): max number of hypos to hold after each decode step (Default: 50)
beam_size_token (int, optional): max number of tokens to consider at each decode step.
If None, it is set to the total number of tokens (Default: None)
beam_threshold (float, optional): threshold for pruning hypothesis (Default: 50)
lm_weight (float, optional): weight of language model (Default: 2)
word_score (float, optional): word insertion score (Default: 0)
unk_score (float, optional): unknown word insertion score (Default: -inf)
sil_score (float, optional): silence insertion score (Default: 0)
log_add (bool, optional): whether or not to use logadd when merging hypotheses (Default: False)
blank_token (str, optional): token corresponding to blank (Default: "-")
sil_token (str, optional): token corresponding to silence (Default: "|")
unk_word (str, optional): word corresponding to unknown (Default: "<unk>")
Returns:
LexiconDecoder: decoder
Example
>>> decoder = lexicon_decoder(
>>> lexicon="lexicon.txt",
>>> tokens="tokens.txt",
>>> lm="kenlm.bin",
>>> )
>>> results = decoder(emissions) # List of shape (B, nbest) of Hypotheses
"""
lexicon = _load_words(lexicon)
word_dict = _create_word_dict(lexicon)
lm = _KenLM(lm, word_dict) if lm else _ZeroLM()
tokens_dict = _Dictionary(tokens)
decoder_options = _LexiconDecoderOptions(
beam_size=beam_size,
beam_size_token=beam_size_token or tokens_dict.index_size(),
beam_threshold=beam_threshold,
lm_weight=lm_weight,
word_score=word_score,
unk_score=unk_score,
sil_score=sil_score,
log_add=log_add,
criterion_type=_CriterionType.CTC,
)
return LexiconDecoder(
nbest=nbest,
lexicon=lexicon,
word_dict=word_dict,
tokens_dict=tokens_dict,
lm=lm,
decoder_options=decoder_options,
blank_token=blank_token,
sil_token=sil_token,
unk_word=unk_word,
)
def _get_filenames(model: str) -> _PretrainedFiles:
if model not in ["librispeech", "librispeech-3-gram", "librispeech-4-gram"]:
raise ValueError(
f"{model} not supported. Must be one of ['librispeech-3-gram', 'librispeech-4-gram', 'librispeech']"
)
prefix = f"decoder-assets/{model}"
return _PretrainedFiles(
lexicon=f"{prefix}/lexicon.txt",
tokens=f"{prefix}/tokens.txt",
lm=f"{prefix}/lm.bin" if model != "librispeech" else None,
)
def download_pretrained_files(model: str) -> _PretrainedFiles:
"""
Retrieves pretrained data files used for CTC decoder.
Args:
model (str): pretrained language model to download.
Options: ["librispeech-3-gram", "librispeech-4-gram", "librispeech"]
Returns:
Object with the following attributes
lm:
path corresponding to downloaded language model, or `None` if the model is not associated with an lm
lexicon:
path corresponding to downloaded lexicon file
tokens:
path corresponding to downloaded tokens file
"""
files = _get_filenames(model)
lexicon_file = download_asset(files.lexicon)
tokens_file = download_asset(files.tokens)
if files.lm is not None:
lm_file = download_asset(files.lm)
else:
lm_file = None
return _PretrainedFiles(
lexicon=lexicon_file,
tokens=tokens_file,
lm=lm_file,
)
| [
"torch.IntTensor",
"torch.full"
] | 1.4.0 | spital/audio | 414f4f774e9b649bf17c523fbd56b2c54c493508 |
1.8 | import argparse
import inspect
from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
from .unet import SuperResModel, UNetModel, EncoderUNetModel
NUM_CLASSES = 1000
def diffusion_defaults():
"""
Defaults for image and classifier training.
"""
return dict(
learn_sigma=False,
diffusion_steps=1000,
noise_schedule="linear",
timestep_respacing="",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
)
def classifier_defaults():
"""
Defaults for classifier models.
"""
return dict(
image_size=64,
classifier_use_fp16=False,
classifier_width=128,
classifier_depth=2,
classifier_attention_resolutions="32,16,8", # 16
classifier_use_scale_shift_norm=True, # False
classifier_resblock_updown=True, # False
classifier_pool="attention",
)
def model_and_diffusion_defaults():
"""
Defaults for image training.
"""
res = dict(
image_size=64,
num_channels=128,
num_res_blocks=2,
num_heads=4,
num_heads_upsample=-1,
num_head_channels=-1,
attention_resolutions="16,8",
channel_mult="",
dropout=0.0,
class_cond=False,
use_checkpoint=False,
use_scale_shift_norm=True,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
z_cond=False,
)
res.update(diffusion_defaults())
return res
def classifier_and_diffusion_defaults():
res = classifier_defaults()
res.update(diffusion_defaults())
return res
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
z_cond,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
z_cond=z_cond,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult="",
learn_sigma=False,
class_cond=False,
use_checkpoint=False,
attention_resolutions="16",
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
dropout=0,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
z_cond=False,
):
if channel_mult == "":
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
else:
channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return UNetModel(
image_size=image_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
use_fp16=use_fp16,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_new_attention_order=use_new_attention_order,
z_cond=z_cond,
)
def create_classifier_and_diffusion(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
learn_sigma,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
):
classifier = create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return classifier, diffusion
def create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
):
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
attention_ds = []
for res in classifier_attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return EncoderUNetModel(
image_size=image_size,
in_channels=3,
model_channels=classifier_width,
out_channels=1000,
num_res_blocks=classifier_depth,
attention_resolutions=tuple(attention_ds),
channel_mult=channel_mult,
use_fp16=classifier_use_fp16,
num_head_channels=64,
use_scale_shift_norm=classifier_use_scale_shift_norm,
resblock_updown=classifier_resblock_updown,
pool=classifier_pool,
)
def sr_model_and_diffusion_defaults():
res = model_and_diffusion_defaults()
res["large_size"] = 128
res["small_size"] = 64
arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
for k in res.copy().keys():
if k not in arg_names:
del res[k]
return res
def sr_create_model_and_diffusion(
large_size,
small_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
):
model = sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma,
class_cond,
use_checkpoint,
attention_resolutions,
num_heads,
num_head_channels,
num_heads_upsample,
use_scale_shift_norm,
dropout,
resblock_updown,
use_fp16,
):
_ = small_size # hack to prevent unused variable
if large_size == 512:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif large_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported large size: {large_size}")
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(large_size // int(res))
return SuperResModel(
image_size=large_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
def create_gaussian_diffusion(
*,
steps=1000,
learn_sigma=False,
sigma_small=False,
noise_schedule="linear",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
timestep_respacing="",
):
betas = gd.get_named_beta_schedule(noise_schedule, steps)
if use_kl:
loss_type = gd.LossType.RESCALED_KL
elif rescale_learned_sigmas:
loss_type = gd.LossType.RESCALED_MSE
else:
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
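# A minimal sketch of how the helpers above are typically wired together in a
# training or sampling script (added for illustration, not part of the original file):
#
#     parser = argparse.ArgumentParser()
#     add_dict_to_argparser(parser, model_and_diffusion_defaults())
#     args = parser.parse_args()
#     model, diffusion = create_model_and_diffusion(
#         **args_to_dict(args, model_and_diffusion_defaults().keys())
#     )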
def seed_all(seed: int):
"""
Seeding everything for paired indendent training
:param seed: seed number for a number generator.
"""
import os
import numpy as np
import torch as th
import random
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
th.manual_seed(seed)
th.cuda.manual_seed(seed)
th.cuda.manual_seed_all(seed)
th.backends.cudnn.deterministic = True
th.backends.cudnn.benchmark = False | [
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all"
] | 1.8.2 | XezXey/guided-diffusion | 5e156d0c1135e3432c33e5a5efb382f2095d9a59 |
1.8 | from abc import ABC, abstractmethod
import numpy as np
from numpy.lib.function_base import diff
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
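# Illustrative usage sketch (not from the original source): create_named_schedule_sampler
# accepts "uniform" or "loss-second-moment"; the sampled timesteps and weights then feed
# the training loss.
# sampler = create_named_schedule_sampler("loss-second-moment", diffusion)
# t, weights = sampler.sample(batch_size=8, device=th.device("cpu"))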
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
# print(batch_size)
# print(w)
# print(p)
# print(indices_np)
# print(indices)
# print(weights_np)
# print(weights)
return indices, weights
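# Worked example for ScheduleSampler.sample (illustrative, not from the original source):
# with p = [0.5, 0.25, 0.25], a draw of timestep 0 gets weight 1 / (3 * 0.5) = 2/3 and a
# draw of timestep 2 gets weight 1 / (3 * 0.25) = 4/3, so E_{t~p}[weight_t * loss_t]
# reduces to the plain mean over timesteps and the estimator stays unbiased, as the
# class docstring states.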
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
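    # Note (illustrative, not from the original source): after warm-up the weight for
    # timestep t is proportional to sqrt(mean of the last `history_per_term` squared
    # losses), normalized and then mixed with a uniform floor:
    # w_t <- (1 - uniform_prob) * w_t + uniform_prob / num_timesteps.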
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
| [
"torch.zeros",
"torch.distributed.get_world_size",
"torch.distributed.all_gather",
"torch.from_numpy",
"torch.tensor"
] | 1.8.2 | XezXey/guided-diffusion | 5e156d0c1135e3432c33e5a5efb382f2095d9a59 |
1.7 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
from loguru import logger
import torch
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from yolox.core import launch
from yolox.exp import get_exp
from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger
import argparse
import os
import random
import warnings
def make_parser():
parser = argparse.ArgumentParser("YOLOX Eval")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
# distributed
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--dist-url",
default=None,
type=str,
help="url used to set up distributed training",
)
parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
parser.add_argument(
"-d", "--devices", default=None, type=int, help="device for training"
)
parser.add_argument(
"--local_rank", default=0, type=int, help="local rank for dist training"
)
parser.add_argument(
"--num_machines", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="pls input your expriment description file",
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument("--conf", default=None, type=float, help="test conf")
parser.add_argument("--nms", default=None, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=None, type=int, help="test img size")
parser.add_argument("--seed", default=None, type=int, help="eval seed")
parser.add_argument(
"--fp16",
dest="fp16",
default=False,
action="store_true",
help="Adopting mix precision evaluating.",
)
parser.add_argument(
"--fuse",
dest="fuse",
default=False,
action="store_true",
help="Fuse conv and bn for testing.",
)
parser.add_argument(
"--trt",
dest="trt",
default=False,
action="store_true",
help="Using TensorRT model for testing.",
)
parser.add_argument(
"--test",
dest="test",
default=False,
action="store_true",
help="Evaluating on test-dev set.",
)
parser.add_argument(
"--speed",
dest="speed",
default=False,
action="store_true",
help="speed test only.",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
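# Illustrative invocation sketch (model name, checkpoint path and thresholds are
# assumptions, not values from the original source):
#   python eval.py -n yolox-s -c /path/to/best_ckpt.pth.tar -b 64 -d 1 --conf 0.001 --fuse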
@logger.catch
def main(exp, args, num_gpu):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed testing. This will turn on the CUDNN deterministic setting, "
)
is_distributed = num_gpu > 1
# set environment variables for distributed training
cudnn.benchmark = True
rank = args.local_rank
# rank = get_local_rank()
file_name = os.path.join(exp.output_dir, args.experiment_name)
if rank == 0:
os.makedirs(file_name, exist_ok=True)
setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
logger.info("Args: {}".format(args))
if args.conf is not None:
exp.test_conf = args.conf
if args.nms is not None:
exp.nmsthre = args.nms
if args.tsize is not None:
exp.test_size = (args.tsize, args.tsize)
model = exp.get_model()
logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
logger.info("Model Structure:\n{}".format(str(model)))
evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test)
torch.cuda.set_device(rank)
model.cuda(rank)
model.eval()
if not args.speed and not args.trt:
if args.ckpt is None:
ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
else:
ckpt_file = args.ckpt
logger.info("loading checkpoint")
loc = "cuda:{}".format(rank)
ckpt = torch.load(ckpt_file, map_location=loc)
# load the model state dict
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")
if is_distributed:
model = DDP(model, device_ids=[rank])
if args.fuse:
logger.info("\tFusing model...")
model = fuse_model(model)
if args.trt:
assert (
not args.fuse and not is_distributed and args.batch_size == 1
), "TensorRT model is not support model fusing and distributed inferencing!"
trt_file = os.path.join(file_name, "model_trt.pth")
assert os.path.exists(
trt_file
), "TensorRT model is not found!\n Run tools/trt.py first!"
model.head.decode_in_inference = False
decoder = model.head.decode_outputs
else:
trt_file = None
decoder = None
# start evaluate
*_, summary = evaluator.evaluate(
model, is_distributed, args.fp16, trt_file, decoder, exp.test_size
)
logger.info("\n" + summary)
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
if not args.experiment_name:
args.experiment_name = exp.exp_name
num_gpu = torch.cuda.device_count() if args.devices is None else args.devices
assert num_gpu <= torch.cuda.device_count()
launch(
main,
num_gpu,
args.num_machines,
args.machine_rank,
backend=args.dist_backend,
dist_url=args.dist_url,
args=(exp, args, num_gpu),
)
| [
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.load"
] | 1.7 | XHYsdjkdsjsk2021/Yolox_xhy | a60f585d4d2bf36f9fa90b0a078efb7b315e0118 |
1.4 | import colorama
import spacy
import torch
import torch.nn.functional as F
from nltk.corpus import stopwords
from spacy.pipeline import EntityRuler
from spacy.tokens import Token
from transformers import GPT2Tokenizer
from KID.agent import BaseAgent
from KID.agent.agent_utils import calc_banned_ngram_tokens, top_k_filter, top_p_filter
from KID.envs import BaseEnv
from KID.infra import Frame
from KID.policy import BasePolicy
STOP_WORDS = set(stopwords.words('english'))
getter = lambda token: token.is_stop \
or token.lower_ in STOP_WORDS or token.lemma_ in STOP_WORDS
Token.set_extension('is_stop', getter=getter, force=True) # set attribute with getter
nlp = spacy.load("en_core_web_sm")
ruler = EntityRuler(nlp)
nlp.add_pipe(ruler)
class KIDAgent(BaseAgent):
def __init__(
self,
env: BaseEnv,
policy: BasePolicy,
is_kid: bool,
):
super(KIDAgent, self).__init__()
self.env = env
self.policy = policy
self.is_kid = is_kid
self.temperature = 1
self.repetition_penalty = 1
self.banned_ngram_size = 2
self.min_length = 0
self.gm_scale = 0.99
self.top_p = 0.92
self.top_k = 20
self.sampling = True
self.color = False
self._cur_norm_ids = torch.empty(0)
self._cur_kid_ids = torch.empty(0)
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
def forward(self, action: 'Frame') -> 'Frame':
# action should have past and last
past, last = action['past'], action['last']
if past is None: # initial step
action = self.env.step(action) # now action['past'] should not be None
self._cur_norm_ids = last
if self.is_kid:
self._cur_kid_ids = last
observation = self.env.step(action) # do the real step with valid action
norm_logits = observation['logits']
norm_past = observation['past']
if not self.is_kid:
# by default, we are using sampling decoding
norm_last_prob = F.softmax(norm_logits[:, -1, :], dim=-1)
if self.sampling:
                norm_last_prob = norm_last_prob[~torch.any(norm_last_prob.isnan(), dim=-1)]
norm_last = torch.multinomial(norm_last_prob, num_samples=1)
else: # or we can choose greedy decoding
_, norm_last = torch.topk(norm_last_prob, k=1, dim=-1)
self._cur_norm_ids = norm_last if self._cur_norm_ids is None \
else torch.cat((self._cur_norm_ids, norm_last), dim=1)
gen_text_norm = self.tokenizer.decode(
self._cur_norm_ids.tolist()[0], skip_special_tokens=True
)
observation['last'] = norm_last
observation['gen_text'] = gen_text_norm
observation['cur_gen_id'] = self._cur_norm_ids
return observation
else: # if it is for KID
norm_last_prob = self.pos_func(norm_logits, is_kid=True)
kid_action = self.policy.update_policy(
Frame(
past=norm_past,
last=last,
logits=norm_logits,
cur_gen_ids=self._cur_kid_ids,
is_kid=True,
)
)
kid_observation = self.env.step(kid_action)
kid_kg_indices = kid_observation['kg_indices']
kid_kg_indices = list(filter(lambda x: len(x) <= 1, kid_kg_indices))
kid_kg_indices = [ind[0] for ind in kid_kg_indices]
kid_logits = kid_observation['logits']
kid_last_prob = self.pos_func(kid_logits, is_kid=True)
# fuse the two probabilities
kid_last_prob = (kid_last_prob**
self.gm_scale) * (norm_last_prob**(1 - self.gm_scale))
kid_last_prob = top_k_filter(
kid_last_prob,
top_k=self.top_k,
min_tokens_to_keep=self.min_length,
is_probs=True
)
kid_last_prob = top_p_filter(
kid_last_prob,
top_p=self.top_p,
min_tokens_to_keep=self.min_length,
is_probs=True
)
# rescale
if torch.sum(kid_last_prob) <= 1:
kid_last_prob = kid_last_prob / torch.sum(kid_last_prob)
if self.sampling:
                kid_last_prob = kid_last_prob[~torch.any(kid_last_prob.isnan(), dim=-1)]
kid_last = torch.multinomial(kid_last_prob, num_samples=1)
else: # or we can choose greedy decoding
_, kid_last = torch.topk(kid_last_prob, k=1, dim=-1)
self._cur_kid_ids = kid_last if self._cur_kid_ids is None \
else torch.cat((self._cur_kid_ids, kid_last), dim=1)
if self.color:
gen_text_kid = ""
for word_id in self._cur_kid_ids.tolist()[0]:
if word_id in kid_kg_indices:
gen_text_kid += "{}{}{}".format(
colorama.Fore.RED,
self.tokenizer.decode([word_id], skip_special_tokens=True),
colorama.Style.RESET_ALL,
)
else:
gen_text_kid += self.tokenizer.decode(
[word_id], skip_special_tokens=True
)
else:
gen_text_kid = self.tokenizer.decode(
self._cur_kid_ids.tolist()[0], skip_special_tokens=True
)
kid_observation['last'] = kid_last
kid_observation['gen_text'] = gen_text_kid
kid_observation['cur_gen_id'] = self._cur_kid_ids
return kid_observation
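    # Note (illustrative, not from the original source): the fusion above is a geometric
    # interpolation, p_fused proportional to p_kid ** gm_scale * p_norm ** (1 - gm_scale),
    # followed by top-k / top-p filtering and renormalization before the next token is
    # sampled.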
def pos_func(self, logits: torch.Tensor, is_kid: bool = False) -> torch.Tensor:
last_logits = logits[:, -1, :]
# load already generated tokens
if is_kid:
gen_toks_ids = self._cur_kid_ids
else:
gen_toks_ids = self._cur_norm_ids
# repetition penalty
for token_idx in set(gen_toks_ids[0].tolist()):
if last_logits[0, token_idx] < 0:
last_logits[0, token_idx] *= self.repetition_penalty
else:
last_logits[0, token_idx] /= self.repetition_penalty
# ban duplicated ngrams
cur_length = gen_toks_ids.size(1)
banned_batch_tokens = calc_banned_ngram_tokens(
self.banned_ngram_size, gen_toks_ids, cur_length
)
# print("banned tokens", banned_batch_tokens)
        for banned_tokens in banned_batch_tokens:
last_logits[:, banned_tokens] = -float("inf")
# # del banned_batch_tokens
# min_length guarantee
if cur_length < self.min_length:
last_logits[:, self.tokenizer.eos_token_id] = -float("inf")
last_prob = F.softmax(last_logits, dim=-1)
return last_prob
| [
"torch.cat",
"torch.sum",
"torch.multinomial",
"torch.nn.functional.softmax",
"torch.empty",
"torch.topk"
] | 1.4.0 | microsoft/KID | a23e9d819b53605b6426170124feed10288c6f8b |
1.7 | import re
from typing import Tuple, List, Iterator, Dict, Any
import numpy as np
from PIL import Image
from numpy.core.fromnumeric import resize
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.applications as tensorflow_models
from tensorflow.keras import layers
import thingsvision.cornet as cornet
import thingsvision.clip as clip
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms as T
import torchvision.models as torchvision_models
class Model():
def __init__(self,
model_name: str,
pretrained: bool,
device: str,
model_path: str=None,
backend: str='pt'):
"""
Parameters
----------
        model_name : str
Model name. Name of model for which features should
subsequently be extracted.
pretrained : bool
Whether to load a model with pretrained or
randomly initialized weights into memory.
device : str
Device. Whether model weights should be moved
to CUDA or left on the CPU.
model_path : str (optional)
path/to/weights. If pretrained is set to False,
model weights can be loaded from a path on the
user's machine. This is useful when operating
on a server without network access, or when
features should be extracted for a model that
was fine-tuned (or trained) on a custom image
dataset.
backend : str (optional)
Deep learning framework that should be used.
'pt' for PyTorch and 'tf' for Tensorflow
"""
self.model_name = model_name
self.backend = backend
self.pretrained = pretrained
self.device = device
self.model_path = model_path
self.load_model()
def load_model(self) -> Tuple[Any, Any]:
"""Load a pretrained *torchvision* or CLIP model into memory."""
if self.backend == 'pt':
if re.search(r'^clip', self.model_name):
clip_model_name = "RN50"
if re.search(r'ViT$', self.model_name):
clip_model_name = "ViT-B/32"
self.model, self.clip_n_px = clip.load(
clip_model_name,
device=self.device,
model_path=self.model_path,
pretrained=self.pretrained,
jit=False,
)
else:
device = torch.device(self.device)
if re.search(r'^cornet', self.model_name):
try:
self.model = getattr(cornet, f'cornet_{self.model_name[-1]}')
except:
self.model = getattr(cornet, f'cornet_{self.model_name[-2:]}')
self.model = self.model(pretrained=self.pretrained, map_location=device)
self.model = self.model.module # remove DataParallel
elif self.model_name == 'vgg16_bn_ecoset':
self.model = torchvision_models.vgg16_bn(pretrained=False)
self.model.classifier[6] = nn.Linear(4096, 565, bias=True)
self.model_path = 'https://osf.io/fe7s5/download'
else:
self.model = getattr(torchvision_models, self.model_name)
self.model = self.model(pretrained=self.pretrained)
self.model = self.model.to(device)
if self.model_path:
try:
state_dict = torch.load(self.model_path, map_location=device)
except FileNotFoundError:
state_dict = torch.hub.load_state_dict_from_url(self.model_path, map_location=device)
self.model.load_state_dict(state_dict)
self.model.eval()
elif self.backend == 'tf':
model = getattr(tensorflow_models, self.model_name)
if self.pretrained:
weights = 'imagenet'
elif self.model_path:
weights = self.model_path
else:
weights = None
self.model = model(weights=weights)
def show(self) -> str:
"""Show architecture of model to select a layer."""
if re.search(r'^clip', self.model_name):
for l, (n, p) in enumerate(self.model.named_modules()):
if l > 1:
if re.search(r'^visual', n):
print(n)
print('visual')
else:
print(self.model)
print(f'\nEnter module name for which you would like to extract features:\n')
module_name = str(input())
print()
return module_name
def extract_features(
self,
data_loader: Iterator,
module_name: str,
batch_size: int,
flatten_acts: bool,
clip: bool = False,
return_probabilities: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Extract hidden unit activations (at specified layer) for every image in the database.
Parameters
----------
data_loader : Iterator
Mini-batches. Iterator with equally sized
mini-batches, where each element is a
subsample of the full image dataset.
module_name : str
Layer name. Name of neural network layer for
            which features should be extracted.
flatten_acts : bool
Whether activation tensor (e.g., activations
from an early layer of the neural network model)
should be transformed into a vector.
clip : bool (optional)
Whether neural network model is a CNN-based
torchvision or CLIP-based model. Since CLIP
has a different training objective, feature
extraction must be performed differently.
return_probabilities : bool (optional)
Whether class probabilities (softmax predictions)
should be returned in addition to the feature matrix
and the target vector.
Returns
-------
output : Tuple[np.ndarray, np.ndarray] OR Tuple[np.ndarray, np.ndarray, np.ndarray]
Returns the feature matrix and the target vector OR in addition to the feature
matrix and the target vector, the class probabilities.
"""
features, targets = [], []
if return_probabilities:
assert not clip, '\nCannot extract activations for CLIP and return class predictions simultaneously. This feature will be implemented in a future version.\n'
probabilities = []
if self.backend == 'pt':
if re.search(r'ensemble$', module_name):
ensembles = ['conv_ensemble', 'maxpool_ensemble']
assert module_name in ensembles, f'\nIf aggregating filters across layers and subsequently concatenating features, module name must be one of {ensembles}\n'
if re.search(r'^conv', module_name):
feature_extractor = nn.Conv2d
else:
feature_extractor = nn.MaxPool2d
device = torch.device(self.device)
# initialise dictionary to store hidden unit activations on the fly
global activations
activations = {}
# register forward hook to store activations
model = self.register_hook()
with torch.no_grad():
for i, batch in enumerate(data_loader):
batch = (t.to(device) for t in batch)
X, y = batch
if clip:
img_features = model.encode_image(X)
if module_name == 'visual':
assert torch.unique(activations[module_name] == img_features).item(
), '\nImage features should represent activations in last encoder layer.\n'
else:
out = model(X)
if return_probabilities:
probas = F.softmax(out, dim=1)
probabilities.append(probas)
if re.search(r'ensemble$', module_name):
layers = self.enumerate_layers(feature_extractor)
                        act = self.ensemble_featmaps(activations, layers, 'max')
else:
act = activations[module_name]
if flatten_acts:
if clip:
if re.search(r'attn$', module_name):
act = act[0]
else:
if act.size(0) != X.shape[0] and len(act.shape) == 3:
act = act.permute(1, 0, 2)
act = act.view(act.size(0), -1)
features.append(act.cpu())
targets.extend(y.squeeze(-1).cpu())
elif self.backend == 'tf':
for i, batch in enumerate(data_loader):
X, y = batch
layer_outputs = [self.model.get_layer(module_name).output]
activation_model = keras.models.Model(inputs=self.model.input, outputs=layer_outputs)
activations = activation_model.predict(X)
features.append(activations)
targets.extend(y.numpy())
if return_probabilities:
out = self.model.predict(X)
probas = tf.nn.softmax(out, axis=1)
probabilities.append(probas)
# stack each mini-batch of hidden activations to obtain an N x F matrix, and flatten targets to yield vector
features = np.vstack(features)
targets = np.asarray(targets).ravel()
if return_probabilities:
probabilities = np.vstack(probabilities)
assert len(features) == len(targets) == len(
probabilities), '\nFeatures, targets, and probabilities must correspond to the same number of images.\n'
return features, targets, probabilities
assert len(features) == len(
targets), '\nFeatures and targets must correspond to the same number of images.\n'
print(f'...Features successfully extracted for all {len(features)} images in the database.')
print(f'...Features shape: {features.shape}')
return features, targets
def enumerate_layers(self, feature_extractor) -> List[int]:
layers = []
k = 0
for n, m in self.model.named_modules():
if re.search(r'\d+$', n):
if isinstance(m, feature_extractor):
layers.append(k)
k += 1
return layers
def ensemble_featmaps(
self,
activations: dict,
layers: list,
pooling: str = 'max',
alpha: float = 3.,
beta: float = 5.,
) -> torch.Tensor:
"""Concatenate globally (max or average) pooled feature maps."""
acts = [activations[''.join(('features.', str(l)))] for l in layers]
func = torch.max if pooling == 'max' else torch.mean
pooled_acts = [torch.tensor([list(map(func, featmaps))
for featmaps in acts_i]) for acts_i in acts]
        # upweight second-to-last conv layer by alpha (default 3.)
pooled_acts[-2] = pooled_acts[-2] * alpha
        # upweight last conv layer by beta (default 5.)
pooled_acts[-1] = pooled_acts[-1] * beta
stacked_acts = torch.cat(pooled_acts, dim=1)
return stacked_acts
def get_activation(self, name):
"""Store hidden unit activations at each layer of model."""
def hook(model, input, output):
try:
activations[name] = output.detach()
except AttributeError:
activations[name] = output
return hook
def register_hook(self):
"""Register a forward hook to store activations."""
for n, m in self.model.named_modules():
m.register_forward_hook(self.get_activation(n))
return self.model
def get_transformations(self, resize_dim: int = 256, crop_dim: int = 224):
if re.search(r'^clip', self.model_name):
if self.backend != 'pt':
raise Exception("You need to use Tensorflow 'tf' as backend if you want to use the CLIP model.")
composition = T.Compose([
T.Resize(self.clip_n_px, interpolation=Image.BICUBIC),
T.CenterCrop(self.clip_n_px),
lambda image: image.convert("RGB"),
T.ToTensor(),
T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
return composition
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if self.backend == 'pt':
normalize = T.Normalize(mean=mean, std=std)
composition = T.Compose([T.Resize(resize_dim), T.CenterCrop(crop_dim), T.ToTensor(), normalize])
return composition
elif self.backend == 'tf':
resize_dim = crop_dim
resize_crop_and_normalize = tf.keras.Sequential([
layers.experimental.preprocessing.Resizing(resize_dim, resize_dim),
#layers.experimental.preprocessing.CenterCrop(crop_dim, crop_dim)
layers.experimental.preprocessing.Normalization(mean=mean, variance=[std_ * std_ for std_ in std])
])
return resize_crop_and_normalize
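# Illustrative usage sketch (model name, module name and batch size are assumptions,
# and data_loader stands for an image DataLoader; not from the original source):
# model = Model(model_name='alexnet', pretrained=True, device='cpu', backend='pt')
# transforms = model.get_transformations()
# features, targets = model.extract_features(data_loader, 'features.10',
#                                             batch_size=32, flatten_acts=True)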
| [
"torch.nn.Linear",
"torch.device",
"torch.cat",
"torch.unique",
"torch.no_grad",
"torch.hub.load_state_dict_from_url",
"torch.load",
"torch.nn.functional.softmax"
] | 1.7.1 | ViCCo-Group/THINGSvision | 27273564631605639287f9b3bd3c57ba8cdb720f |
1.4 | import argparse
import torch
import warnings
from argformat import StructuredFormatter
from .preprocessor import Preprocessor
from .tiresias import Tiresias
if __name__ == "__main__":
########################################################################
# Parse arguments #
########################################################################
# Parse arguments
parser = argparse.ArgumentParser(
prog = "tiresias.py",
description = "Tiresias: Predicting Security Events Through Deep Learning",
formatter_class=StructuredFormatter
)
# Add Tiresias mode arguments, run in different modes
parser.add_argument('mode', help="mode in which to run Tiresias", choices=(
'train',
'predict',
))
# Add input arguments
group_input = parser.add_argument_group("Input parameters")
group_input.add_argument('--csv' , help="CSV events file to process")
group_input.add_argument('--txt' , help="TXT events file to process")
group_input.add_argument('--length' , type=int , default=20 , help="sequence LENGTH ")
group_input.add_argument('--timeout' , type=float, default=float('inf'), help="sequence TIMEOUT (seconds)")
# Tiresias
group_tiresias = parser.add_argument_group("Tiresias parameters")
group_tiresias.add_argument( '--hidden', type=int, default=128, help='hidden dimension')
group_tiresias.add_argument('-i', '--input' , type=int, default=300, help='input dimension')
group_tiresias.add_argument('-k', '--k' , type=int, default=4 , help='number of concurrent memory cells')
group_tiresias.add_argument('-o', '--online', action='store_true' , help='use online training while predicting')
group_tiresias.add_argument('-t', '--top' , type=int, default=1 , help='accept any of the TOP predictions')
group_tiresias.add_argument('--save', help="save Tiresias to specified file")
group_tiresias.add_argument('--load', help="load Tiresias from specified file")
# Training
group_training = parser.add_argument_group("Training parameters")
group_training.add_argument('-b', '--batch-size', type=int, default=128, help="batch size")
group_training.add_argument('-d', '--device' , default='auto' , help="train using given device (cpu|cuda|auto)")
group_training.add_argument('-e', '--epochs' , type=int, default=10, help="number of epochs to train with")
# Parse arguments
args = parser.parse_args()
########################################################################
# Load data #
########################################################################
# Set device
if args.device is None or args.device == 'auto':
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Create preprocessor
preprocessor = Preprocessor(
length = args.length,
timeout = args.timeout,
)
# Load files
if args.csv is not None and args.txt is not None:
# Raise an error if both csv and txt are specified
raise ValueError("Please specify EITHER --csv OR --txt.")
if args.csv:
# Load csv file
X, y, label, mapping = preprocessor.csv(args.csv)
elif args.txt:
# Load txt file
X, y, label, mapping = preprocessor.txt(args.txt)
X = X.to(args.device)
y = y.to(args.device)
########################################################################
# Tiresias #
########################################################################
# Create instance of Tiresias
tiresias = Tiresias(
input_size = args.input,
hidden_size = args.hidden,
output_size = args.input,
k = args.k,
).to(args.device)
# Load Tiresias from file, if necessary
if args.load:
tiresias = Tiresias.load(args.load).to(args.device)
# Train Tiresias
if args.mode == "train":
# Print warning if training Tiresias without saving it
if args.save is None:
warnings.warn("Training Tiresias without saving it to output.")
# Fit Tiresias with data
tiresias.fit(
X = X,
y = y,
epochs = args.epochs,
batch_size = args.batch_size,
)
# Save Tiresias to file
if args.save:
tiresias.save(args.save)
# Predict with Tiresias
if args.mode == "predict":
if args.online:
y_pred, confidence = tiresias.predict_online(X, y, k=args.top)
else:
y_pred, confidence = tiresias.predict(X, k=args.top)
####################################################################
# Show predictions #
####################################################################
# Initialise predictions
y_pred_top = y_pred[:, 0].clone()
# Compute top TOP predictions
for top in range(1, args.top):
print(top, y_pred.shape)
# Get mask
mask = y == y_pred[:, top]
# Set top values
y_pred_top[mask] = y[mask]
from sklearn.metrics import classification_report
print(classification_report(y.cpu(), y_pred_top.cpu(), digits=4))
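    # Illustrative invocation sketch (file names are assumptions, not from the original
    # source):
    #   python -m tiresias train   --csv events.csv --save tiresias.save --epochs 10
    #   python -m tiresias predict --csv events.csv --load tiresias.save --top 3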
| [
"torch.cuda.is_available"
] | 1.4.0 | Thijsvanede/Tiresias | b007e19fb1bb5d073001aa156673c9dd382f939a |
1.5 | import os
import json
import argparse
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
from data_utils import TextMelLoader, TextMelCollate
import models
import commons
import utils
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "80000"
hps = utils.get_hparams()
mp.spawn(
train_and_eval,
nprocs=n_gpus,
args=(
n_gpus,
hps,
),
)
def train_and_eval(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.log_dir)
logger.info(hps)
utils.check_git_hash(hps.log_dir)
writer = SummaryWriter(log_dir=hps.log_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.log_dir, "eval"))
dist.init_process_group(
backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextMelLoader(hps.data.training_files, hps.data)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=n_gpus, rank=rank, shuffle=True
)
collate_fn = TextMelCollate(1)
train_loader = DataLoader(
train_dataset,
num_workers=8,
shuffle=False,
batch_size=hps.train.batch_size,
pin_memory=True,
drop_last=True,
collate_fn=collate_fn,
sampler=train_sampler,
)
if rank == 0:
val_dataset = TextMelLoader(hps.data.validation_files, hps.data)
val_loader = DataLoader(
val_dataset,
num_workers=8,
shuffle=False,
batch_size=hps.train.batch_size,
pin_memory=True,
drop_last=True,
collate_fn=collate_fn,
)
symbols = hps.data.punc + hps.data.chars
generator = models.FlowGenerator(
n_vocab=len(symbols) + getattr(hps.data, "add_blank", False),
out_channels=hps.data.n_mel_channels,
**hps.model
).cuda(rank)
optimizer_g = commons.Adam(
generator.parameters(),
scheduler=hps.train.scheduler,
dim_model=hps.model.hidden_channels,
warmup_steps=hps.train.warmup_steps,
lr=hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps,
)
if hps.train.fp16_run:
generator, optimizer_g._optim = amp.initialize(
generator, optimizer_g._optim, opt_level="O1"
)
generator = DDP(generator)
epoch_str = 1
global_step = 0
try:
_, _, _, epoch_str = utils.load_checkpoint(
utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"),
generator,
optimizer_g,
)
epoch_str += 1
optimizer_g.step_num = (epoch_str - 1) * len(train_loader)
optimizer_g._update_learning_rate()
global_step = (epoch_str - 1) * len(train_loader)
except:
if hps.train.ddi and os.path.isfile(os.path.join(hps.model_dir, "ddi_G.pth")):
_ = utils.load_checkpoint(
os.path.join(hps.model_dir, "ddi_G.pth"), generator, optimizer_g
)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train(
rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer
)
evaluate(
rank,
epoch,
hps,
generator,
optimizer_g,
val_loader,
logger,
writer_eval,
)
if epoch % hps.train.save_epoch == 0:
utils.save_checkpoint(
generator,
optimizer_g,
hps.train.learning_rate,
epoch,
os.path.join(hps.model_dir, "G_{}.pth".format(epoch)),
)
else:
train(rank, epoch, hps, generator, optimizer_g, train_loader, None, None)
def train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer):
train_loader.sampler.set_epoch(epoch)
global global_step
generator.train()
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(
rank, non_blocking=True
)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
rank, non_blocking=True
)
# Train Generator
optimizer_g.zero_grad()
(
(z, z_m, z_logs, logdet, z_mask),
(x_m, x_logs, x_mask),
(attn, logw, logw_),
) = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
l_length = commons.duration_loss(logw, logw_, x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if hps.train.fp16_run:
with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:
scaled_loss.backward()
grad_norm = commons.clip_grad_value_(
amp.master_params(optimizer_g._optim), 5
)
else:
loss_g.backward()
grad_norm = commons.clip_grad_value_(generator.parameters(), 5)
optimizer_g.step()
if rank == 0:
if batch_idx % hps.train.log_interval == 0:
(y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)
logger.info(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(x),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss_g.item(),
)
)
logger.info(
[x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()]
)
scalar_dict = {
"loss/g/total": loss_g,
"learning_rate": optimizer_g.get_lr(),
"grad_norm": grad_norm,
}
scalar_dict.update(
{"loss/g/{}".format(i): v for i, v in enumerate(loss_gs)}
)
utils.summarize(
writer=writer,
global_step=global_step,
images={
"y_org": utils.plot_spectrogram_to_numpy(
y[0].data.cpu().numpy()
),
"y_gen": utils.plot_spectrogram_to_numpy(
y_gen[0].data.cpu().numpy()
),
"attn": utils.plot_alignment_to_numpy(
attn[0, 0].data.cpu().numpy()
),
},
scalars=scalar_dict,
)
global_step += 1
if rank == 0:
logger.info("====> Epoch: {}".format(epoch))
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval):
if rank == 0:
global global_step
generator.eval()
losses_tot = []
with torch.no_grad():
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(val_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(
rank, non_blocking=True
)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
rank, non_blocking=True
)
(
(z, z_m, z_logs, logdet, z_mask),
(x_m, x_logs, x_mask),
(attn, logw, logw_),
) = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
l_length = commons.duration_loss(logw, logw_, x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if batch_idx == 0:
losses_tot = loss_gs
else:
losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]
if batch_idx % hps.train.log_interval == 0:
logger.info(
"Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(x),
len(val_loader.dataset),
100.0 * batch_idx / len(val_loader),
loss_g.item(),
)
)
logger.info([x.item() for x in loss_gs])
losses_tot = [x / len(val_loader) for x in losses_tot]
loss_tot = sum(losses_tot)
scalar_dict = {"loss/g/total": loss_tot}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_tot)})
utils.summarize(
writer=writer_eval, global_step=global_step, scalars=scalar_dict
)
logger.info("====> Epoch: {}".format(epoch))
if __name__ == "__main__":
main()
| [
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.tensorboard.SummaryWriter"
] | 1.5.1 | techthiyanes/vakyansh-tts | b090eb0aa3b36cf19de99902da7caef959dad45b |
1.10 | """Utility function for the PPO-based safe explorer.
"""
import numpy as np
import torch
import torch.nn as nn
from gym.spaces import Box
from safe_control_gym.math_and_models.neural_networks import MLP, CNN, RNN, init_
from safe_control_gym.math_and_models.distributions import Normal, Categorical
import safe_control_gym.controllers.ppo.ppo_utils as ppo_utils
class SafePPOAgent(ppo_utils.PPOAgent):
"""A PPO class that encapsulates models, optimizers and update functions.
"""
def __init__(self,
obs_space,
act_space,
hidden_dim=64,
use_clipped_value=False,
clip_param=0.2,
target_kl=0.01,
entropy_coef=0.01,
actor_lr=0.0003,
critic_lr=0.001,
opt_epochs=10,
mini_batch_size=64,
action_modifier=None,
**kwargs
):
# Parameters.
self.obs_space = obs_space
self.act_space = act_space
self.use_clipped_value = use_clipped_value
self.clip_param = clip_param
self.target_kl = target_kl
self.entropy_coef = entropy_coef
self.opt_epochs = opt_epochs
self.mini_batch_size = mini_batch_size
# Model.
self.ac = MLPActorCritic(obs_space,
act_space,
hidden_dims=[hidden_dim] * 2,
activation="tanh",
action_modifier=action_modifier)
# Optimizers.
self.actor_opt = torch.optim.Adam(self.ac.actor.parameters(), actor_lr)
self.critic_opt = torch.optim.Adam(self.ac.critic.parameters(), critic_lr)
def compute_policy_loss(self, batch):
"""Returns policy loss(es) given batch of data.
"""
obs, act, logp_old, adv, c = batch["obs"], batch["act"], batch["logp"], batch["adv"], batch["c"]
dist, logp = self.ac.actor(obs, act, c=c)
# Policy.
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1 - self.clip_param, 1 + self.clip_param) * adv
policy_loss = -torch.min(ratio * adv, clip_adv).mean()
# Entropy.
entropy_loss = -dist.entropy().mean()
# KL/trust region.
approx_kl = (logp_old - logp).mean()
return policy_loss, entropy_loss, approx_kl
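    # Note (illustrative, not from the original source): this is the standard PPO clipped
    # surrogate, policy_loss = -E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)] with
    # r = exp(logp - logp_old) and eps = clip_param, plus an entropy term and an
    # approximate KL estimate that can be checked against target_kl.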
class MLPActor(nn.Module):
"""Actor model.
"""
def __init__(self,
obs_dim,
act_dim,
hidden_dims,
activation,
discrete=False,
action_modifier=None
):
"""
"""
super().__init__()
self.pi_net = MLP(obs_dim, act_dim, hidden_dims, activation)
# Construct output action distribution.
self.discrete = discrete
if discrete:
self.dist_fn = lambda x: Categorical(logits=x)
else:
self.logstd = nn.Parameter(-0.5 * torch.ones(act_dim))
self.dist_fn = lambda x: Normal(x, self.logstd.exp())
# Safety filter.
self.action_modifier = action_modifier
def forward(self,
obs,
act=None,
c=None
):
"""
"""
mu = self.pi_net(obs)
# Filter action if needed.
if self.action_modifier:
if len(mu.shape) == 1:
# During evalution or single env runs.
mu_safe = self.action_modifier(obs.unsqueeze(0),
mu.unsqueeze(0),
c.unsqueeze(0)).view(-1)
else:
# During training or vectorized runs.
mu_safe = self.action_modifier(obs, mu, c)
else:
mu_safe = mu
dist = self.dist_fn(mu_safe)
logp_a = None
if act is not None:
logp_a = dist.log_prob(act)
return dist, logp_a
class MLPActorCritic(ppo_utils.MLPActorCritic):
"""Model for the actor-critic agent.
Attributes:
actor (MLPActor): policy network.
critic (MLPCritic): value network.
"""
def __init__(self,
obs_space,
act_space,
hidden_dims=(64, 64),
activation="tanh",
action_modifier=None
):
"""
"""
nn.Module.__init__(self)
obs_dim = obs_space.shape[0]
if isinstance(act_space, Box):
act_dim = act_space.shape[0]
discrete = False
else:
act_dim = act_space.n
discrete = True
# Policy.
self.actor = MLPActor(obs_dim, act_dim, hidden_dims, activation, discrete, action_modifier)
# Value function.
self.critic = ppo_utils.MLPCritic(obs_dim, hidden_dims, activation)
def step(self,
obs,
c=None
):
"""
"""
dist, _ = self.actor(obs, c=c)
a = dist.sample()
logp_a = dist.log_prob(a)
v = self.critic(obs)
return a.numpy(), v.numpy(), logp_a.numpy()
def act(self,
obs,
c=None
):
"""
"""
dist, _ = self.actor(obs, c=c)
a = dist.mode()
return a.numpy()
class SafePPOBuffer(ppo_utils.PPOBuffer):
"""Storage for a batch of episodes during training.
Attributes:
max_length (int): maximum length of episode.
batch_size (int): number of episodes per batch.
        scheme (dict): describes shape & other info of data to be stored.
keys (list): names of all data from scheme.
"""
def __init__(self,
obs_space,
act_space,
num_constraints,
max_length,
batch_size
):
"""
"""
self.max_length = max_length
self.batch_size = batch_size
T, N = max_length, batch_size
obs_dim = obs_space.shape
if isinstance(act_space, Box):
act_dim = act_space.shape[0]
else:
act_dim = act_space.n
self.scheme = {
"obs": {
"vshape": (T, N, *obs_dim)
},
"act": {
"vshape": (T, N, act_dim)
},
"rew": {
"vshape": (T, N, 1)
},
"mask": {
"vshape": (T, N, 1),
"init": np.ones
},
"v": {
"vshape": (T, N, 1)
},
"logp": {
"vshape": (T, N, 1)
},
"ret": {
"vshape": (T, N, 1)
},
"adv": {
"vshape": (T, N, 1)
},
"terminal_v": {
"vshape": (T, N, 1)
},
"c": {
"vshape": (T, N, num_constraints)
},
}
self.keys = list(self.scheme.keys())
self.reset()
| [
"torch.min",
"torch.nn.Module.__init__",
"torch.clamp",
"torch.ones",
"torch.exp"
] | 1.10.2 | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d |
1.9 | from pathlib import Path
from typing import Optional, Dict, List, Type, Any, Union
import pytorch_lightning as pl
import torch
from torch import nn as nn
from torchmetrics import Metric
from schnetpack.model.base import AtomisticModel
__all__ = ["ModelOutput", "AtomisticTask"]
class ModelOutput(nn.Module):
"""
Defines an output of a model, including mappings to a loss function and weight for training
and metrics to be logged.
"""
def __init__(
self,
name: str,
loss_fn: Optional[nn.Module] = None,
loss_weight: float = 1.0,
metrics: Optional[Dict[str, Metric]] = None,
target_property: Optional[str] = None,
):
"""
Args:
name: name of output in results dict
target_property: Name of target in training batch. Only required for supervised training.
If not given, the output name is assumed to also be the target name.
loss_fn: function to compute the loss
loss_weight: loss weight in the composite loss: $l = w_1 l_1 + \dots + w_n l_n$
metrics: dictionary of metrics with names as keys
"""
super().__init__()
self.name = name
self.target_property = target_property or name
self.loss_fn = loss_fn
self.loss_weight = loss_weight
self.metrics = nn.ModuleDict(metrics)
def calculate_loss(self, pred, target):
if self.loss_weight == 0 or self.loss_fn is None:
return 0.0
loss = self.loss_weight * self.loss_fn(
pred[self.name], target[self.target_property]
)
return loss
def calculate_metrics(self, pred, target):
metrics = {
metric_name: metric(pred[self.name], target[self.target_property])
for metric_name, metric in self.metrics.items()
}
return metrics
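# Illustrative construction sketch (property name and metric choice are assumptions;
# assumes torchmetrics provides MeanAbsoluteError; not from the original source):
# energy_output = ModelOutput(
#     name="energy",
#     target_property="energy",
#     loss_fn=torch.nn.MSELoss(),
#     loss_weight=1.0,
#     metrics={"mae": torchmetrics.MeanAbsoluteError()},
# )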
class AtomisticTask(pl.LightningModule):
"""
The basic learning task in SchNetPack, which ties model, loss and optimizer together.
"""
def __init__(
self,
model: AtomisticModel,
outputs: List[ModelOutput],
optimizer_cls: Type[torch.optim.Optimizer] = torch.optim.Adam,
optimizer_args: Optional[Dict[str, Any]] = None,
scheduler_cls: Optional[Type] = None,
scheduler_args: Optional[Dict[str, Any]] = None,
scheduler_monitor: Optional[str] = None,
warmup_steps: int = 0,
):
"""
Args:
model: the neural network model
outputs: list of outputs an optional loss functions
optimizer_cls: type of torch optimizer,e.g. torch.optim.Adam
optimizer_args: dict of optimizer keyword arguments
scheduler_cls: type of torch learning rate scheduler
scheduler_args: dict of scheduler keyword arguments
scheduler_monitor: name of metric to be observed for ReduceLROnPlateau
warmup_steps: number of steps used to increase the learning rate from zero linearly to the target learning
rate at the beginning of training
"""
super().__init__()
self.model = model
self.optimizer_cls = optimizer_cls
self.optimizer_kwargs = optimizer_args
self.scheduler_cls = scheduler_cls
self.scheduler_kwargs = scheduler_args
self.schedule_monitor = scheduler_monitor
self.outputs = nn.ModuleList(outputs)
self.grad_enabled = len(self.model.required_derivatives) > 0
self.lr = optimizer_args["lr"]
self.warmup_steps = warmup_steps
def setup(self, stage=None):
if stage == "fit":
self.model.initialize_postprocessors(self.trainer.datamodule)
def forward(self, inputs: Dict[str, torch.Tensor]):
results = self.model(inputs)
return results
def loss_fn(self, pred, batch):
loss = 0.0
for output in self.outputs:
loss += output.calculate_loss(pred, batch)
return loss
def log_metrics(self, pred, targets, subset):
for output in self.outputs:
for metric_name, metric in output.calculate_metrics(pred, targets).items():
self.log(
f"{subset}_{output.name}_{metric_name}",
metric,
on_step=False,
on_epoch=True,
prog_bar=False,
)
def training_step(self, batch, batch_idx):
targets = {
output.target_property: batch[output.target_property]
for output in self.outputs
}
pred = self.predict_without_postprocessing(batch)
loss = self.loss_fn(pred, targets)
self.log("train_loss", loss, on_step=True, on_epoch=False, prog_bar=False)
self.log_metrics(targets, pred, "train")
return loss
def validation_step(self, batch, batch_idx):
torch.set_grad_enabled(self.grad_enabled)
targets = {
output.target_property: batch[output.target_property]
for output in self.outputs
}
pred = self.predict_without_postprocessing(batch)
loss = self.loss_fn(pred, targets)
self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True)
self.log_metrics(targets, pred, "val")
return {"val_loss": loss}
def test_step(self, batch, batch_idx):
torch.set_grad_enabled(self.grad_enabled)
targets = {
output.target_property: batch[output.target_property]
for output in self.outputs
}
pred = self.predict_without_postprocessing(batch)
loss = self.loss_fn(pred, targets)
self.log("test_loss", loss, on_step=False, on_epoch=True, prog_bar=True)
self.log_metrics(targets, pred, "test")
return {"test_loss": loss}
def predict_without_postprocessing(self, batch):
pp = self.model.do_postprocessing
self.model.do_postprocessing = False
pred = self(batch)
self.model.do_postprocessing = pp
return pred
def configure_optimizers(self):
optimizer = self.optimizer_cls(
params=self.parameters(), **self.optimizer_kwargs
)
if self.scheduler_cls:
schedulers = []
schedule = self.scheduler_cls(optimizer=optimizer, **self.scheduler_kwargs)
optimconf = {"scheduler": schedule, "name": "lr_schedule"}
if self.schedule_monitor:
optimconf["monitor"] = self.schedule_monitor
schedulers.append(optimconf)
return [optimizer], schedulers
else:
return optimizer
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer=None,
optimizer_idx: int = None,
optimizer_closure=None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
):
if self.trainer.global_step < self.warmup_steps:
lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.warmup_steps)
for pg in optimizer.param_groups:
pg["lr"] = lr_scale * self.lr
# update params
optimizer.step(closure=optimizer_closure)
| [
"torch.nn.ModuleDict",
"torch.set_grad_enabled",
"torch.nn.ModuleList"
] | 1.9 | sxie22/schnetpack | a421e7c121c7bdb2838fb30f887812110ecfa3c6 |
1.6 | import torch
import torch.nn.functional as F
from torch import nn
from vit_pytorch.vit import ViT
from vit_pytorch.t2t import T2TViT
from vit_pytorch.efficient import ViT as EfficientViT
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
# classes
class DistillMixin:
def forward(self, img, distill_token = None):
distilling = exists(distill_token)
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim = 1)
x += self.pos_embedding[:, :(n + 1)]
if distilling:
distill_tokens = repeat(distill_token, '() n d -> b n d', b = b)
x = torch.cat((x, distill_tokens), dim = 1)
x = self._attend(x)
if distilling:
x, distill_tokens = x[:, :-1], x[:, -1]
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
out = self.mlp_head(x)
if distilling:
return out, distill_tokens
return out
class DistillableViT(DistillMixin, ViT):
def __init__(self, *args, **kwargs):
super(DistillableViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def to_vit(self):
v = ViT(*self.args, **self.kwargs)
v.load_state_dict(self.state_dict())
return v
def _attend(self, x):
x = self.dropout(x)
x = self.transformer(x)
return x
class DistillableT2TViT(DistillMixin, T2TViT):
def __init__(self, *args, **kwargs):
super(DistillableT2TViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def to_vit(self):
v = T2TViT(*self.args, **self.kwargs)
v.load_state_dict(self.state_dict())
return v
def _attend(self, x):
x = self.dropout(x)
x = self.transformer(x)
return x
class DistillableEfficientViT(DistillMixin, EfficientViT):
def __init__(self, *args, **kwargs):
super(DistillableEfficientViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def to_vit(self):
v = EfficientViT(*self.args, **self.kwargs)
v.load_state_dict(self.state_dict())
return v
def _attend(self, x):
return self.transformer(x)
# knowledge distillation wrapper
class DistillWrapper(nn.Module):
def __init__(
self,
*,
teacher,
student,
temperature = 1.,
alpha = 0.5,
hard = False
):
super().__init__()
assert (isinstance(student, (DistillableViT, DistillableT2TViT, DistillableEfficientViT))) , 'student must be a vision transformer'
self.teacher = teacher
self.student = student
dim = student.dim
num_classes = student.num_classes
self.temperature = temperature
self.alpha = alpha
self.hard = hard
self.distillation_token = nn.Parameter(torch.randn(1, 1, dim))
self.distill_mlp = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img, labels, temperature = None, alpha = None, **kwargs):
b, *_ = img.shape
alpha = alpha if exists(alpha) else self.alpha
T = temperature if exists(temperature) else self.temperature
with torch.no_grad():
teacher_logits = self.teacher(img)
student_logits, distill_tokens = self.student(img, distill_token = self.distillation_token, **kwargs)
distill_logits = self.distill_mlp(distill_tokens)
loss = F.cross_entropy(student_logits, labels)
if not self.hard:
distill_loss = F.kl_div(
F.log_softmax(distill_logits / T, dim = -1),
F.softmax(teacher_logits / T, dim = -1).detach(),
reduction = 'batchmean')
distill_loss *= T ** 2
else:
teacher_labels = teacher_logits.argmax(dim = -1)
distill_loss = F.cross_entropy(distill_logits, teacher_labels)
return loss * (1 - alpha) + distill_loss * alpha
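# Illustrative usage sketch (teacher model, image size and hyperparameters are
# assumptions, not from the original source):
# student = DistillableViT(image_size=256, patch_size=32, num_classes=1000, dim=1024,
#                          depth=6, heads=8, mlp_dim=2048)
# distiller = DistillWrapper(teacher=teacher_cnn, student=student,
#                            temperature=3., alpha=0.5, hard=False)
# loss = distiller(img, labels)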
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.randn"
] | 1.6 | zarszz/vit-pytorch | d93cd84ccdb338572fe978c08bd95ca93bee11cc |
1.0 | import os
import torch
from torchvision import utils
class Visualizer():
def __init__(self,
netG,
device,
out,
num_samples=10,
num_columns=None,
batch_size=100,
range=(-1, 1)):
self.netG = netG
self.device = device
self.out = out
self.num_samples = num_samples
if num_columns is None:
self.num_columns = self.netG.num_classes
else:
self.num_columns = num_columns
self.batch_size = batch_size
self.range = range
z_base = netG.sample_z(num_samples).to(device)
z = z_base.clone().unsqueeze(1).repeat(1, self.num_columns, 1)
self.fixed_z = z.view(-1, netG.latent_dim)
def visualize(self, iteration):
netG = self.netG
netG.eval()
with torch.no_grad():
y = torch.arange(self.num_columns).repeat(self.num_samples).to(
self.device)
if y.size(0) < self.batch_size:
x = netG(self.fixed_z, y)
else:
xs = []
for i in range(0, y.size(0), self.batch_size):
x = netG(self.fixed_z[i:i + self.batch_size],
y[i:i + self.batch_size])
xs.append(x)
x = torch.cat(xs, dim=0)
utils.save_image(x.detach(),
os.path.join(self.out,
'samples_iter_%d.png' % iteration),
self.num_columns,
0,
normalize=True,
range=self.range)
| [
"torch.no_grad",
"torch.cat",
"torch.arange"
] | 1.0.1 | takuhirok/rGAN | 6f7a092de5814c662fd17224b3d48bebe7e03c2f |
1.2 | #!/usr/bin/env python3
from typing import Any, Callable, List, Tuple, Union
import torch
from torch.nn import Module
from captum.log import log_usage
from ...._utils.common import _verify_select_column
from ...._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from ..._utils.attribution import NeuronAttribution, PerturbationAttribution
from ..._utils.gradient import _forward_layer_eval
from ..feature_ablation import FeatureAblation
class NeuronFeatureAblation(NeuronAttribution, PerturbationAttribution):
r"""
A perturbation based approach to computing neuron attribution,
involving replacing each input feature with a given baseline /
reference, and computing the difference in the neuron's input / output.
By default, each scalar value within
each input tensor is taken as a feature and replaced independently. Passing
a feature mask, allows grouping features to be ablated together. This can
be used in cases such as images, where an entire segment or region
can be ablated, measuring the importance of the segment (feature group).
Each input scalar in the group will be given the same attribution value
equal to the change in target as a result of ablating the entire feature
group.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Attributions for a particular neuron in the input or output
of this layer are computed using the argument neuron_index
in the attribute method.
Currently, it is assumed that the inputs or the outputs
of the layer, depending on which one is used for
attribution, can only be a single tensor.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
NeuronAttribution.__init__(self, forward_func, layer, device_ids)
PerturbationAttribution.__init__(self, forward_func)
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
neuron_index: Union[int, Tuple[int, ...]],
baselines: BaselineType = None,
additional_forward_args: Any = None,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
attribute_to_neuron_input: bool = False,
perturbations_per_eval: int = 1,
) -> TensorOrTupleOfTensorsGeneric:
r"""
Args:
inputs (tensor or tuple of tensors): Input for which neuron
attributions are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
neuron_index (int or tuple): Index of neuron in output of given
layer for which attribution is desired. The length of
this tuple must be one less than the number of
dimensions in the output of the given layer (since
dimension 0 corresponds to number of examples).
An integer may be provided instead of a tuple of
length 1.
baselines (scalar, tensor, tuple of scalars or tensors, optional):
Baselines define reference value which replaces each
feature when ablated.
Baselines can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or
broadcastable to match the dimensions of inputs
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
additional_forward_args (any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
feature_mask (tensor or tuple of tensors, optional):
feature_mask defines a mask for the input, grouping
features which should be ablated together. feature_mask
should contain the same number of tensors as inputs.
Each tensor should
be the same size as the corresponding input or
broadcastable to match the input tensor. Each tensor
should contain integers in the range 0 to num_features
- 1, and indices corresponding to the same feature should
have the same value.
Note that features within each input tensor are ablated
independently (not across tensors).
If None, then a feature mask is constructed which assigns
each scalar within a tensor as a separate feature, which
is ablated independently.
Default: None
attribute_to_neuron_input (bool, optional): Indicates whether to
compute the attributions with respect to the neuron input
or output. If `attribute_to_neuron_input` is set to True
then the attributions will be computed with respect to
neuron's inputs, otherwise it will be computed with respect
to neuron's outputs.
Note that currently it is assumed that either the input
or the output of internal neurons, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
perturbations_per_eval (int, optional): Allows ablation of multiple
features to be processed simultaneously in one call to
forward_fn.
Each forward pass will contain a maximum of
perturbations_per_eval * #examples samples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain at most
(perturbations_per_eval * #examples) / num_devices
samples.
Default: 1
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Attributions of particular neuron with respect to each input
feature. Attributions will always be the same size as the
provided inputs, with each value providing the attribution
of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
Examples::
>>> # SimpleClassifier takes a single input tensor of size Nx4x4,
>>> # and returns an Nx3 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x3x3.
>>> net = SimpleClassifier()
>>> # Generating random input with size 2 x 4 x 4
>>> input = torch.randn(2, 4, 4)
>>> # Defining NeuronFeatureAblation interpreter
>>> ablator = NeuronFeatureAblation(net, net.conv1)
>>> # To compute neuron attribution, we need to provide the neuron
>>> # index for which attribution is desired. Since the layer output
>>> # is Nx12x3x3, we need a tuple in the form (0..11,0..2,0..2)
>>> # which indexes a particular neuron in the layer output.
>>> # For this example, we choose the index (4,1,2).
        >>> # Computes the ablation attribution for this neuron, ablating
        >>> # each of the 16 scalar inputs independently.
>>> attr = ablator.attribute(input, neuron_index=(4,1,2))
>>> # Alternatively, we may want to ablate features in groups, e.g.
>>> # grouping each 2x2 square of the inputs and ablating them together.
>>> # This can be done by creating a feature mask as follows, which
>>> # defines the feature groups, e.g.:
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 0 | 0 | 1 | 1 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # | 2 | 2 | 3 | 3 |
>>> # +---+---+---+---+
>>> # With this mask, all inputs with the same value are ablated
>>> # simultaneously, and the attribution for each input in the same
>>> # group (0, 1, 2, and 3) per example are the same.
>>> # The attributions can be calculated as follows:
>>> # feature mask has dimensions 1 x 4 x 4
>>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
>>> [2,2,3,3],[2,2,3,3]]])
>>> attr = ablator.attribute(input, neuron_index=(4,1,2),
>>> feature_mask=feature_mask)
"""
def neuron_forward_func(*args: Any):
with torch.no_grad():
layer_eval, _ = _forward_layer_eval(
self.forward_func,
args,
self.layer,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
assert len(layer_eval) == 1, (
"Layers with multiple inputs /"
" outputs are not supported for neuron ablation."
)
return _verify_select_column(layer_eval[0], neuron_index)
ablator = FeatureAblation(neuron_forward_func)
# NOTE: using __wrapped__ to not log
return ablator.attribute.__wrapped__(
ablator, # self
inputs,
baselines=baselines,
additional_forward_args=additional_forward_args,
feature_mask=feature_mask,
perturbations_per_eval=perturbations_per_eval,
)
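# Hedged illustration (not part of the upstream captum source): the docstring of
# ``attribute`` above writes the 2x2-block feature mask out by hand. The helper
# below sketches one way to build such block masks programmatically; the function
# name and its parameters are ours, introduced only for this example.
def _make_block_feature_mask(height: int, width: int, block: int) -> torch.Tensor:
    """Return a (1, height, width) mask assigning one feature id per block x block tile."""
    row_ids = torch.arange(height) // block  # e.g. 0, 0, 1, 1 for height=4, block=2
    col_ids = torch.arange(width) // block
    n_col_blocks = (width + block - 1) // block
    mask = row_ids.unsqueeze(1) * n_col_blocks + col_ids.unsqueeze(0)
    return mask.unsqueeze(0)
# For a 4x4 input with block=2 this reproduces the mask from the docstring, i.e.
# tensor([[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]]), which can be
# passed as ``feature_mask`` to ``NeuronFeatureAblation.attribute``.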
| [
"torch.no_grad"
] | 1.2 | caraya10/captum | 258928905875c18e85a2413b3bb97def1bfb730a |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
# MMBTModel, ModalEmbeddings is copied from [1]
# as we have internal dependency on transformers v2.3.
# These will be removed when we upgrade to package v2.5+.
# [1]: https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_mmbt.py # noqa
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
from multimodelity.common.registry import registry
from multimodelity.models.base_model import BaseModel
from multimodelity.models.interfaces.mmbt import MMBTGridHMInterface
from multimodelity.modules.encoders import (
EncoderFactory,
ImageEncoderFactory,
ImageEncoderTypes,
MultiModalEncoderBase,
ResNet152ImageEncoder,
TextEncoderFactory,
TextEncoderTypes,
TransformerEncoder,
)
from multimodelity.modules.hf_layers import replace_with_jit
from multimodelity.utils.checkpoint import load_pretrained_model
from multimodelity.utils.configuration import get_multimodelity_cache_dir
from multimodelity.utils.modeling import get_optimizer_parameters_for_bert
from omegaconf import II, DictConfig, OmegaConf
from torch import Tensor, nn
from transformers.modeling_bert import BertForPreTraining, BertPredictionHeadTransform
# TODO: Remove after transformers package upgrade to 2.5
class MMBTConfig:
"""Configuration class to store the configuration of a `MMBT Model`.
Args:
config (:obj:`~transformers.PreTrainedConfig`):
Config of the underlying Transformer models. Its values are
copied over to use a single config.
num_labels (:obj:`int` or :obj:`None`, optional, defaults to `None`):
Size of final Linear layer for classification.
        modal_hidden_size (:obj:`int`, optional, defaults to 2048):
Embedding dimension of the non-text modality encoder.
"""
def __init__(self, config, num_labels=None, modal_hidden_size=2048):
self.__dict__ = config.__dict__
self.modal_hidden_size = modal_hidden_size
if num_labels:
self.num_labels = num_labels
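# Hedged usage sketch (ours, not part of the original file): MMBTConfig simply
# adopts the wrapped transformer config's __dict__ and adds the modal embedding
# size and label count, so every field of the underlying config stays reachable.
# Assumes transformers' BertConfig is importable in this environment.
def _mmbt_config_sketch():
    from transformers import BertConfig
    bert_config = BertConfig()  # default bert-base style hyperparameters
    mmbt_config = MMBTConfig(bert_config, num_labels=2, modal_hidden_size=2048)
    assert mmbt_config.hidden_size == bert_config.hidden_size  # shared field
    assert mmbt_config.modal_hidden_size == 2048 and mmbt_config.num_labels == 2
    return mmbt_config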
# TODO: Remove after transformers package upgrade to 2.5
class ModalEmbeddings(nn.Module):
"""Generic Modal Embeddings which takes in an encoder, and a transformer embedding.
"""
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(
self,
input_modal: Tensor,
start_token: Optional[Tensor] = None,
end_token: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if start_token is not None:
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat(
[start_token_embeds.unsqueeze(1), token_embeddings], dim=1
)
if end_token is not None:
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat(
[token_embeddings, end_token_embeds.unsqueeze(1)], dim=1
)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_modal.device
)
position_ids = position_ids.unsqueeze(0).expand(
input_modal.size(0), seq_length
)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_modal.size(0), seq_length),
dtype=torch.long,
device=input_modal.device,
)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = token_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# TODO: Remove after transformers package upgrade to 2.5
class MMBTModel(nn.Module):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration
(config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape
``(batch_size, sequence_length, hidden_size)``. Sequence of
hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape
``(batch_size, hidden_size)``. Last layer hidden-state of the
first token of the sequence (classification token) further processed
by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction
(classification) objective during Bert pretraining. This output
is usually *not* a good summary of the semantic content of the
            input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when
``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer +
the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the
initial embedding outputs.
**attentions**: (`optional`, returned when
``config.output_attentions=True``) list of ``torch.FloatTensor``
(one for each layer) of shape ``(batch_size, num_heads,
sequence_length, sequence_length)``: Attentions weights after
the attention softmax, used to compute the weighted average in the
self-attention heads.
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
mmbt = MMBTModel(config, transformer, encoder)
"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.is_decoder = config.is_decoder
self.num_hidden_layers = config.num_hidden_layers
self.transformer = transformer
self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
def forward(
self,
input_modal: Tensor,
input_ids: Tensor,
modal_start_tokens: Optional[Tensor] = None,
modal_end_tokens: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
modal_token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
modal_position_ids: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_txt_shape = input_ids.size()
elif inputs_embeds is not None:
input_txt_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
modal_embeddings = self.modal_encoder(
input_modal,
start_token=modal_start_tokens,
end_token=modal_end_tokens,
position_ids=modal_position_ids,
token_type_ids=modal_token_type_ids,
)
input_modal_shape = modal_embeddings.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.ones(
input_txt_shape, dtype=torch.long, device=device
)
txt_embeddings = self.transformer.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
input_shape = embedding_output.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
else:
attention_mask = torch.cat(
[
torch.ones(input_modal_shape, device=device, dtype=torch.long),
attention_mask,
],
dim=1,
)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
else:
encoder_attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device), encoder_attention_mask],
dim=1,
)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the
# padding mask
# - if the model is an encoder, make the mask broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
if self.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
attention_mask = attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = encoder_attention_mask[:, None, None, :]
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
encoder_attention_mask = encoder_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_attention_mask = (1.0 - encoder_attention_mask) * -10000.0
encoder_outputs = self.transformer.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.transformer.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
encoder_outputs[1:],
) # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
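# Hedged illustration (ours, not part of the original model): a miniature of the
# attention-mask arithmetic used in MMBTModel.forward above. A [batch, seq]
# padding mask is broadcast to [batch, 1, 1, seq] (combined with a causal mask
# for decoders) and converted to additive scores where masked positions receive
# -10000.0 before the softmax.
def _extended_attention_mask_sketch(attention_mask: Tensor, is_decoder: bool = False) -> Tensor:
    batch_size, seq_length = attention_mask.shape
    if is_decoder:
        seq_ids = torch.arange(seq_length, device=attention_mask.device)
        causal_mask = (
            seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
        )
        extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
    else:
        extended = attention_mask[:, None, None, :]
    extended = extended.to(dtype=torch.float32)
    return (1.0 - extended) * -10000.0
# e.g. _extended_attention_mask_sketch(torch.ones(2, 5)) is all zeros (attend to
# every position), while zeroed (padded) positions become -10000.0.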
class MMBTBase(MultiModalEncoderBase):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
# Replace transformer layers with scriptable JIT layers
replace_with_jit()
def build(self):
encoders = self._build_encoders(self.config)
text_encoder, modal_encoder = encoders[0], encoders[1]
self._encoder_config = text_encoder.config
self._mmbt_config = MMBTConfig(
self._encoder_config,
num_labels=self.config.num_labels,
modal_hidden_size=self.config.modal_hidden_size,
)
self.use_modal_start_token = self.config.use_modal_start_token
self.use_modal_end_token = self.config.use_modal_end_token
self.num_max_segment = self.config.text_encoder.params.get("num_segments", 2)
self.mmbt = MMBTModel(self._mmbt_config, text_encoder, modal_encoder)
def extract_modal_end_token(self, sample_list: Dict[str, Tensor]):
# compute the position of the last non-masked token, which is <sep>
gather_index = sample_list["input_mask"].sum(1, keepdim=True) - 1
modal_end_token = (
torch.gather(sample_list["input_ids"], 1, gather_index)
.squeeze(1)
.clone()
.detach()
)
batch_size = sample_list["input_ids"].size(0)
device = sample_list["input_ids"].device
# remove start_token in input_ids
sample_list["input_ids"] = torch.cat(
[sample_list["input_ids"][:, 1:], sample_list["input_ids"][:, -1:]], dim=1
)
# update input_mask
sample_list["input_mask"] = torch.cat(
[
sample_list["input_mask"][:, 1:],
torch.zeros([batch_size, 1], dtype=torch.long, device=device),
],
dim=1,
)
return modal_end_token
def forward(self, sample_list: Dict[str, Tensor]):
if self._is_direct_features_input:
if "input_modal" in sample_list:
input_modal = sample_list["input_modal"]
else:
input_modal = sample_list["image_feature_0"]
else:
input_modal = sample_list["image"]
modal_start_token: Optional[Tensor] = None
if self.use_modal_start_token:
modal_start_token = sample_list["input_ids"][:, 0].clone().detach()
modal_end_token: Optional[Tensor] = None
if self.use_modal_end_token:
modal_end_token = self.extract_modal_end_token(sample_list)
if "modal_token_type_ids" in sample_list:
modal_token_type_ids = sample_list["modal_token_type_ids"]
else:
token_value = 0
segment_ids = sample_list["segment_ids"]
max_id = segment_ids.max()
min_id = segment_ids.min()
# Case of only one segment
if max_id == min_id:
# If max_id is greater than 0, that means text is at 0 segment
# which means modal will be at 1
# In other case, it will be zero, which it already is
# NOTE: We compare with tensor here due to TorchScript compliance
if max_id == torch.tensor(0, dtype=max_id.dtype):
token_value = 1
else:
max_segment = self.num_max_segment - 1
# If max id is not equal to max_segment, it means
# text segments start from 0 which means modal will
# be last, otherwise, it is 0, which it already is
if max_id != torch.tensor(max_segment, dtype=max_id.dtype):
token_value = max_segment
modal_token_type_ids = torch.full(
(input_modal.size(0), 1),
fill_value=token_value,
dtype=torch.long,
device=input_modal.device,
)
# In case of XRAY, there might be only two dims
if input_modal.dim() == 2:
input_modal = input_modal.unsqueeze(dim=1)
# See details of inputs at
# https://github.com/huggingface/transformers/blob/1789c7/src/transformers/modeling_mmbt.py#L101 # noqa
output = self.mmbt(
input_modal,
input_ids=sample_list["input_ids"],
modal_start_tokens=modal_start_token,
modal_end_tokens=modal_end_token,
attention_mask=sample_list["input_mask"],
token_type_ids=sample_list["segment_ids"],
modal_token_type_ids=modal_token_type_ids,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
)
return output
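# Hedged illustration (ours, not part of the original model): the gather trick
# used by MMBTBase.extract_modal_end_token above, shown on a toy batch. The last
# non-masked position of each row (typically the </s> / [SEP] token) is picked
# out via the input mask, regardless of how much padding each example has.
def _gather_last_unmasked_token_sketch() -> Tensor:
    input_ids = torch.tensor([[101, 7, 8, 102, 0], [101, 5, 102, 0, 0]])
    input_mask = (input_ids != 0).long()
    gather_index = input_mask.sum(1, keepdim=True) - 1  # positions 3 and 2
    return torch.gather(input_ids, 1, gather_index).squeeze(1)  # tensor([102, 102])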
class MMBTForPreTraining(nn.Module):
def __init__(self, config, *args, **kwargs):
super().__init__()
self.config = config
self.bert = MMBTBase(config, *args, **kwargs)
self.encoder_config = self.bert.encoder_config
# TODO : Switch to AutoModelForPreTraining after transformers
# package upgrade to 2.5
pretraining_module = BertForPreTraining.from_pretrained(
self.config.bert_model_name,
config=self.encoder_config,
cache_dir=os.path.join(get_multimodelity_cache_dir(), "distributed_{}".format(-1)),
)
self.cls = deepcopy(pretraining_module.cls)
self.loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we
are cloning them instead.
"""
if hasattr(self, "cls"):
self.bert.mmbt.transformer._tie_or_clone_weights(
self.cls.predictions.decoder,
self.bert.mmbt.transformer.embeddings.word_embeddings,
)
def forward(self, sample_list):
module_output = self.bert(sample_list)
sequence_output, pooled_output = module_output[0], module_output[1]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output
)
output = {}
if (
self.encoder_config.output_hidden_states
or self.encoder_config.output_attentions
):
output["extras"] = module_output[2:]
loss_key = f"{sample_list.dataset_name}/{sample_list.dataset_type}"
if "lm_label_ids" in sample_list and sample_list.lm_label_ids is not None:
output["logits"] = prediction_scores
lm_label_ids = sample_list.lm_label_ids
# Only take last scores which are text's scores and ignore image scores
text_scores = (
prediction_scores[:, -(lm_label_ids.size(1)) :]
.contiguous()
.view(-1, self.encoder_config.vocab_size)
)
masked_lm_loss = self.loss_fct(
text_scores, sample_list.lm_label_ids.contiguous().view(-1)
)
output["losses"] = {}
output["losses"][f"{loss_key}/masked_lm_loss"] = masked_lm_loss
# Add alignment loss if present
if (
"image_text_alignment" in sample_list
and sample_list.image_text_alignment is not None
):
output["seq_relationship_logits"] = seq_relationship_score
alignment_loss = self.loss_fct(
seq_relationship_score.contiguous().view(-1),
sample_list.image_text_alignment.contiguous().view(-1),
)
output["losses"][f"{loss_key}/alignment_loss"] = alignment_loss
return output
class MMBTForClassification(nn.Module):
def __init__(self, config, *args, **kwargs):
super().__init__()
self.config = config
self.bert = MMBTBase(config, *args, **kwargs)
self.encoder_config = self.bert.encoder_config
self.num_labels = self.config.num_labels
self.output_hidden_states = self.encoder_config.output_hidden_states
self.output_attentions = self.encoder_config.output_attentions
self.fused_feature_only = self.config.fused_feature_only
self.dropout = nn.Dropout(self.encoder_config.hidden_dropout_prob)
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.encoder_config),
nn.Linear(self.encoder_config.hidden_size, self.config.num_labels),
)
def forward(self, sample_list: Dict[str, Tensor]):
module_output = self.bert(sample_list)
pooled_output = module_output[1]
output = {}
if not torch.jit.is_scripting():
if self.output_hidden_states or self.output_attentions:
output["extras"] = module_output[2:]
else:
assert not (
self.output_hidden_states or self.output_attentions
), "output_attentions or output_hidden_states not supported in script mode"
pooled_output = self.dropout(pooled_output)
if self.fused_feature_only:
output["fused_feature"] = self.classifier[0](pooled_output)
return output
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_labels)
output["scores"] = reshaped_logits
return output
@registry.register_model("mmbt")
class MMBT(BaseModel):
@dataclass
class Config(BaseModel.Config):
model: str = "mmbt"
# classification or pretraining
training_head_type: str = "pretraining"
bert_model_name: str = "bert-base-uncased"
direct_features_input: bool = False
freeze_text: bool = False
freeze_modal: bool = False
freeze_complete_base: bool = False
finetune_lr_multiplier: float = 1
# Dimension of the embedding finally returned by the modal encoder
modal_hidden_size: int = 2048
text_hidden_size: int = 768
num_labels: int = 2
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer,
params=TransformerEncoder.Config(bert_model_name=II("bert_model_name")),
)
use_modal_start_token: bool = True
use_modal_end_token: bool = True
fused_feature_only: bool = False
output_dim: int = 768
def __init__(self, config: Union[DictConfig, Config], *args, **kwargs):
super().__init__(config)
def build(self):
if self.config.training_head_type == "pretraining":
self.model = MMBTForPreTraining(self.config)
else:
self.model = MMBTForClassification(self.config)
if self.config.freeze_complete_base or self.config.freeze_text:
for p in self.model.bert.mmbt.transformer.parameters():
p.requires_grad = False
if self.config.freeze_complete_base or self.config.freeze_modal:
for p in self.model.bert.mmbt.modal_encoder.parameters():
p.requires_grad = False
# Backward compatibility for code from older mmbt
@classmethod
def format_state_key(cls, key):
return (
key.replace("base.bert", "model.bert")
.replace("base.cls", "model.cls")
.replace("base.classifier", "model.classifier")
)
@classmethod
def from_pretrained(cls, model_name, *args, **kwargs):
model = super().from_pretrained(model_name, *args, **kwargs)
config = load_pretrained_model(model_name)["full_config"]
OmegaConf.set_struct(config, True)
if model_name == "mmbt.hateful_memes.images" or kwargs.get("interface"):
return MMBTGridHMInterface(model, config)
return model
@classmethod
def config_path(cls):
return "configs/models/mmbt/pretrain.yaml"
def forward(self, sample_list: Dict[str, Tensor]):
return self.model(sample_list)
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
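# Hedged illustration (ours): format_state_key above remaps checkpoints written by
# older mmbt code ("base.*" prefixes) onto the current module layout ("model.*").
def _format_state_key_sketch() -> str:
    old_key = "base.bert.mmbt.transformer.encoder.layer.0.output.dense.weight"
    return MMBT.format_state_key(old_key)  # -> "model.bert.mmbt.transformer.encoder.layer.0.output.dense.weight"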
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.arange",
"torch.gather",
"torch.ones",
"torch.jit.is_scripting",
"torch.tensor",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | hahaxun/mmf | 6d32c3925ed9bf938e19a071aaa5e72a5cf01ee1 |
1.7 | # -*- coding: utf-8 -*-
# Copyright 2021 National Institute of Information and Communication Technology (Raj Dabre)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## Basic imports
import os
import sys
import argparse
import time
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
##
## Huggingface imports
import transformers
from transformers import AutoTokenizer, MBartTokenizer, MBart50Tokenizer, BartTokenizer
from transformers import MBartForConditionalGeneration, BartForConditionalGeneration, MBartConfig, get_linear_schedule_with_warmup
from transformers import AdamW
##
## Pytorch imports
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
##
## Our imports
from common_utils import *
##
## Other imports
import math
import random
import numpy as np
import sacrebleu
from rouge_score import rouge_scorer
import gc
import functools
##
## Seed setting here
torch.manual_seed(621311)
##
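## Hedged sketch (ours, for illustration only): the label_smoothed_nll_loss actually
## used in the training loop below is imported from common_utils and may differ in
## reduction or signature. This is the standard fairseq-style recipe: mix the NLL of
## the gold token with a uniform penalty over the vocabulary, ignoring pad positions.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=None):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform smoothing term
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
        smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
##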
def model_create_load_run_save(gpu, args, train_files, dev_files, quit_condition):
"""The main function which does the overall training. Should be split into multiple parts in the future. Currently monolithc intentionally."""
rank = args.nr * args.gpus + gpu ## The rank of the current process out of the total number of processes indicated by world_size.
print("Launching process:", rank)
dist.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size, rank=rank)
if args.shard_files and rank == 0: ## First shard the data using process 0 aka the prime process or master process. Other processes will wait.
shard_files_bi(train_files, args)
dist.barrier() ## Stop other processes from proceeding till sharding is done.
if args.use_official_pretrained:
if "mbart" in args.model_path:
if "50" in args.model_path:
tok = MBart50Tokenizer.from_pretrained(args.tokenizer_name_or_path)
else:
tok = MBartTokenizer.from_pretrained(args.tokenizer_name_or_path)
else:
tok = BartTokenizer.from_pretrained(args.tokenizer_name_or_path)
else:
tok = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path, do_lower_case=False, use_fast=False, keep_accents=True) ## Fast tokenizers are not good because their behavior is weird. Accents should be kept or else the segmentation will be messed up on languages with accented characters. No lower case obviously because we want to train on the original case. Set to false if you are ok with the model not dealing with cases.
scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=False) ## In case we do summarization.
print("Tokenizer is:", tok)
print(f"Running DDP checkpoint example on rank {rank}.")
if args.fp16: ## Although the code supports FP16/AMP training, it tends to be unstable in distributed setups so use this carefully.
print("We will do fp16 training")
scaler = torch.cuda.amp.GradScaler() ## Gradient scaler which will be used with torch's automatic mixed precision
else:
print("We will do fp32 training")
if args.encoder_tying_config is not None:
print("We will use recurrently stacked layers for the encoder with configuration:", args.encoder_tying_config)
if args.decoder_tying_config is not None:
print("We will use recurrently stacked layers for the decoder with configuration:", args.decoder_tying_config)
if args.unidirectional_encoder:
print("Using unidirectional encoder.")
writer = SummaryWriter(args.model_path+".tflogs")
if args.use_official_pretrained:
if "mbart" in args.pretrained_model:
model = MBartForConditionalGeneration.from_pretrained(args.pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
elif "bart" in args.pretrained_model:
model = BartForConditionalGeneration.from_pretrained(args.pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
model.config.dropout = args.dropout ## We should set dropouts manually
model.attention_dropout = args.attention_dropout ## We should set dropouts manually
model.activation_dropout = args.activation_dropout ## We should set dropouts manually
else:
config = MBartConfig(vocab_size=len(tok), encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_dim, decoder_ffn_dim=args.decoder_ffn_dim, d_model=args.d_model, no_embed_norm=args.no_embed_norm, scale_embedding=args.scale_embedding, pad_token_id=tok.pad_token_id, eos_token_id=tok(["</s>"], add_special_tokens=False).input_ids[0][0], bos_token_id=tok(["<s>"], add_special_tokens=False).input_ids[0][0], encoder_tying_config=args.encoder_tying_config, decoder_tying_config=args.decoder_tying_config, multilayer_softmaxing=args.multilayer_softmaxing, wait_k=args.wait_k, additional_source_wait_k=args.additional_source_wait_k, unidirectional_encoder=args.unidirectional_encoder, multi_source=args.multi_source, multi_source_method=args.multi_source_method, softmax_temperature=args.softmax_temperature, temperature_calibration=args.temperature_calibration, layerdrop=args.layerdrop, no_scale_attention_embedding=args.no_scale_attention_embedding, positional_encodings=args.positional_encodings, num_domains_for_domain_classifier=args.num_domains_for_domain_classifier, gradient_reversal_for_domain_classifier=args.gradient_reversal_for_domain_classifier) ## Configuration. TODO: Save this configuration somehow.
model = MBartForConditionalGeneration(config)
model.train()
if args.distillation: ## When distilling we need a parent model. The creation of the model is in the same way as the child. This model is immediately loaded with some pretrained params and then loaded into the GPU.
print("We will do distillation from a parent model.")
if args.use_official_parent_pretrained:
if "mbart" in args.parent_pretrained_model:
parent_model = MBartForConditionalGeneration.from_pretrained(args.parent_pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
elif "bart" in args.parent_pretrained_model:
parent_model = BartForConditionalGeneration.from_pretrained(args.parent_pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
parent_model.config.dropout = args.dropout ## We should set dropouts manually
parent_model.attention_dropout = args.attention_dropout ## We should set dropouts manually
parent_model.activation_dropout = args.activation_dropout ## We should set dropouts manually
else:
parent_config = MBartConfig(vocab_size=len(tok), encoder_layers=args.parent_encoder_layers, decoder_layers=args.parent_decoder_layers, dropout=args.parent_dropout, attention_dropout=args.parent_attention_dropout, activation_dropout=args.parent_activation_dropout, encoder_attention_heads=args.parent_encoder_attention_heads, decoder_attention_heads=args.parent_decoder_attention_heads, encoder_ffn_dim=args.parent_encoder_ffn_dim, decoder_ffn_dim=args.parent_decoder_ffn_dim, d_model=args.parent_d_model, no_embed_norm=args.no_embed_norm, scale_embedding=args.scale_embedding, pad_token_id=tok.pad_token_id, eos_token_id=tok(["</s>"], add_special_tokens=False).input_ids[0][0], bos_token_id=tok(["<s>"], add_special_tokens=False).input_ids[0][0], encoder_tying_config=args.encoder_tying_config, decoder_tying_config=args.decoder_tying_config, wait_k=args.wait_k, additional_source_wait_k=args.additional_source_wait_k, unidirectional_encoder=args.unidirectional_encoder, multi_source=args.multi_source, multi_source_method=args.multi_source_method, softmax_temperature=args.softmax_temperature, temperature_calibration=args.temperature_calibration, layerdrop=args.layerdrop, no_scale_attention_embedding=args.no_scale_attention_embedding, positional_encodings=args.positional_encodings)
            parent_model = MBartForConditionalGeneration(parent_config) ## Use the parent configuration defined just above, not the child's config.
parent_model.cuda(gpu)
parent_model.train() ## We do this to enable dropout but we wont have an optimizer for this so we wont train this model. For now. Future implementations should ask if we want to do co-distill or not. By co-distillation I mean, the parent will learn together with the child.
parent_model = DistributedDataParallel(parent_model, device_ids=[gpu], output_device=gpu)
print("Loading a parent model from which distillation will be done.")
dist.barrier()
# configure map_location properly
map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
if not args.use_official_parent_pretrained:
parent_checkpoint_dict = torch.load(args.parent_pretrained_model, map_location=map_location)
if type(parent_checkpoint_dict) == dict:
parent_model.load_state_dict(parent_checkpoint_dict['model']) # We never do any remapping of the parent. We always reuse it as it is.
else:
parent_model.module.load_state_dict(parent_checkpoint_dict) # We never do any remapping of the parent. We always reuse it as it is.
parent_model.train()
torch.cuda.set_device(gpu) ## Set the device to the current GPU. This is different from the rank so keep this in mind.
if args.freeze_embeddings: ## If we wish to freeze the model embeddings. This may be useful when fine-tuning a pretrained model.
print("Freezing embeddings")
freeze_embeds(model)
if args.freeze_encoder: ## If we wish to freeze the encoder itself. This may be useful when fine-tuning a pretrained model.
print("Freezing encoder")
freeze_params(model.get_encoder())
assert_all_frozen(model.get_encoder())
model.cuda(gpu) ## Move the model to the GPU.
model = DistributedDataParallel(model, device_ids=[gpu], output_device=gpu) ## This wrapper around the model will enable distributed training.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
] ## We suppose that weight decay will be used except for biases and layer norm weights.
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-09) ## Our glorious optimizer.
model.train()
scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, args.num_batches) ## A warmup and decay scheduler. We use the linear scheduler for now. TODO: Enable other schedulers with a flag.
while scheduler.get_lr()[0] < 1e-7: ## We want to keep a minimum learning rate else for the initial batch or initial few batches barely anything will be learned which is a waste of computation. This minimum value is kept to 1e-7 by default in accordance with previous literature, other implementations and the Paris peace accords.
scheduler.step()
print("Initial LR is:", scheduler.get_lr()[0])
if args.pretrained_model != "" and not args.use_official_pretrained: ## Here we load a pretrained NMT model or a previous checkpoint in case training crashed.
print("Loading from checkpoint. Strict loading by default but if there are missing or non matching keys, they will be ignored when layer remapping or component selection is done.")
dist.barrier()
# configure map_location properly
map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
checkpoint_dict = torch.load(args.pretrained_model, map_location=map_location)
if type(checkpoint_dict) == dict:
model.load_state_dict(remap_embeddings_eliminate_components_and_eliminate_mismatches(model.state_dict(), remap_layers(checkpoint_dict['model'], 4, args), args), strict=True if (args.remap_encoder == "" and args.remap_decoder == "" and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization) else False)
            if not args.no_reload_optimizer_ctr_and_scheduler and args.remap_encoder == '' and args.remap_decoder == '' and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization: ## Reload the optimizer, ctr and scheduler only when no layer remapping or component elimination is being done.
if 'optimizer' in checkpoint_dict:
print("Reloading optimizer")
optimizer.load_state_dict(checkpoint_dict['optimizer']) ## Dubious
if 'scheduler' in checkpoint_dict:
print("Reloading scheduler")
scheduler.load_state_dict(checkpoint_dict['scheduler']) ## Dubious
if 'ctr' in checkpoint_dict:
print("Reloading ctr. This means we resume training.")
ctr = checkpoint_dict['ctr']
else:
ctr = 0
else:
model.module.load_state_dict(remap_embeddings_eliminate_components_and_eliminate_mismatches(model.state_dict(), remap_layers(checkpoint_dict, 3, args), args), strict=True if (args.remap_encoder == "" and args.remap_decoder == "" and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization) else False)
ctr = 0
else:
print("Training from scratch")
CHECKPOINT_PATH = args.model_path
if rank == 0:
checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': 0}
torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save a model by default every eval_every steps. This model will be saved with the same file name each time.
torch.save(model.state_dict(), CHECKPOINT_PATH+".pure_model")
dist.barrier()
map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
checkpoint_dict = torch.load(CHECKPOINT_PATH, map_location=map_location)
model.load_state_dict(checkpoint_dict['model'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
scheduler.load_state_dict(checkpoint_dict['scheduler'])
ctr = checkpoint_dict['ctr']
model.train()
print("Using label smoothing of", args.label_smoothing)
print("Using gradient clipping norm of", args.max_gradient_clip_value)
print("Using softmax temperature of", args.softmax_temperature)
if args.max_ent_weight != -1:
print("Doing entropy maximization during loss computation.")
if args.multistep_optimizer_steps > 1:
print("Using a multistep optimizer where gradients will be accumulated over", args.multistep_optimizer_steps, "batches.")
num_batches_this_optimizer_step = 0
losses = 0
global_sbleu_history = [] ## To save the global evaluation metric history.
max_global_sbleu = 0 ## Maximum global evaluation metric score.
max_global_sbleu_step = 0 ## Step at which we achieved the maximum global evaluation metric score.
individual_sbleu_history = {dev_pair: [] for dev_pair in dev_files} ## For multilingual NMT settings we suppose that we will keep a track of the histories for individual language pairs being evaluated and this dictionary keeps track of the history.
max_individual_sbleu = {dev_pair: 0 for dev_pair in dev_files} ## The maximum score per pair.
max_individual_sbleu_step = {dev_pair: 0 for dev_pair in dev_files} ## The step at which maximum score was achieved per pair.
curr_eval_step = 0
annealing_attempt = 0 ## We use this to limit the number of times annealing will take place. When we anneal the LR is divided by a factor. How this is achieved will be explained below.
inps = {dev_pair: [inpline.strip() for inpline in open(dev_files[dev_pair][0])][:args.max_eval_batches*args.dev_batch_size] for dev_pair in dev_files} ## Get all inputs for each pair. Select up to args.max_eval_batches*args.dev_batch_size examples.
if args.is_summarization: ## Slight data structure difference for summarization vs translation when computing the evaluation metric. For summarization the metric is Rouge.
refs = {dev_pair: [[refline.strip() for refline in open(dev_files[dev_pair][1])][:args.max_eval_batches*args.dev_batch_size]] for dev_pair in dev_files} ## Get all references for each input. Select up to args.max_eval_batches*args.dev_batch_size examples.
scores = {dev_pair: 0 for dev_pair in dev_files} ## The rouge scorer works at the sentence level so we have to add all individual scores per sentence and this dictionary keeps track of the score. This dictionary may not be needed.
else:
refs = {dev_pair: [[refline.strip() for refline in open(dev_files[dev_pair][1])][:args.max_eval_batches*args.dev_batch_size]] for dev_pair in dev_files} ## Get all references for each input. Select up to args.max_eval_batches*args.dev_batch_size examples.
for input_ids, input_masks, decoder_input_ids, labels in generate_batches_bilingual(tok, args, train_files, rank): #Batches are generated from here. The argument (0.30, 0.40) is a range which indicates the percentage of the source sentence to be masked in case we want masking during training just like we did during BART pretraining. The argument 3.5 is the lambda to the poisson length sampler which indicates the average length of a word sequence that will be masked.
start = time.time()
if ctr % args.eval_every == 0 and num_batches_this_optimizer_step == 0: ## We have to evaluate our model every eval_every steps.
CHECKPOINT_PATH = args.model_path
if rank == 0: ## Evaluation will be done only on the prime/master process which is at rank 0. Other processes will sleep.
if not args.no_eval: ## If we dont care about early stopping and only on training for a bazillion batches then you can save time by skipping evaluation.
print("Running eval on dev set(s)")
if args.mixed_wait_k:
model.module.config.wait_k = args.wait_k
hyp = {dev_pair: [] for dev_pair in dev_files}
sbleus = {}
model.eval() ## We go to eval mode so that there will be no dropout.
checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr} ## This training state will be saved.
for dev_pair in dev_files: ## For each evaluation pair we will decode and compute scores.
slangtlang =dev_pair.strip().split("-")
if args.multi_source: ## In case we do multisource NMT
slang=slangtlang[0]+"-"+slangtlang[1] ## This will be split in the generate_batches_eval function as we expect a triplet.
tlang=slangtlang[2]
else:
slang=slangtlang[0]
tlang=slangtlang[1]
eval_batch_counter = 0
for dev_input_ids, dev_input_masks in generate_batches_eval_bilingual(tok, args, inps[dev_pair], slang):
if args.multi_source:
dev_input_ids_parent = dev_input_ids[1]
dev_input_ids = dev_input_ids[0]
dev_input_masks_parent = dev_input_masks[1]
dev_input_masks = dev_input_masks[0]
dev_input_ids_parent = dev_input_ids_parent.to(gpu) ## Move to GPU.
dev_input_masks_parent = dev_input_masks_parent.to(gpu) ## Move to GPU.
start = time.time()
dev_input_ids = dev_input_ids.to(gpu) ## Move to GPU.
dev_input_masks = dev_input_masks.to(gpu) ## Move to GPU.
if args.is_summarization: ## Things can be slow so best show progress
print("Decoding batch from a pool of", len(inps[dev_pair]), "examples")
with torch.no_grad(): ## torch.no_grad is apparently known to prevent the code from allocating memory for gradient computation in addition to making things faster. I have not verified this but have kept it as a safety measure to ensure that my model is not being directly tuned on the development set.
translations = model.module.generate(dev_input_ids, use_cache=True, num_beams=1, max_length=int((len(dev_input_ids[0])*args.max_decode_length_multiplier) if args.max_decode_length_multiplier > 0 else -args.max_decode_length_multiplier), min_length=int((len(dev_input_ids[0])*args.min_decode_length_multiplier) if args.min_decode_length_multiplier > 0 else -args.min_decode_length_multiplier), early_stopping=True, attention_mask=dev_input_masks, pad_token_id=tok.pad_token_id, eos_token_id=tok(["</s>"], add_special_tokens=False).input_ids[0][0], decoder_start_token_id=tok([tlang if args.use_official_pretrained else "<2"+tlang+">"], add_special_tokens=False).input_ids[0][0], bos_token_id=tok(["<s>"], add_special_tokens=False).input_ids[0][0], length_penalty=args.length_penalty, repetition_penalty=args.repetition_penalty, encoder_no_repeat_ngram_size=args.encoder_no_repeat_ngram_size, no_repeat_ngram_size=args.no_repeat_ngram_size, additional_input_ids=dev_input_ids_parent if args.multi_source else None, additional_input_ids_mask=dev_input_masks_parent if args.multi_source else None) ## We translate the batch.
dev_input_ids = dev_input_ids.to('cpu') ## Move to cpu. Not needed but its a safe step.
dev_input_masks = dev_input_masks.to('cpu') ## Move to cpu. Not needed but its a safe step.
translations=translations.to('cpu') ## Move to cpu. Not needed but its a safe step.
if args.multi_source:
dev_input_ids_parent = dev_input_ids_parent.to('cpu') ## Move to cpu. Not needed but its a safe step.
dev_input_masks_parent = dev_input_masks_parent.to('cpu') ## Move to cpu. Not needed but its a safe step.
for translation in translations:
translation = tok.decode(translation, skip_special_tokens=args.no_skip_special_tokens, clean_up_tokenization_spaces=False) ### Get the raw sentences.
hyp[dev_pair].append(translation)
if args.use_rouge: ## Get the evaluation metric score.
for curr_ref, curr_pred in zip(refs[dev_pair][0], hyp[dev_pair]):
score = scorer.score(curr_ref, curr_pred)
scores[dev_pair] += score['rougeL'].fmeasure
sbleu = scores[dev_pair]/len(hyp[dev_pair])
metric = 'Rouge'
else:
sbleu = get_sacrebleu(refs[dev_pair], hyp[dev_pair])
metric = 'BLEU'
individual_sbleu_history[dev_pair].append([sbleu, ctr]) ## Update the score history for this pair.
sbleus[dev_pair] = sbleu
print(metric, "score using sacrebleu after", ctr, "iterations is", sbleu, "for language pair", dev_pair)
writer.add_scalar(dev_pair+" bleu/rouge", sbleu, ctr)
if sbleu > max_individual_sbleu[dev_pair]: ## Update the best score and step number. If the score has improved then save a model copy for this pair. Although we will stop on the global score (average across scores over all pairs) we save these models if we want a model that performs the best on a single pair.
max_individual_sbleu[dev_pair] = sbleu
max_individual_sbleu_step[dev_pair] = curr_eval_step
print("New peak reached for", dev_pair,". Saving.")
torch.save(checkpoint_dict, CHECKPOINT_PATH+".best_dev_bleu."+dev_pair+"."+str(ctr))
torch.save(model.module.state_dict(), CHECKPOINT_PATH+".best_dev_bleu."+dev_pair+"."+str(ctr)+".pure_model") ## Pure model without any ddp markers or optimizer info.
## Global stats
sbleu = sum(sbleus.values())/len(sbleus) ## The global score.
global_sbleu_history.append([sbleu, ctr]) ## Update the global score history.
print("Global", metric, "score using sacrebleu after", ctr, "iterations is:", sbleu)
writer.add_scalar("global bleu/rouge", sbleu, ctr)
if sbleu > max_global_sbleu: ## Update the best score and step number. If this has improved then save a copy for the model. Note that this model MAY NOT be the model that gives the best performance for all pairs.
max_global_sbleu = sbleu
max_global_sbleu_step = curr_eval_step
print("New peak reached. Saving.")
torch.save(checkpoint_dict, CHECKPOINT_PATH+".best_dev_bleu.global."+str(ctr))
torch.save(model.module.state_dict(), CHECKPOINT_PATH+".best_dev_bleu.global."+str(ctr)+".pure_model") ## Pure model without any ddp markers or optimizer info.
if curr_eval_step - max_global_sbleu_step > (args.early_stop_checkpoints + annealing_attempt*args.additional_early_stop_checkpoints_per_anneal_step): ## If the global scores have not improved for more than early_stop_checkpoints + some additional checkpoints to wait for till annealing is done then we stop training.
if annealing_attempt < args.max_annealing_attempts: ## We will only downscale the LR a fixed number of times. Each time we downscale the number of checkpoints to wait for declaring convergence will increase by a fixed value.
annealing_attempt += 1
curr_lr = scheduler.get_lr()[0]
print("LR before annealing is:", curr_lr)
while scheduler.get_lr()[0] > (curr_lr/args.learning_rate_scaling): ## Currently we down scale the LR by advancing the scheduler by some steps. Now this is a bad idea because the scheduler may reach maximum number of steps where the LR is 0. However the training loop will continue and nothing will be updated. The loophole I have used is to set the maximum number of steps to a large value. Thus far I have not seen a case where this has a bad effect but users who do not trust this part of the code should not use annealing.
scheduler.step()
print("LR after annealing is:", scheduler.get_lr()[0])
else: ## Convergence has been reached and we stop and report the final metrics.
print("We have seemingly converged as", metric, "failed to increase for the following number of checkpoints:", args.early_stop_checkpoints+annealing_attempt*args.additional_early_stop_checkpoints_per_anneal_step, ". You may want to consider increasing the number of tolerance steps, doing additional annealing or having a lower peak learning rate or something else.")
print("Terminating training")
print("Global dev", metric, "history:", global_sbleu_history)
print("Individual", metric, "history:", individual_sbleu_history )
quit_condition[0] = -1 ## Since this is a shared variable it will be updated for all processes.
curr_eval_step += 1
model.train() ## Put the model back in training mode where dropout will be done.
else: ## If no evaluation will be done then I consider it prudent to save the model every 10000 checkpoints by default. Change this to whatever value you want.
if ctr % args.no_eval_save_every == 0:
print("No evaluation based early stopping so saving every", args.no_eval_save_every, "checkpoints.")
checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr}
torch.save(checkpoint_dict, CHECKPOINT_PATH+"."+str(ctr))
torch.save(model.state_dict(), CHECKPOINT_PATH+"."+str(ctr)+".pure_model")
print("Saving the model")
sys.stdout.flush()
# All processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes.
# Therefore, saving it in one process is sufficient.
checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr}
torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save a model by default every eval_every steps. This model will be saved with the same file name each time.
torch.save(model.state_dict(), CHECKPOINT_PATH+".pure_model")
# Use a barrier() to make sure that process 1 loads the model after process
# 0 saves it.
dist.barrier()
if quit_condition[0].cpu().numpy() == -1: ## All processes will see the same value which is always updated by rank 0 processes.
break ## Everyone quits.
# configure map_location properly
print("Loading from checkpoint")
sys.stdout.flush()
map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
checkpoint_dict = torch.load(CHECKPOINT_PATH, map_location=map_location)
model.load_state_dict(checkpoint_dict['model'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
scheduler.load_state_dict(checkpoint_dict['scheduler'])
dist.barrier()
if args.cross_distillation or args.multi_source: ## The returned input ids and input masks are actually a list of two items each. The first item is to be fed to the parent model and the second item is to be fed to the child model.
input_ids_parent=input_ids[1]
input_ids=input_ids[0]
input_ids_parent = input_ids_parent.to(gpu) ## Move to gpu
input_masks_parent=input_masks[1]
input_masks=input_masks[0]
input_masks_parent = input_masks_parent.to(gpu) ## Move to gpu
if args.num_domains_for_domain_classifier > 1: ## The label will contain the label as well as the domain indicator
domain_classifier_labels=labels[1] ## This is not a tensor yet
domain_classifier_labels = torch.tensor(domain_classifier_labels, dtype=torch.int64).to(gpu) ## Move to gpu
labels=labels[0]
label_mask = labels.eq(tok.pad_token_id).unsqueeze(-1).to(gpu)
input_ids=input_ids.to(gpu) ## Move to gpu
input_masks=input_masks.to(gpu) ## Move to gpu
decoder_input_ids=decoder_input_ids.to(gpu) ## Move to gpu
labels=labels.to(gpu) ## Move to gpu
optimizer.zero_grad() ## Empty the gradients before any computation.
if rank == 0:
writer.add_scalar("learning rate", scheduler.get_lr()[0], ctr)
if args.mixed_wait_k:
model.module.config.wait_k = random.randint(1, args.wait_k)
if rank == 0:
writer.add_scalar("mixed wait k value", model.module.config.wait_k, ctr)
if args.fp16: ## The difference between AMP and FP32 is the use of the autocast. The code below is duplicated and can be shrunk. TODO.
with torch.cuda.amp.autocast():
mod_compute = model(input_ids=input_ids, attention_mask=input_masks ,decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation, additional_input_ids=input_ids_parent if args.multi_source else None, additional_input_ids_mask=input_masks_parent if args.multi_source else None, label_mask=label_mask if args.num_domains_for_domain_classifier > 1 else None) ## Run the model and get logits.
logits = mod_compute.logits
lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## Softmax tempering of logits if needed.
loss = label_smoothed_nll_loss(
lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id
) ## Label smoothed cross entropy loss.
loss = loss*args.softmax_temperature ## Up scale loss in case of non unitary temperatures. Note that in case of self calibrating temperature, the softmax temperature must be set to 1.
if rank == 0:
writer.add_scalar("pure cross entropy loss", loss.detach().cpu().numpy(), ctr)
if args.temperature_calibration:
loss = loss*mod_compute.softmax_temperature
if rank == 0:
writer.add_scalar("calibrated temperature", mod_compute.softmax_temperature.detach().cpu().numpy(), ctr)
writer.add_scalar("calibrated temperature loss", loss.detach().cpu().numpy(), ctr)
if args.num_domains_for_domain_classifier > 1: ## We augment the main loss with the domain classifier loss
domain_classifier_logits = mod_compute.domain_classifier_logits
domain_classifier_lprobs = torch.nn.functional.log_softmax(domain_classifier_logits, dim=-1) ## Softmax tempering of logits if needed.
domain_classifier_loss = label_smoothed_nll_loss(
domain_classifier_lprobs.view(-1,args.num_domains_for_domain_classifier), domain_classifier_labels.view(-1,1), args.label_smoothing
) ## Label smoothed cross entropy loss. We are not going to do any temperature related stuff to this.
loss = domain_classifier_loss*args.domain_classifier_loss_weight + loss * (1.0-args.domain_classifier_loss_weight)
if rank == 0:
writer.add_scalar("domain classifier loss", domain_classifier_loss.detach().cpu().numpy(), ctr)
writer.add_scalar("loss with domain classifier loss", loss.detach().cpu().numpy(), ctr)
## We will do multilayer softmaxing without any consideration for entropy maximization or distillation.
if mod_compute.additional_lm_logits is not None:
for additional_logits in mod_compute.additional_lm_logits:
lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## Softmax tempering of logits if needed.
loss_extra = label_smoothed_nll_loss(
lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id
) ## Label smoothed cross entropy loss.
loss_extra = loss_extra*args.softmax_temperature ## Up scale loss in case of non unitary temperatures. Note that in case of self calibrating temperature, the softmax temperature must be set to 1. TODO: Perhaps log this too.
if args.temperature_calibration:
loss_extra = loss_extra*mod_compute.softmax_temperature
loss += loss_extra ## Up scale loss in case of non unitary temperatures. TODO: Perhaps log this too.
if args.max_ent_weight != -1: ## This deals with softmax entropy maximization. The logic is that we compute the softmax entropy of the predictions via -(P(Y/X)*log(P(Y/X))). We then add it to the cross entropy loss with a negative sign as we wish to maximize entropy. This should penalize overconfident predictions.
assert (args.max_ent_weight >= 0 and args.max_ent_weight <= 1)
logits = logits*args.softmax_temperature ## We have to undo the tempered logits else our entropy estimate will be wrong.
if args.temperature_calibration:
logits = logits*mod_compute.softmax_temperature
lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## No tempering here
entropy = -(torch.exp(lprobs)*lprobs).mean()
if rank == 0:
writer.add_scalar("softmax entropy", entropy.detach().cpu().numpy(), ctr)
if mod_compute.additional_lm_logits is not None:
for additional_logits in mod_compute.additional_lm_logits: ## Compute entropy for each layer as well
additional_logits = additional_logits*args.softmax_temperature ## We have to undo the tempered logits else our entropy estimate will be wrong.
if args.temperature_calibration:
additional_logits = additional_logits*mod_compute.softmax_temperature
lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## No tempering here
entropy_extra = -(torch.exp(lprobs)*lprobs).mean()
entropy += entropy_extra
loss = loss*(1-args.max_ent_weight) - entropy*args.max_ent_weight ## Maximize the entropy so a minus is needed. Weigh and add losses as required.
if rank == 0:
writer.add_scalar("loss with entropy loss", loss.detach().cpu().numpy(), ctr)
if args.distillation: ## Time to distill.
if args.cross_distillation: ## The input ids and masks should be replaced with those appropriate for the parent.
input_ids = input_ids_parent
input_masks = input_masks_parent
with torch.no_grad(): ## No gradient to avoid memory allocation.
parent_mod_compute = parent_model(input_ids=input_ids, attention_mask=input_masks ,decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation) ## Get the parent model's computations.
distillation_loss = compute_distillation_losses(mod_compute, parent_mod_compute, labels, tok.pad_token_id, args) ## Compute distillation losses.
loss = args.distillation_loss_weight*distillation_loss + (1.0 - args.distillation_loss_weight)*loss ## Update the main loss with weighing and adding.
if rank == 0:
writer.add_scalar("distillation loss", distillation_loss.detach().cpu().numpy(), ctr)
writer.add_scalar("final loss", loss.detach().cpu().numpy(), ctr)
else:
mod_compute = model(input_ids=input_ids, attention_mask=input_masks, decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation, additional_input_ids=input_ids_parent if args.multi_source else None, additional_input_ids_mask=input_masks_parent if args.multi_source else None, label_mask=label_mask if args.num_domains_for_domain_classifier > 1 else None) ## Run the model and get logits.
logits = mod_compute.logits
lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## Softmax tempering of logits if needed.
loss = label_smoothed_nll_loss(
lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id
) ## Label smoothed cross entropy loss.
loss = loss*args.softmax_temperature ## Up scale loss in case of non unitary temperatures.
if rank == 0:
writer.add_scalar("pure cross entropy loss", loss.detach().cpu().numpy(), ctr)
if args.temperature_calibration:
loss = loss*mod_compute.softmax_temperature
if rank == 0:
writer.add_scalar("calibrated temperature", mod_compute.softmax_temperature.detach().cpu().numpy(), ctr)
writer.add_scalar("calibrated temperature loss", loss.detach().cpu().numpy(), ctr)
if args.num_domains_for_domain_classifier > 1: ## We augment the main loss with the domain classifier loss
domain_classifier_logits = mod_compute.domain_classifier_logits
domain_classifier_lprobs = torch.nn.functional.log_softmax(domain_classifier_logits, dim=-1) ## Softmax tempering of logits if needed.
domain_classifier_loss = label_smoothed_nll_loss(
domain_classifier_lprobs.view(-1,args.num_domains_for_domain_classifier), domain_classifier_labels.view(-1,1), args.label_smoothing
) ## Label smoothed cross entropy loss. We are not going to do any temperature related stuff to this.
loss = domain_classifier_loss*args.domain_classifier_loss_weight + loss * (1.0-args.domain_classifier_loss_weight)
if rank == 0:
writer.add_scalar("domain classifier loss", domain_classifier_loss.detach().cpu().numpy(), ctr)
writer.add_scalar("loss with domain classifier loss", loss.detach().cpu().numpy(), ctr)
## We will do multilayer softmaxing without any consideration for distillation or domain classification.
if mod_compute.additional_lm_logits is not None:
for additional_logits in mod_compute.additional_lm_logits:
lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## Softmax tempering of logits if needed.
loss_extra = label_smoothed_nll_loss(
lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id
) ## Label smoothed cross entropy loss.
loss_extra = loss_extra*args.softmax_temperature ## Up scale loss in case of non unitary temperatures. Note that in case of self calibrating temperature, the softmax temperature must be set to 1. TODO: Perhaps log this too.
if args.temperature_calibration:
loss_extra = loss_extra*mod_compute.softmax_temperature
loss += loss_extra ## Up scale loss in case of non unitary temperatures. TODO: Perhaps log this too.
if args.max_ent_weight != -1: ## This deals with softmax entropy maximization. The logic is that we compute the softmax entropy of the predictions via -(P(Y/X)*log(P(Y/X))). We then add it to the cross entropy loss with a negative sign as we wish to maximize entropy. This should penalize overconfident predictions.
assert (args.max_ent_weight >= 0 and args.max_ent_weight <= 1)
logits = logits*args.softmax_temperature ## We have to undo the tempered logits else our entropy estimate will be wrong.
if args.temperature_calibration:
logits = logits*mod_compute.softmax_temperature
lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## No tempering here
entropy = -(torch.exp(lprobs)*lprobs).mean()
if rank == 0:
writer.add_scalar("softmax entropy", entropy.detach().cpu().numpy(), ctr)
if mod_compute.additional_lm_logits is not None:
for additional_logits in mod_compute.additional_lm_logits: ## Compute entropy for each layer as well
additional_logits = additional_logits*args.softmax_temperature ## We have to undo the tempered logits else our entropy estimate will be wrong.
if args.temperature_calibration:
additional_logits = additional_logits*mod_compute.softmax_temperature
lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## No tempering here
entropy_extra = -(torch.exp(lprobs)*lprobs).mean()
entropy += entropy_extra
loss = loss*(1-args.max_ent_weight) - entropy*args.max_ent_weight ## Maximize the entropy so a minus is needed. Weigh and add losses as required.
if rank == 0:
writer.add_scalar("loss with entropy loss", loss.detach().cpu().numpy(), ctr)
if args.distillation: ## Time to distill.
if args.cross_distillation: ## The input ids and masks should be replaced with those appropriate for the parent.
input_ids = input_ids_parent
input_masks = input_masks_parent
with torch.no_grad(): ## No gradient to avoid memory allocation.
parent_mod_compute = parent_model(input_ids=input_ids, attention_mask=input_masks ,decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation) ## Get the parent model's computations.
distillation_loss = compute_distillation_losses(mod_compute, parent_mod_compute, labels, tok.pad_token_id, args) ## Compute distillation losses.
loss = args.distillation_loss_weight*distillation_loss + (1.0 - args.distillation_loss_weight)*loss ## Update the main loss with weighing and adding.
if rank == 0:
writer.add_scalar("distillation loss", distillation_loss.detach().cpu().numpy(), ctr)
writer.add_scalar("final loss", loss.detach().cpu().numpy(), ctr)
input_ids=input_ids.to('cpu') ## Move to CPU. May not be needed but its a safety net.
input_masks=input_masks.to('cpu') ## Move to CPU. May not be needed but its a safety net.
decoder_input_ids=decoder_input_ids.to('cpu') ## Move to CPU. May not be needed but its a safety net.
labels=labels.to('cpu') ## Move to CPU. May not be needed but its a safety net.
if args.cross_distillation or args.multi_source:
input_ids_parent=input_ids_parent.to('cpu') ## Move to CPU. May not be needed but its a safety net.
input_masks_parent=input_masks_parent.to('cpu') ## Move to CPU. May not be needed but its a safety net.
if args.fp16: ## The gradient scaler needs to be invoked with FP16/AMP computation.
loss = loss/args.multistep_optimizer_steps
scaler.scale(loss).backward()
num_batches_this_optimizer_step += 1
losses += loss
if num_batches_this_optimizer_step < args.multistep_optimizer_steps:
continue
else:
pass
if args.fp16: ## With FP16/AMP computation we need to unscale gradients before clipping them. We then optimize and update the scaler.
if args.max_gradient_clip_value != 0.0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_gradient_clip_value)
scaler.step(optimizer)
scaler.update()
else: ## With FP32, we just do regular backpropagation, gradient clipping and then step the optimizer.
loss = loss/args.multistep_optimizer_steps
loss.backward()
num_batches_this_optimizer_step += 1
losses += loss
if num_batches_this_optimizer_step < args.multistep_optimizer_steps:
continue
if args.max_gradient_clip_value != 0.0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_gradient_clip_value)
optimizer.step()
scheduler.step() ## Advance the scheduler to get to the next value of LR.
lv = losses.detach().cpu().numpy() ## Detach the loss in order to report it.
losses = 0
num_batches_this_optimizer_step = 0
if ctr % 10 == 0 and rank % 8 == 0: ## Print the current loss every 10 batches but only for the master/prime process.
print(ctr, lv)
sys.stdout.flush()
if ctr % args.eval_every == 0 and rank == 0 and args.save_weights_and_gradeint_info: ## Save the model weight and gradient info every time this condition is triggered.
for param_name, param_value in model.named_parameters():
if not ("embed_positions" in param_name and args.positional_encodings):
writer.add_histogram("weights."+param_name, param_value.detach().cpu().numpy(), ctr)
writer.add_histogram("gradients."+param_name, param_value.grad.detach().cpu().numpy(), ctr)
end = time.time()
ctr += 1
if rank == 0:
CHECKPOINT_PATH = args.model_path
print("Saving the model after the final step")
# All processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes.
# Therefore, saving it in one process is sufficient.
print("The best bleu was:", max_global_sbleu)
print("The corresponding step was:", max_global_sbleu_step*args.eval_every)
checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr}
torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save one last time.
torch.save(model.module.state_dict(), CHECKPOINT_PATH+".pure_model") ## Pure model without any ddp markers or optimizer info.
dist.barrier() ## Wait till all processes reach this point so that the prime process saves the final checkpoint.
dist.destroy_process_group() ## Everything that has a beginning has an end, Neo!
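## ---------------------------------------------------------------------------
## Illustrative sketch (added for clarity, not part of the original training
## logic and never called): a minimal, self-contained version of the label
## smoothed cross entropy combined with the softmax entropy maximization term
## used in the training loop above. The helper name and the simplified
## smoothing scheme are assumptions for illustration only; the real
## label_smoothed_nll_loss lives elsewhere in this repository.
def _sketch_label_smoothed_loss_with_entropy(logits, labels, pad_id, smoothing=0.1, max_ent_weight=0.1):
    import torch
    lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## Shape: [batch, seq, vocab].
    nll = -lprobs.gather(dim=-1, index=labels.unsqueeze(-1)).squeeze(-1) ## Gold-token negative log-probs.
    smooth = -lprobs.mean(dim=-1) ## Uniform smoothing component over the vocabulary.
    pad_mask = labels.eq(pad_id)
    nll = nll.masked_fill(pad_mask, 0.0)
    smooth = smooth.masked_fill(pad_mask, 0.0)
    ce = ((1.0 - smoothing) * nll + smoothing * smooth).sum() / (~pad_mask).sum() ## Label smoothed cross entropy.
    entropy = -(torch.exp(lprobs) * lprobs).mean() ## Average softmax entropy of the predictions.
    ## Weigh the two terms as in the loop above: subtracting the entropy maximizes it.
    return ce * (1.0 - max_ent_weight) - entropy * max_ent_weight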
def run_demo():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--nodes', default=1,
type=int, metavar='N')
parser.add_argument('-g', '--gpus', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-nr', '--nr', default=0, type=int,
help='ranking within the nodes')
parser.add_argument('-a', '--ipaddr', default='localhost', type=str,
help='IP address of the main node')
parser.add_argument('-p', '--port', default='26023', type=str,
                        help='Port of the main node')
parser.add_argument('--freeze_embeddings', action='store_true',
help='Should freeze embeddings during fine tuning?')
parser.add_argument('--freeze_encoder', action='store_true',
help='Should we freeze encoder during fine tuning?')
parser.add_argument('--positional_encodings', action='store_true',
help='If true then we will use positional encodings instead of learned positional embeddings.')
parser.add_argument('--no_embed_norm', action='store_true',
help='If true then we wont normalize embeddings.')
parser.add_argument('--scale_embedding', action='store_true',
help='Should we scale embeddings?')
parser.add_argument('--no_scale_attention_embedding', action='store_true',
                        help='Set this to disable scaling of attention embeddings.')
parser.add_argument('--multistep_optimizer_steps', default=1, type=int, help="In case you want to simulate a larger batch you should set this to a higher value.")
parser.add_argument('--encoder_layers', default=6, type=int, help="The value for number of encoder layers")
parser.add_argument('--decoder_layers', default=6, type=int, help="The value for number of decoder layers")
parser.add_argument('--label_smoothing', default=0.1, type=float, help="The value for label smoothing")
parser.add_argument('--weight_decay', default=0.0001, type=float, help="The value for weight decay")
parser.add_argument('--lr', default=7e-4, type=float, help="The value for the learning rate")
parser.add_argument('--layerdrop', default=0.0, type=float, help="The value for layerdrop which indicates the probability that a whole layer will be bypassed via an identity transformation.")
parser.add_argument('--dropout', default=0.1, type=float, help="The value for embedding dropout")
parser.add_argument('--attention_dropout', default=0.1, type=float, help="The value for attention dropout")
parser.add_argument('--activation_dropout', default=0.1, type=float, help="The value for activation dropout")
parser.add_argument('--data_sampling_temperature', default=5.0, type=float, help="The value for the data sampling temperature")
parser.add_argument('--token_masking_lambda', default=3.5, type=float, help="The value for the poisson sampling lambda value")
parser.add_argument('--token_masking_probs_range', nargs='+', type=float, default=[0.3], help="The range of probabilities with which the token will be masked. If you want a fixed probability then specify one argument else specify ONLY 2.")
parser.add_argument('--repetition_penalty', default=1.0, type=float,
help='To prevent repetition during decoding. 1.0 means no repetition. 1.2 was supposed to be a good value for some settings according to some researchers.')
parser.add_argument('--no_repeat_ngram_size', default=0, type=int,
help='N-grams of this size will never be repeated in the decoder. Lets play with 2-grams as default.')
parser.add_argument('--length_penalty', default=1.0, type=float,
help='Set to more than 1.0 for longer sentences.')
parser.add_argument('--no_skip_special_tokens', action='store_false',
help='Should we return outputs without special tokens? We may need this to deal with situations where the user specified control tokens must be in the output.')
parser.add_argument('--encoder_no_repeat_ngram_size', default=0, type=int,
help='N-gram sizes to be prevented from being copied over from encoder. Lets play with 2-grams as default.')
parser.add_argument('--encoder_tying_config', default=None, type=str,
help='What should be the parameter tying configuration? 1-1-1-1-1-1 means 6 layers where all are shared. 1-1-2-2-3-3 means 6 layers, 3 unique layers and each one is recurred twice before passing to another layer. 1-2-3-1-2-3 means 6 layers, 3 unique layers and recurrence is done twice after all layers have been passed through. The default None implies a 1-2-3-4-...-N setup')
parser.add_argument('--decoder_tying_config', default=None, type=str,
help='What should be the parameter tying configuration? 1-1-1-1-1-1 means 6 layers where all are shared. 1-1-2-2-3-3 means 6 layers, 3 unique layers and each one is recurred twice before passing to another layer. 1-2-3-1-2-3 means 6 layers, 3 unique layers and recurrence is done twice after all layers have been passed through. The default None implies a 1-2-3-4-...-N setup')
parser.add_argument('--softmax_temperature', default=1.0, type=float, help="The value for the softmax temperature")
parser.add_argument('--distillation_temperature', default=1.0, type=float, help="The value for the softmax temperature during distillation")
parser.add_argument('--temperature_calibration', action='store_true',
                        help='Are we calibrating the temperature automatically during training? If yes then the softmax_temperature parameter should have a value of 1.0; furthermore, the returned temperature will be used to scale the loss.')
parser.add_argument('--encoder_attention_heads', default=8, type=int, help="The value for number of encoder attention heads")
parser.add_argument('--decoder_attention_heads', default=8, type=int, help="The value for number of decoder attention heads")
parser.add_argument('--wait_k', default=-1, type=int, help="The value for k in wait-k snmt. Keep as -1 for non-snmt aka vanilla NMT.")
parser.add_argument('--mixed_wait_k', action='store_true',
help='Should we train using up to wait_k? This can help simulate multiple wait_k')
    parser.add_argument('--additional_source_wait_k', default=-1, type=int, help="The value for k in wait-k snmt. Keep as -1 for non-snmt aka vanilla NMT. This is the wait-k for the additional source language. Can be used for simultaneous multisource NMT.")
parser.add_argument('--future_prediction', action='store_true',
                        help='This assumes that we do not mask token sequences randomly but only after the latter half of the sentence. We do this to make the model more robust towards missing future information. Granted we can achieve this using wait-k but methinks this may be a better way of training.')
parser.add_argument('--unidirectional_encoder', action='store_true',
                        help='This assumes that we use a unidirectional encoder. This is simulated via a lower-triangular matrix mask in the encoder. Easy peasy lemon squeezy.')
parser.add_argument('--decoder_ffn_dim', default=2048, type=int, help="The value for decoder ff hidden dim")
parser.add_argument('--encoder_ffn_dim', default=2048, type=int, help="The value for encoder ff hidden dim")
parser.add_argument('--d_model', default=512, type=int, help="The value for model hidden size")
parser.add_argument('--eval_every', default=1000, type=int, help="The number of iterations after which an evaluation must be done. Also saves a checkpoint every these number of steps.")
parser.add_argument('--no_eval_save_every', default=10000, type=int, help="The number of iterations after which a model must be force saved in case evaluation is not done.")
parser.add_argument('--max_gradient_clip_value', default=1.0, type=float, help="The max value for gradient norm value")
parser.add_argument('--use_official_pretrained', action='store_true',
help='Use this flag if you want the argument "pretrained_model" to specify a pretrained model created by someone else.')
parser.add_argument('--pretrained_model', default='', type=str,
help='Path to the pretrained model.')
parser.add_argument('--no_reload_optimizer_ctr_and_scheduler', action='store_true',
                        help='Should we reload the optimizer, counter and scheduler? By default we always reload these. Set this flag if you only want to reload the model params and optimize from scratch.')
parser.add_argument('-m', '--model_path', default='pytorch.bin', type=str,
help='Path to save the fine tuned model')
parser.add_argument('--warmup_steps', default=16000, type=int,
help='Scheduler warmup steps')
parser.add_argument('--batch_size', default=2048, type=int,
help='Train batch sizes in tokens')
parser.add_argument('--batch_size_indicates_lines', action='store_true',
help='Should we batch as a fixed number of lines?')
parser.add_argument('--dev_batch_size', default=1024, type=int,
help='Dev batch sizes in lines')
parser.add_argument('--max_src_length', default=256, type=int,
help='Maximum token length for source language')
parser.add_argument('--max_tgt_length', default=256, type=int,
help='Maximum token length for target language')
parser.add_argument('--early_stop_checkpoints', default=10, type=int,
help='Number of checkpoints to wait to see if BLEU increases.')
parser.add_argument('--learning_rate_scaling', default=2, type=int,
                        help='How much should the LR be divided by during annealing? Set num_batches to a larger value or else you will see the LR go to zero too soon.')
parser.add_argument('--max_annealing_attempts', default=2, type=int,
help='Number of times LR should be annealed.')
parser.add_argument('--additional_early_stop_checkpoints_per_anneal_step', default=5, type=int,
help='How many additional checkpoints should we wait till declaring convergence? This will be multiplied with the annealing step number.')
parser.add_argument('--num_batches', default=500000, type=int,
help='Number of batches to train on')
parser.add_argument('--max_eval_batches', default=1000, type=int,
help='These many evaluation batches will be considered. Use a small value like 5 to cover a portion of the evaluation data.')
parser.add_argument('--max_decode_length_multiplier', default=2.0, type=float,
help='This multiplied by the source sentence length will be the maximum decoding length. If you want to directly specify a particular value then set this to the negative of that value.')
parser.add_argument('--min_decode_length_multiplier', default=0.1, type=float,
help='This multiplied by the source sentence length will be the minimum decoding length. If you want to directly specify a particular value then set this to the negative of that value.')
parser.add_argument('--tokenizer_name_or_path', default='ai4bharat/indic-bert', type=str,
help='Name of or path to the tokenizer')
parser.add_argument('--pretrained_tokenizer_name_or_path', default=None, type=str,
help='Name of or path to the tokenizer of the pretrained model if its different from the current model. This tokenizer will be used for remapping embeddings so as to reuse as many pretrained embeddings as possible.')
parser.add_argument('--multi_source_method', default=None, type=str,
help='How to merge representations from multiple sources? Should be one of self_relevance_and_merge_after_attention, self_relevance_and_merge_before_attention, merge_after_attention, merge_before_attention. We also need to implement averaging methods such as early averaging (average encoder representations) and late averaging (average softmaxes). Relevance mechanisms should have a separate flag in the future.')
parser.add_argument('--tokenization_sampling', action='store_true',
                        help='Should we use stochastic tokenization aka BPE dropout or Subword regularization?')
parser.add_argument('--tokenization_nbest_list_size', type=int, default=64,
help='The size of the nbest list when doing stochastic tokenization.')
parser.add_argument('--tokenization_alpha_or_dropout', type=float, default=0.1,
help='The value of sentence piece regularization amount controlled via alpha or the amount of BPE dropout controlled by dropout.')
parser.add_argument('--train_slang', default='en', type=str,
                        help='Source language(s) for training. If you want to specify the domain of the language pair then specify it as language-domain (hyphen in the middle) and make sure to set --num_domains_for_domain_classifier to a value > 1. If you want to specify an additional source then you need to do the same thing but note that you cannot do multi-source domain classification as it is just too much.')
parser.add_argument('--train_tlang', default='hi', type=str,
help='Target language(s) for training')
parser.add_argument('--train_src', default='', type=str,
help='Source language training sentences')
parser.add_argument('--train_tgt', default='', type=str,
help='Target language training sentences')
parser.add_argument('--dev_slang', default='en', type=str,
help='Source language(s) for training')
parser.add_argument('--dev_tlang', default='hi', type=str,
help='Target language(s) for training')
parser.add_argument('--dev_src', default='', type=str,
help='Source language(s) development sentences')
parser.add_argument('--dev_tgt', default='', type=str,
help='Target language(s) development sentences')
parser.add_argument('--fp16', action='store_true',
help='Should we use fp16 training?')
parser.add_argument('--no_eval', action='store_true',
help='Should we skip evaluation?')
parser.add_argument('--source_masking_for_bilingual', action='store_true',
help='Should we use masking on source sentences when training on parallel corpora?')
parser.add_argument('--is_summarization', action='store_true',
                        help='Is this a summarization task?')
parser.add_argument('--hard_truncate_length', default=0, type=int,
                        help='Should we perform a hard truncation of the batch? This is needed to eliminate CUDA caching errors when sequence lengths exceed a particular limit, since the self-attention matrices become massive. Choose this value empirically.')
parser.add_argument('--use_rouge', action='store_true',
help='Should we use ROUGE for evaluation?')
parser.add_argument('--max_ent_weight', type=float, default=-1.0,
                        help='Should we maximize softmax entropy? If the value is anything between 0 and 1 then yes. If it is -1.0 then no maximization will be done.')
parser.add_argument('--num_domains_for_domain_classifier', type=int, default=1,
help='If we have multiple domains then we should set this to a value higher than one.')
parser.add_argument('--gradient_reversal_for_domain_classifier', action='store_true',
help='Should we do gradient reversal for the domain classifier? If true then all gradients below the softmax layer (meaning linear projection plus softmax activation) for the classifier will be reversed. Essentially, the representations for two domains will be forced to become more similar. This may in turn be used for style transfer.')
parser.add_argument('--domain_classifier_loss_weight', type=float, default=0.1,
help='What weight should we give to the domain classifier? 1 minus this weight will be given to the main loss.')
parser.add_argument('--shard_files', action='store_true',
help='Should we shard the training data? Set to true only if the data is not already pre-sharded.')
parser.add_argument('--multi_source', action='store_true',
help='Are we doing multisource NMT? In that case you should specify the train_src as a hyphen separated pair indicating the parent language and the child language. You should also ensure that the source file is a tab separated file where each line contains "the parent pair source sentence[tab]child pair source sentence".')
parser.add_argument('--multilayer_softmaxing', action='store_true',
help='Should we apply a softmax for each decoder layer? Unsupported for distillation. Only for vanilla training.')
parser.add_argument('--remap_encoder', default='', type=str,
help='This indicates the remappings for the layer. Example: 1-2,2-4,3-6. The plan is to use these remappings to cut down the model prior to decoding or training. Suppose we have a 6 layer model but we only want to utilize the 2nd, 4th and 6th layer then we will copy the content of the 2nd, 4th and 6th layers to the 1st, 2nd and 3rd layer and delete the former layers from the parameter dictionary. This counts as layer pruning. IMPORTANT NOTE: Ensure that you specify ALL child layer indices you wish mapped. For example if you want 1-2,2-1,3-3 you MUST NOT skip the 3-3 part else it will be deleted from the model dictionary and will be randomly initialized. The loading mechanism is not strict so it will ignore missing or non matching keys. ADDITIONAL NOTE: Load a checkpoint with only the model and not the optimizer to prevent failure as we are not sure if remapping optimizers and learning rate schedulers make sense or not.')
parser.add_argument('--remap_decoder', default='', type=str,
help='This indicates the remappings for the layer. Example: 1-2,2-4,3-6. The plan is to use these remappings to cut down the model prior to decoding or training. Suppose we have a 6 layer model but we only want to utilize the 2nd, 4th and 6th layer then we will copy the content of the 2nd, 4th and 6th layers to the 1st, 2nd and 3rd layer and delete the former layers from the parameter dictionary. This counts as layer pruning. IMPORTANT NOTE: Ensure that you specify ALL child layer indices you wish mapped. For example if you want 1-2,2-1,3-3 you MUST NOT skip the 3-3 part else it will be deleted from the model dictionary and will be randomly initialized. The loading mechanism is not strict so it will ignore missing or non matching keys. ADDITIONAL NOTE: Load a checkpoint with only the model and not the optimizer to prevent failure as we are not sure if remapping optimizers and learning rate schedulers make sense or not.')
parser.add_argument('--eliminate_encoder_before_initialization', action='store_true',
help='Lets wipe out the encoder params from the pretrained model before we use it to initialize the current model. This means we have random encoder initialization.')
parser.add_argument('--eliminate_decoder_before_initialization', action='store_true',
help='Lets wipe out the decoder params from the pretrained model before we use it to initialize the current model. This means we have random decoder initialization.')
parser.add_argument('--eliminate_embeddings_before_initialization', action='store_true',
help='Lets wipe out the embedding params from the pretrained model before we use it to initialize the current model. This means we have random embedding initialization.')
### Distillation flags
parser.add_argument('--distillation', action='store_true',
help='Should we perform distillation from a parent model? If so then you must specify the model using "parent_pretrained_model". There are several distillation options check the flag called "distillation_styles".')
parser.add_argument('--cross_distillation', action='store_true',
help='Should we perform cross distillation from a parent model which has been trained on another source language but the same target language? If so then you must specify the model using "parent_pretrained_model". Additionally you should specify the train_src as a hyphen separated pair indicating the parent language and the child language. You should also ensure that the source file is a tab separated file where each line contains "the parent pair source sentence[tab]child pair source sentence" There are several distillation options check the flag called "distillation_styles".')
parser.add_argument('--use_official_parent_pretrained', action='store_true',
help='Use this flag if you want the argument "pretrained_model" to specify a pretrained model created by someone else for the purposes of distillation. Use this carefully because if the parent is created by someone else then you have to have your own model with different configurations for fine-tuning. Essentially you must make sure that use_official_parent_pretrained and use_official_pretrained are not true simultaneously.')
parser.add_argument('--parent_pretrained_model', default='', type=str,
help='Path to the parent pretrained model for distillation. The pretrained_model flag will be used to initialize the child model.')
parser.add_argument('--distillation_loss_weight', type=float, default=0.7,
help='All the distillation losses will be averaged and then multiplied by this weight before adding it to the regular xentropy loss which will be weighted by (1- distillation_loss_weight).')
parser.add_argument('--distillation_styles', default='cross_entropy', type=str,
help='One or more of softmax_distillation, attention_distillation, hidden_layer_regression. For attention distillation you must make sure that the number of attention heads between the parent and child are the same and for hidden layer regression you must make sure that the hidden size (d_model) is the same for the parent and child. In both these cases, you should also specify the layer mapping. See the "distillation_layer_mapping" flag.')
parser.add_argument('--distillation_layer_mapping', default='1-1,2-2,3-3,4-4,5-5,6-6', type=str,
help='This indicates the mappings between the parent and child model. The same flag is used for the encoder and the decoder. If you want to map the 2nd parent layer to the first child layer then use 2-1. Note that the layers are not zero indexed as per the description. Ensure that your indices are correct because checking is not done at the moment. If you get weird results then first make sure that your flags are correctly set. If the parent has 6 layers and the child has 3 layers then something like 6-4 will definitely throw an error. User beware! Dokuro mark.')
parser.add_argument('--parent_encoder_layers', default=6, type=int, help="The value for number of encoder layers")
parser.add_argument('--parent_decoder_layers', default=6, type=int, help="The value for number of decoder layers")
parser.add_argument('--parent_dropout', default=0.1, type=float, help="The value for embedding dropout")
parser.add_argument('--parent_attention_dropout', default=0.1, type=float, help="The value for attention dropout")
parser.add_argument('--parent_activation_dropout', default=0.1, type=float, help="The value for activation dropout")
parser.add_argument('--parent_encoder_attention_heads', default=8, type=int, help="The value for number of encoder attention heads")
parser.add_argument('--parent_decoder_attention_heads', default=8, type=int, help="The value for number of decoder attention heads")
parser.add_argument('--parent_decoder_ffn_dim', default=2048, type=int, help="The value for decoder ff hidden dim")
parser.add_argument('--parent_encoder_ffn_dim', default=2048, type=int, help="The value for encoder ff hidden dim")
parser.add_argument('--parent_d_model', default=512, type=int, help="The value for model hidden size")
parser.add_argument('--save_weights_and_gradeint_info', action='store_true',
                        help='Saving weight and gradient information is time consuming, so it is optional and off by default.')
###
    ### Placeholder flags to prevent the code from breaking. These flags are not intended to be used for fine-tuning; they exist because the common_utils.py methods assume these args are present when joint mbart training and regular NMT training is done. TODO: Modify the code to avoid the need for these flags in this script.
parser.add_argument('--unify_encoder', action='store_true',
help='Should we minimize the encoder representation distances instead of regular cross entropy minimization on the parallel corpus?')
args = parser.parse_args()
assert len(args.token_masking_probs_range) <= 2
print("IP address is", args.ipaddr)
args.world_size = args.gpus * args.nodes #
train_files = {}
slangs = args.train_slang.strip().split(",")
tlangs = args.train_tlang.strip().split(",")
train_srcs = args.train_src.strip().split(",")
train_tgts = args.train_tgt.strip().split(",")
if args.num_domains_for_domain_classifier > 1: ## In case we have to do domain classification
train_domains = args.train_domains.strip().split(",") ## Should not be empty
args.train_domains = {} ## We can index the domain indicator this way
domain_idx = 0
for train_domain in train_domains:
if train_domain not in args.train_domains:
args.train_domains[train_domain] = domain_idx
domain_idx += 1
train_files = {slang+"-"+tlang+"-"+train_domain: (train_src, train_tgt, args.train_domains[train_domain]) for slang, tlang, train_src, train_tgt, train_domain in zip(slangs, tlangs, train_srcs, train_tgts, train_domains)}
else:
train_files = {slang+"-"+tlang: (train_src, train_tgt) for slang, tlang, train_src, train_tgt in zip(slangs, tlangs, train_srcs, train_tgts)}
print("Training files are:", train_files)
dev_files = {}
if not args.no_eval:
slangs = args.dev_slang.strip().split(",")
tlangs = args.dev_tlang.strip().split(",")
dev_srcs = args.dev_src.strip().split(",")
dev_tgts = args.dev_tgt.strip().split(",")
dev_files = {slang+"-"+tlang: (dev_src, dev_tgt) for slang, tlang, dev_src, dev_tgt in zip(slangs, tlangs, dev_srcs, dev_tgts)}
print("Development files are:", dev_files)
os.environ['MASTER_ADDR'] = args.ipaddr #
os.environ['MASTER_PORT'] = args.port #
quit_condition = torch.ones(1) ## Create a variable to hold the quitting condition trigger
quit_condition.share_memory_() ## Share this among all processes
mp.spawn(model_create_load_run_save, nprocs=args.gpus, args=(args,train_files, dev_files, quit_condition)) #
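## Illustrative sketch (added for clarity, never called by this script): how a
## shared CPU tensor lets spawned workers observe a flag set by rank 0, which is
## the same mechanism as the quit_condition tensor created in run_demo above.
## The function names below are hypothetical stand-ins.
def _sketch_shared_flag_worker(rank, flag):
    if rank == 0:
        flag[0] = -1 ## Rank 0 flips the flag; all ranks share the same underlying storage.
def _sketch_shared_flag_demo(num_workers=2):
    flag = torch.ones(1)
    flag.share_memory_() ## Back the tensor with shared memory so spawned children see the write.
    mp.spawn(_sketch_shared_flag_worker, nprocs=num_workers, args=(flag,))
    return flag ## After the workers join, flag[0] is -1 in the parent as well.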
if __name__ == "__main__":
run_demo() | [
"torch.cuda.amp.autocast",
"torch.distributed.destroy_process_group",
"torch.distributed.init_process_group",
"torch.save",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.ones",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.nn.functional.log_softmax",
"torch.tensor",
"torch.load",
"torch.cuda.amp.GradScaler",
"torch.distributed.barrier",
"torch.exp",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7.1 | alvations/yanmtt | 9da45055e95f6e66faa17306ad79630071b84d9e |
1.6 | import torch
#Nota bene: In training, the batch-averaged losses are returned.
#In testing, the per-sample losses are returned instead.
def loss_enc(z_code, z_code_hat, test):
    # Per-sample squared L2 distance between the two latent codes.
    # The train and test branches were identical, so they are collapsed here.
    l_enc = torch.sum((z_code - z_code_hat)**2, dim=(1))
    return l_enc
#def loss_enc(z_code, z_code_hat, test):
# if test:
# l_enc = torch.sum((z_code - z_code_hat)**2, dim=(1))
# return l_enc
#def loss_rec(x, x_hat, test):
# if test :
# else:
# l_con = torch.sum(torch.abs(x - x_hat), dim=(1, 2, 3))
# return l_con
def loss_rec(x, x_hat, test):
    # Per-sample L1 reconstruction error between input and reconstruction.
    # The train and test branches were identical, so they are collapsed here.
    l_con = torch.sum(torch.abs(x - x_hat), dim=(1, 2, 3))
    return l_con
def loss_adv(dis_x, dis_x_hat, features_real, features_fake, test):
l_adv = torch.sum((dis_x - dis_x_hat)**2, dim=(1))
for fidx, _ in enumerate(features_real):
feat_dim = len(features_real[fidx].shape)
if(feat_dim == 4):
l_adv += torch.sum((features_real[fidx] - features_fake[fidx])**2, dim=(1, 2, 3))
elif(feat_dim == 3):
l_adv += torch.sum((features_real[fidx] - features_fake[fidx])**2, dim=(1, 2))
elif(feat_dim == 2):
l_adv += torch.sum((features_real[fidx] - features_fake[fidx])**2, dim=(1))
else:
l_adv += torch.sum((features_real[fidx] - features_fake[fidx])**2)
return l_adv
def loss_grad(grad_loss):
l_grad = grad_loss
#for i in range(nlayer):
# wrt = model.module.up[int(2*i)].weight
# target_grad = torch.autograd.grad(recon_loss, wrt, create_graph=True, retain_graph=True)[0]
#
# l_grad += -1 * func.cosine_similarity(target_grad.view(-1, 1),
# ref_grad[i].avg.view(-1, 1), dim=0)
return l_grad
def loss_ganomaly(z_code, z_code_hat, x, x_hat, \
dis_x, dis_x_hat, features_real, features_fake, \
w_grad, w_enc, w_adv, w_con, test):
z_code, z_code_hat, x, x_hat, dis_x, dis_x_hat = \
z_code.cpu(), z_code_hat.cpu(), x.cpu(), x_hat.cpu(), dis_x.cpu(), dis_x_hat.cpu()
for fidx, _ in enumerate(features_real):
features_real[fidx] = features_real[fidx].cpu()
features_fake[fidx] = features_fake[fidx].cpu()
l_enc = loss_enc(z_code, z_code_hat,test)
l_con = loss_rec(x, x_hat,test)
l_adv = loss_adv(dis_x, dis_x_hat, features_real, features_fake,test)
if(test):
l_tot = (w_enc * l_enc) + (w_con * l_con) + (w_adv * l_adv)
else:
l_tot = torch.mean((w_enc * l_enc) + (w_con * l_con) + (w_adv * l_adv))
l_enc = torch.mean(l_enc)
l_con = torch.mean(l_con)
l_adv = torch.mean(l_adv)
return l_tot, l_enc, l_con, l_adv
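# Illustrative usage sketch (added for clarity, not part of the original file):
# exercising loss_ganomaly with dummy tensors. All shapes below are assumptions
# chosen only for illustration; the real training code supplies the encoder
# codes, reconstruction and discriminator outputs of the GANomaly model.
if __name__ == "__main__":
    B = 4
    z_code, z_code_hat = torch.randn(B, 100), torch.randn(B, 100) # latent codes of x and x_hat
    x, x_hat = torch.randn(B, 1, 32, 32), torch.randn(B, 1, 32, 32) # input and reconstruction
    dis_x, dis_x_hat = torch.randn(B, 1), torch.randn(B, 1) # discriminator outputs
    feats_real = [torch.randn(B, 8, 4, 4)] # one intermediate discriminator feature map
    feats_fake = [torch.randn(B, 8, 4, 4)]
    l_tot, l_enc, l_con, l_adv = loss_ganomaly(z_code, z_code_hat, x, x_hat,
        dis_x, dis_x_hat, feats_real, feats_fake,
        w_grad=0, w_enc=1, w_adv=1, w_con=50, test=False)
    print(l_tot.item(), l_enc.item(), l_con.item(), l_adv.item())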
| [
"torch.abs",
"torch.mean",
"torch.sum"
] | 1.6.0 | ErikBertolino/Anomaly-Detection | edd14ccf3015d3bca48bd55b5b0d4aa98c98ff85 |
1.8 | import sys
import os
import shutil
import numpy as np
import argparse
import time
import json
from pathlib import Path
import torch
import random
random.seed(42)
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader, TensorDataset
import torch.utils.data.distributed
from sklearn.metrics import roc_auc_score, accuracy_score
import torch.utils.tensorboard as tensorboard
import torchvision.transforms as transforms
from opacus import PrivacyEngine
# from opacus.layers import DifferentiallyPrivateDistributedDataParallel as DPDDP
from opacus.utils import stats
from opacus.utils.uniform_sampler import UniformWithReplacementSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import train_test_split
sys.path.append(os.path.abspath('../'))
from art.estimators.classification import PyTorchClassifier
from art.utils import load_mnist
from art.attacks.inference.membership_inference import MembershipInferenceBlackBoxRuleBased, MembershipInferenceBlackBox
from art.utils import load_dataset
def convnet(num_classes):
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, num_classes, bias=True),
)
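# Illustrative sketch (added for clarity, never called below): a quick shape
# check of the convnet above on a dummy CIFAR-10 sized batch. The helper name
# and batch size are assumptions for illustration only.
def _sketch_convnet_forward():
    net = convnet(num_classes=10)
    dummy = torch.randn(8, 3, 32, 32) # batch of 8 fake 32x32 RGB images
    logits = net(dummy) # expected shape: (8, 10), one score per class
    return logits.shape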
def accuracy(preds, labels):
return (preds == labels).mean()
def save_checkpoint(state, is_best, filename="checkpoint.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def train(model, train_loader, optimizer, epoch, device):
model.train()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
for i, (images, target) in enumerate(train_loader):
images = images.to(device)
target = target.to(device)
# compute output
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
# measure accuracy and record loss
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
stats.update(stats.StatType.TRAIN, acc1=acc1)
# compute gradient and do SGD step
loss.backward()
# make sure we take a step after processing the last mini-batch in the
# epoch to ensure we start the next epoch with a clean state
if ((i + 1) % n_accumulation_steps == 0) or ((i + 1) == len(train_loader)):
optimizer.step()
optimizer.zero_grad()
else:
optimizer.virtual_step()
if i % print_freq == 0:
if not args.disable_dp:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(
_delta
)
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
f"(ε = {epsilon:.2f}, δ = {_delta}) for α = {best_alpha}"
)
else:
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
)
def test(model, test_loader, device):
model.eval()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
with torch.no_grad():
for images, target in test_loader:
images = images.to(device)
target = target.to(device)
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
top1_avg = np.mean(top1_acc)
stats.update(stats.StatType.TEST, acc1=top1_avg)
print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
return np.mean(top1_acc)
def calc_precision_recall(predicted, actual, positive_value=1):
score = 0 # both predicted and actual are positive
num_positive_predicted = 0 # predicted positive
num_positive_actual = 0 # actual positive
for i in range(len(predicted)):
if predicted[i] == positive_value:
num_positive_predicted += 1
if actual[i] == positive_value:
num_positive_actual += 1
if predicted[i] == actual[i]:
if predicted[i] == positive_value:
score += 1
if num_positive_predicted == 0:
precision = 1
else:
precision = score / num_positive_predicted # the fraction of predicted “Yes” responses that are correct
if num_positive_actual == 0:
recall = 1
else:
recall = score / num_positive_actual # the fraction of “Yes” responses that are predicted correctly
return precision, recall
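# Illustrative worked example (added for clarity, never called below): with
# predicted = [1, 1, 0, 0] and actual = [1, 0, 1, 0] there is one true positive
# out of two predicted positives (precision = 0.5) and one true positive out of
# two actual positives (recall = 0.5).
def _sketch_precision_recall():
    precision, recall = calc_precision_recall([1, 1, 0, 0], [1, 0, 1, 0])
    assert precision == 0.5 and recall == 0.5
    return precision, recall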
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Membership Inference Attack on Resampling')
parser.add_argument('--dataset', default='cifar', help='dataset to test')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--epochs', type=int, default=60, help='num epochs')
parser.add_argument('--noise_multiplier', type=float, default=1.3, help='noise multiplier')
parser.add_argument('--max_grad_norm', type=float, default=10.0, help='max grad norm')
parser.add_argument('--lr', type=float, default=1, help='learning rate')
parser.add_argument('--delta', type=float, default=0.00002, help='delta')
parser.add_argument('--disable_dp', action='store_true', default=False, help='train non-private model')
parser.add_argument('--load_model', action='store_true', default=False, help='use pre trained model')
parser.add_argument('--perform_aug', action='store_true', default=False, help='perform data augmentation')
parser.add_argument('--sampling_type', default='none', help='over, under or smote sampling')
parser.add_argument('--attack_model', default='rf', help='attack model type -- rf, nn')
parser.add_argument('--sampling_ratio', type=float, default=0.5, help='sampling ratio')
args = parser.parse_args()
print(vars(args))
device = torch.device('cpu')
start = time.time()
epsilon = -1
# hparams
_sample_rate=0.04
batch_size_test=256
workers=2
wd=0
_weight_decay=0
_momentum=0.9
na=1
n_accumulation_steps=1
local_rank=-1
lr_schedule='cos'
_optim='SGD'
log_dir=""
data_root='../cifar10'
checkpoint_file='checkpoint'
_delta=1e-5
_secure_rng=False
resume=""
print_freq=10
# Load data
generator=None
augmentations = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
]
normalize = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
if args.perform_aug:
train_transform = transforms.Compose(
augmentations + normalize
# augmentations + normalize if disable_dp else normalize
)
else:
train_transform = transforms.Compose(normalize)
test_transform = transforms.Compose(normalize)
train_dataset = CIFAR10(
root=data_root, train=True, download=True, transform=train_transform
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
num_workers=workers,
generator=generator,
batch_sampler=UniformWithReplacementSampler(
num_samples=len(train_dataset),
sample_rate=_sample_rate,
generator=generator,
),
)
test_dataset = CIFAR10(
root=data_root, train=False, download=True, transform=train_transform
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=batch_size_test,
shuffle=False,
num_workers=workers,
)
X = np.empty(shape=(0,3,32,32))
y = np.empty(shape=(0))
for images, target in train_loader:
X = np.append(X, images, axis=0)
y = np.append(y, target, axis=0)
for images, target in test_loader:
X = np.append(X, images, axis=0)
y = np.append(y, target, axis=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
print(X_train.shape)
print(X_test.shape)
# Perform resampling
if args.sampling_type == 'over':
print("Performing oversampling at {}".format(args.sampling_ratio))
X_train, y_train = RandomOverSampler(sampling_strategy=args.sampling_ratio).fit_resample(X_train, y_train)
elif args.sampling_type == 'under':
print("Performing undersampling at {}".format(args.sampling_ratio))
X_train, y_train = RandomUnderSampler(sampling_strategy=args.sampling_ratio).fit_resample(X_train, y_train)
elif args.sampling_type == 'smote':
print("Performing SMOTE oversampling at {}".format(args.sampling_ratio))
X_train, y_train = SMOTE(sampling_strategy=args.sampling_ratio).fit_resample(X_train, y_train)
print(X_train.shape)
print(y_train.shape)
clipping = {"clip_per_layer": False, "enable_stat": True}
best_acc1 = 0
device = torch.device("cuda")
model = convnet(num_classes=10)
model = model.to(device)
if _optim == "SGD":
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=_momentum,
weight_decay=_weight_decay,
)
elif _optim == "RMSprop":
optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
elif _optim == "Adam":
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
raise NotImplementedError("Optimizer not recognized. Please check spelling")
if not args.disable_dp:
privacy_engine = PrivacyEngine(
model,
sample_rate=_sample_rate * n_accumulation_steps,
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.noise_multiplier,
max_grad_norm=args.max_grad_norm,
secure_rng=_secure_rng,
**clipping,
)
privacy_engine.attach(optimizer)
if not args.load_model:
for epoch in range(1, args.epochs + 1):
if lr_schedule == "cos":
lr = args.lr * 0.5 * (1 + np.cos(np.pi * epoch / (args.epochs + 1)))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
train(model, train_loader, optimizer, epoch, device)
top1_acc = test(model, test_loader, device)
# remember best acc@1 and save checkpoint
is_best = top1_acc > best_acc1
best_acc1 = max(top1_acc, best_acc1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "Convnet",
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=checkpoint_file + ".tar",
)
else:
checkpoint = torch.load('model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
art_classifier = PyTorchClassifier(model, loss=nn.CrossEntropyLoss(),
optimizer=optimizer,
input_shape=(3,32,32),
nb_classes=10,)
pred = np.array([np.argmax(arr) for arr in art_classifier.predict(torch.from_numpy(X_test).type(torch.FloatTensor))])
acc = accuracy_score(y_test, pred)
print('Base private model accuracy: ', acc)
# Black box attack
attack_train_ratio = 0.5
attack_train_size = int(len(X_train) * attack_train_ratio)
attack_test_size = int(len(X_test) * attack_train_ratio)
mlp_attack_bb = MembershipInferenceBlackBox(art_classifier, attack_model_type=args.attack_model)
# train attack model
mlp_attack_bb.fit(torch.from_numpy(X_train[:attack_train_size]).type(torch.FloatTensor), torch.from_numpy(y_train[:attack_train_size]).type(torch.FloatTensor),
torch.from_numpy(X_test[:attack_test_size]).type(torch.FloatTensor), torch.from_numpy(y_test[:attack_test_size]).type(torch.FloatTensor))
# infer
mlp_inferred_train_bb = mlp_attack_bb.infer(torch.from_numpy(X_train).type(torch.FloatTensor), torch.from_numpy(y_train).type(torch.FloatTensor))
mlp_inferred_test_bb = mlp_attack_bb.infer(torch.from_numpy(X_test).type(torch.FloatTensor), torch.from_numpy(y_test).type(torch.FloatTensor))
# check accuracy
    print("Membership inference black-box attack results ({} attack model):".format(args.attack_model))
mlp_train_acc_bb = np.sum(mlp_inferred_train_bb) / len(mlp_inferred_train_bb)
mlp_test_acc_bb = 1 - (np.sum(mlp_inferred_test_bb) / len(mlp_inferred_test_bb))
mlp_acc_bb = (mlp_train_acc_bb * len(mlp_inferred_train_bb) + mlp_test_acc_bb * len(mlp_inferred_test_bb)) / (len(mlp_inferred_train_bb) + len(mlp_inferred_test_bb))
print('train acc: {}'.format(mlp_train_acc_bb))
print('test acc: {}'.format(mlp_test_acc_bb))
print('total acc: {}'.format(mlp_acc_bb))
mlp_prec_recall_bb = calc_precision_recall(np.concatenate((mlp_inferred_train_bb, mlp_inferred_test_bb)),
np.concatenate((np.ones(len(mlp_inferred_train_bb)), np.zeros(len(mlp_inferred_test_bb)))))
print('precision, recall: {}'.format(mlp_prec_recall_bb))
rf_results = {
'train acc': mlp_train_acc_bb,
'test acc': mlp_test_acc_bb,
'total acc': mlp_acc_bb,
'prec, recall': mlp_prec_recall_bb
}
results = [epsilon, acc, mlp_test_acc_bb, mlp_acc_bb]
results_json = {
'experiment_args': vars(args),
'experiment_time': time.time() - start,
'epsilon': epsilon,
'accuracy': acc,
'rf_acc': rf_results,
}
print(results)
# Create experiment directory.
experiment_path = f'/homes/al5217/adversarial-robustness-toolbox/examples/{args.dataset}/{args.sampling_type}/'
experiment_number = len(os.listdir(experiment_path))
print("experiment_number: {}".format(experiment_number))
# Dump the results to file
json_file = Path.cwd() / f'{args.dataset}/{args.sampling_type}/test_results-{experiment_number}.json'
with json_file.open('w') as f:
json.dump(results_json, f, indent=" ")
print("dumped results to {}".format(str(json_file))) | [
"torch.nn.Linear",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.AvgPool2d",
"torch.utils.data.DataLoader",
"torch.nn.Flatten",
"torch.device",
"torch.save",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.from_numpy",
"torch.nn.AdaptiveAvgPool2d"
] | 1.8.1 | ashlylau/adversarial-robustness-toolbox | 0ab24714db39f0d7e428e57f4eb6f9c0d34ca898 |
1.7 | # ################################
# From paper: "End-to-End Waveform Utterance Enhancement for Direct Evaluation
# Metrics Optimization by Fully Convolutional Neural Networks", TASLP, 2018
# Authors: Szu-Wei, Fu 2020
# ################################
import torch
import torchaudio
import numpy as np
from speechbrain.utils.torch_audio_backend import get_torchaudio_backend
# torchaudio_backend = get_torchaudio_backend()
# torchaudio.set_audio_backend(torchaudio_backend)
torchaudio_backend = "soundfile"
torchaudio.set_audio_backend(torchaudio_backend)
smallVal = np.finfo("float").eps # To avoid divide by zero
def thirdoct(fs, nfft, num_bands, min_freq):
"""Returns the 1/3 octave band matrix.
Arguments
---------
fs : int
Sampling rate.
nfft : int
FFT size.
num_bands : int
Number of 1/3 octave bands.
min_freq : int
Center frequency of the lowest 1/3 octave band.
Returns
-------
obm : tensor
Octave Band Matrix.
"""
f = torch.linspace(0, fs, nfft + 1)
f = f[: int(nfft / 2) + 1]
k = torch.from_numpy(np.array(range(num_bands)).astype(float))
cf = torch.pow(2.0 ** (1.0 / 3), k) * min_freq
freq_low = min_freq * torch.pow(2.0, (2 * k - 1) / 6)
freq_high = min_freq * torch.pow(2.0, (2 * k + 1) / 6)
    obm = torch.zeros(num_bands, len(f))  # to be checked
for i in range(len(cf)):
# Match 1/3 oct band freq with fft frequency bin
f_bin = torch.argmin(torch.square(f - freq_low[i]))
freq_low[i] = f[f_bin]
fl_ii = f_bin
f_bin = torch.argmin(torch.square(f - freq_high[i]))
freq_high[i] = f[f_bin]
fh_ii = f_bin
# Assign to the octave band matrix
obm[i, fl_ii:fh_ii] = 1
return obm
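# Minimal usage sketch (mirrors the call made in stoi_loss below, so the shape
# follows directly from the arguments):
#     obm = thirdoct(fs=10000, nfft=512, num_bands=15, min_freq=150)
#     obm.shape  # torch.Size([15, 257]) -- one row of FFT-bin selectors per band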
def removeSilentFrames(x, y, dyn_range=40, N=256, K=128):
w = torch.unsqueeze(torch.from_numpy(np.hanning(256)), 0).to(torch.float)
X1 = x[0 : int(x.shape[0]) // N * N].reshape(int(x.shape[0]) // N, N).T
X2 = (
x[128 : (int(x.shape[0]) - 128) // N * N + 128]
.reshape((int(x.shape[0]) - 128) // N, N)
.T
)
X = torch.zeros(N, X1.shape[1] + X2.shape[1])
X[:, 0::2] = X1
X[:, 1::2] = X2
energy = 20 * torch.log10(
torch.sqrt(torch.matmul(w ** 2, X ** 2)) / 16.0 + smallVal
)
Max_energy = torch.max(energy)
msk = torch.squeeze((energy - Max_energy + dyn_range > 0))
Y1 = y[0 : int(y.shape[0]) // N * N].reshape(int(y.shape[0]) // N, N).T
Y2 = (
y[128 : (int(y.shape[0]) - 128) // N * N + 128]
.reshape((int(y.shape[0]) - 128) // N, N)
.T
)
Y = torch.zeros(N, Y1.shape[1] + Y2.shape[1])
Y[:, 0::2] = Y1
Y[:, 1::2] = Y2
x_sil = w.T.repeat(1, X[:, msk].shape[-1]) * X[:, msk]
y_sil = w.T.repeat(1, X[:, msk].shape[-1]) * Y[:, msk]
x_sil = torch.cat(
(
x_sil[0:128, 0],
(x_sil[0:128, 1:] + x_sil[128:, 0:-1]).T.flatten(),
x_sil[128:256, -1],
),
axis=0,
)
y_sil = torch.cat(
(
y_sil[0:128, 0],
(y_sil[0:128, 1:] + y_sil[128:, 0:-1]).T.flatten(),
y_sil[128:256, -1],
),
axis=0,
)
return [x_sil, y_sil]
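# Rough summary of removeSilentFrames (a sketch, not a spec): both signals are
# cut into 256-sample Hann-windowed frames with 50% overlap, frames whose
# energy falls more than dyn_range dB below the loudest frame of x are dropped
# from x and y alike, and the surviving frames are overlap-added back into
# waveforms of equal length.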
def stoi_loss(y_pred_batch, y_true_batch, lens, reduction="mean"):
"""Compute the STOI score and return -1 * that score.
This function can be used as a loss function for training
with SGD-based updates.
Arguments
---------
y_pred_batch : torch.Tensor
The degraded (enhanced) waveforms.
y_true_batch : torch.Tensor
The clean (reference) waveforms.
lens : torch.Tensor
The relative lengths of the waveforms within the batch.
reduction : str
The type of reduction ("mean" or "batch") to use.
Example
-------
>>> a = torch.sin(torch.arange(16000, dtype=torch.float32)).unsqueeze(0)
>>> b = a + 0.001
>>> -stoi_loss(b, a, torch.ones(1))
tensor(0.7...)
"""
y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)
y_true_batch = torch.squeeze(y_true_batch, dim=-1)
batch_size = y_pred_batch.shape[0]
fs = 16000 # Sampling rate
N = 30 # length of temporal envelope vectors
J = 15.0 # Number of one-third octave bands
octave_band = thirdoct(fs=10000, nfft=512, num_bands=15, min_freq=150)
c = 5.62341325 # 10^(-Beta/20) with Beta = -15
D = torch.zeros(batch_size)
resampler = torchaudio.transforms.Resample(fs, 10000).to(
y_pred_batch.device
)
for i in range(0, batch_size): # Run over mini-batches
y_true = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
y_pred = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
y_true, y_pred = resampler(y_true), resampler(y_pred)
[y_sil_true, y_sil_pred] = removeSilentFrames(y_true, y_pred)
stft_true = torchaudio.transforms.Spectrogram(
n_fft=512, win_length=256, hop_length=128, power=2
)(y_sil_true)
stft_pred = torchaudio.transforms.Spectrogram(
n_fft=512, win_length=256, hop_length=128, power=2
)(y_sil_pred)
OCT_true = torch.sqrt(torch.matmul(octave_band, stft_true) + 1e-14)
OCT_pred = torch.sqrt(torch.matmul(octave_band, stft_pred) + 1e-14)
M = int(
stft_pred.shape[-1] - (N - 1)
) # number of temporal envelope vectors
X = torch.zeros(15 * M, 30)
Y = torch.zeros(15 * M, 30)
for m in range(0, M): # Run over temporal envelope vectors
X[m * 15 : (m + 1) * 15, :] = OCT_true[:, m : m + N]
Y[m * 15 : (m + 1) * 15, :] = OCT_pred[:, m : m + N]
alpha = torch.norm(X, dim=-1, keepdim=True) / (
torch.norm(Y, dim=-1, keepdim=True) + smallVal
)
ay = Y * alpha
y = torch.min(ay, X + X * c)
xn = X - torch.mean(X, dim=-1, keepdim=True)
xn = xn / (torch.norm(xn, dim=-1, keepdim=True) + smallVal)
yn = y - torch.mean(y, dim=-1, keepdim=True)
yn = yn / (torch.norm(yn, dim=-1, keepdim=True) + smallVal)
d = torch.sum(xn * yn)
D[i] = d / (J * M)
if reduction == "mean":
return -D.mean()
return -D
| [
"torch.zeros",
"torch.min",
"torch.max",
"torch.square",
"torch.norm",
"torch.linspace",
"torch.sum",
"torch.squeeze",
"torch.matmul",
"torch.mean",
"torch.pow"
] | 1.7 | jafermarq/speechbrain | b640e366dd0daa713ac2d7d19b77fbf7ed38486c |
1.7 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
class UNet3D(nn.Module):
def __init__(self, in_channels, out_channels, interpolate=True, conv_layer_order='cbr', init_ch=16):
super(UNet3D, self).__init__()
self.no_class = out_channels
# number of groups for the GroupNorm
# num_groups = min(init_ch // 2, 32)
# encoder path consist of 4 subsequent Encoder modules
# the number of features maps is the same as in the paper
self.encoders = nn.ModuleList([
Encoder(in_channels, init_ch, is_max_pool=False, conv_layer_order=conv_layer_order),
Encoder(init_ch, 2 * init_ch, conv_layer_order=conv_layer_order),
Encoder(2 * init_ch, 4 * init_ch, conv_layer_order=conv_layer_order),
Encoder(4 * init_ch, 8 * init_ch, conv_layer_order=conv_layer_order),
])
self.decoders = nn.ModuleList([
Decoder(4 * init_ch + 8 * init_ch, 4 * init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(2 * init_ch + 4 * init_ch, 2 * init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(init_ch + 2 * init_ch, init_ch, interpolate, conv_layer_order=conv_layer_order)
])
self.final_conv = nn.Sequential(nn.Dropout3d(0.1, False),
nn.Conv3d(init_ch, self.no_class, 1))
def forward(self, x):
# encoder part
encoders_features = []
enc1 = self.encoders[0](x)
enc2 = self.encoders[1](enc1)
enc3 = self.encoders[2](enc2)
mid = self.encoders[3](enc3)
encoders_features = [enc3, enc2, enc1]
dec3 = self.decoders[0](enc3, mid)
dec2 = self.decoders[1](enc2, dec3)
dec1 = self.decoders[2](enc1, dec2)
final = self.final_conv(dec1)
return final
# The Encoder, Decoder and DoubleConv utilities below are adapted from an
# existing GitHub implementation; they work correctly, though their design leaves room for improvement.
class Encoder(nn.Module):
def __init__(self, in_channels, out_channels, conv_kernel_size=3, is_max_pool=True,
max_pool_kernel_size=(2, 2, 2), conv_layer_order='cbr', num_groups=32):
super(Encoder, self).__init__()
self.max_pool = nn.MaxPool3d(kernel_size=max_pool_kernel_size, padding=0) if is_max_pool else None
self.double_conv = DoubleConv(in_channels, out_channels,
kernel_size=conv_kernel_size,
order=conv_layer_order,
num_groups=num_groups)
def forward(self, x):
if self.max_pool is not None:
x = self.max_pool(x)
x = self.double_conv(x)
return x
class Decoder(nn.Module):
def __init__(self, in_channels, out_channels, interpolate, kernel_size=3,
scale_factor=(2, 2, 2), conv_layer_order='cbr', num_groups=32):
super(Decoder, self).__init__()
if interpolate:
self.upsample = None
else:
self.upsample = nn.ConvTranspose3d(2 * out_channels,
2 * out_channels,
kernel_size=kernel_size,
stride=scale_factor,
padding=1,
output_padding=0)
self.double_conv = DoubleConv(in_channels, out_channels,
kernel_size=kernel_size,
order=conv_layer_order,
num_groups=num_groups)
def forward(self, encoder_features, x):
if self.upsample is None:
output_size = encoder_features.size()[2:]
x = F.interpolate(x, size=output_size, mode='trilinear')
else:
x = self.upsample(x)
x = torch.cat((encoder_features, x), dim=1)
x = self.double_conv(x)
return x
class DoubleConv(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, order='cbr', num_groups=32):
super(DoubleConv, self).__init__()
if in_channels < out_channels:
# if in_channels < out_channels we're in the encoder path
conv1_in_channels, conv1_out_channels = in_channels, out_channels // 2
conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels
else:
# otherwise we're in the decoder path
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
# conv1
self._add_conv(1, conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups)
# conv2
self._add_conv(2, conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups)
def _add_conv(self, pos, in_channels, out_channels, kernel_size, order, num_groups):
assert pos in [1, 2], 'pos MUST be either 1 or 2'
assert 'c' in order, "'c' (conv layer) MUST be present"
assert 'r' in order, "'r' (ReLU layer) MUST be present"
for i, char in enumerate(order):
if char == 'r':
self.add_module(f'relu{pos}', nn.ReLU(inplace=True))
elif char == 'c':
self.add_module(f'conv{pos}', nn.Conv3d(in_channels,
out_channels,
kernel_size,
padding=1))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
self.add_module(f'norm{pos}', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
self.add_module(f'norm{pos}', nn.BatchNorm3d(in_channels))
else:
self.add_module(f'norm{pos}', nn.BatchNorm3d(out_channels))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of 'b', 'r', 'c'")
if __name__ == '__main__':
import time
model = UNet3D(1, 9, init_ch=16, conv_layer_order='cbr', interpolate=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
start = time.time()
summary(model, (1, 160, 160, 64))
print("take {:f} s".format(time.time() - start)) | [
"torch.cat",
"torch.nn.MaxPool3d",
"torch.nn.functional.interpolate",
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.nn.Dropout3d",
"torch.cuda.is_available",
"torch.nn.Conv3d",
"torch.nn.BatchNorm3d",
"torch.nn.ConvTranspose3d"
] | 1.7.1 | jeff7021/organseg_dags | 2ba7deb90836aaf8e0e35d879fd00b65787bb491 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import torch
from torch import optim
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.environments import SLURMEnvironment
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class AMPTestModel(BoringModel):
def _step(self, batch, batch_idx):
assert torch.is_autocast_enabled()
output = self(batch)
bfloat16 = self.trainer.precision_plugin.is_bfloat16
        assert output.dtype == (torch.float16 if not bfloat16 else torch.bfloat16)
loss = self.loss(batch, output)
return loss
def training_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"loss": output}
def validation_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"x": output}
def test_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
return {"y": output}
def predict(self, batch, batch_idx, dataloader_idx=None):
assert torch.is_autocast_enabled()
output = self(batch)
bfloat16 = self.trainer.precision_plugin.is_bfloat16
        assert output.dtype == (torch.float16 if not bfloat16 else torch.bfloat16)
return output
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
"accelerator",
[
pytest.param("dp", marks=pytest.mark.skip("dp + amp not supported currently")), # TODO
"ddp_spawn",
],
)
@pytest.mark.parametrize(
"precision",
[
16,
pytest.param(
"bf16",
marks=pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_10, reason="torch.bfloat16 not available"),
),
],
)
@pytest.mark.parametrize("gpus", [1, 2])
def test_amp_gpus(tmpdir, accelerator, precision, gpus):
"""Make sure combinations of AMP and training types work if supported."""
tutils.reset_seed()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, gpus=gpus, accelerator=accelerator, precision=precision)
model = AMPTestModel()
# tutils.run_model_test(trainer_options, model)
trainer.fit(model)
trainer.test(model)
trainer.predict(model, DataLoader(RandomDataset(32, 64)))
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_gpus=2)
@mock.patch.dict(
os.environ,
{
"SLURM_NTASKS": "1",
"SLURM_JOB_NAME": "SOME_NAME",
"SLURM_NODEID": "0",
"LOCAL_RANK": "0",
"SLURM_LOCALID": "0",
"SLURM_PROCID": "0",
},
)
def test_amp_gpu_ddp_slurm_managed(tmpdir):
"""Make sure DDP + AMP work."""
# simulate setting slurm flags
tutils.set_random_master_port()
model = AMPTestModel()
# exp file to get meta
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
gpus=[0],
accelerator="ddp_spawn",
precision=16,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
# correct result and ok accuracy
assert trainer.state.finished, "amp + ddp model failed to complete"
# test root model address
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc") == "abc"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23]") == "abc23"
assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24]") == "abc23"
generated = trainer.training_type_plugin.cluster_environment.resolve_root_node_address("abc[23-24, 45-40, 40]")
assert generated == "abc23"
@pytest.mark.skipif(torch.cuda.is_available(), reason="test is restricted only on CPU")
def test_cpu_model_with_amp(tmpdir):
"""Make sure model trains on CPU."""
with pytest.raises(MisconfigurationException, match="AMP is only available on GPU"):
Trainer(precision=16)
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_without_apex(bwd_mock, tmpdir):
"""Check that even with apex amp type without requesting precision=16 the amp backend is void."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, amp_backend="native")
assert trainer.amp_backend is None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, amp_backend="apex")
assert trainer.amp_backend is None
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert not bwd_mock.called
@RunIf(min_gpus=1, amp_apex=True)
@mock.patch("pytorch_lightning.plugins.precision.apex_amp.ApexMixedPrecisionPlugin.backward")
def test_amp_with_apex(bwd_mock, tmpdir):
"""Check calling apex scaling in training."""
class CustomModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=0.01)
optimizer2 = optim.SGD(self.parameters(), lr=0.01)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = CustomModel()
model.training_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_steps=5, precision=16, amp_backend="apex", gpus=1)
assert str(trainer.amp_backend) == "AMPType.APEX"
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert bwd_mock.call_count == 10
assert isinstance(trainer.lr_schedulers[0]["scheduler"].optimizer, optim.Adam)
assert isinstance(trainer.lr_schedulers[1]["scheduler"].optimizer, optim.SGD)
| [
"torch.is_autocast_enabled",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR"
] | 1.6 | yuvalkirstain/pytorch-lightning | ac1744242104a2f6a46a3d211723e4dbf90ad544 |
1.7 | import os
import time
import numpy as np
import torch
import gym
import gym_cabworld
from algorithms.a2c_model import PolicyNetwork
from common.features import clip_state, cut_off_state
from common.logging import create_log_folder, get_last_folder
from common.logging import Tracker
def train_a2c(n_episodes):
from pyvirtualdisplay import Display
Display().start()
env_name = "Cabworld-v0"
env = gym.make(env_name)
n_states = env.observation_space.shape[1]
n_actions = env.action_space.n
max_timesteps = 1000
gamma = 0.99
rewards = []
log_path = create_log_folder("a2c")
tracker = Tracker()
a2c = PolicyNetwork(n_states, n_actions)
for episode in range(n_episodes):
tracker.new_episode()
state = env.reset()
        log_probs = []
        state_values = []
        rewards = []  # reset per episode so returns align with this episode's log_probs
for _ in range(max_timesteps):
action, log_prob, state_value = a2c.get_action(state)
state, reward, done, _ = env.step(action)
tracker.track_reward(reward)
log_probs.append(log_prob)
state_values.append(state_value)
rewards.append(reward)
if done:
print(
f"Episode: {episode} Reward: {tracker.episode_reward} Passengers {tracker.get_pick_ups()}"
)
                returns = []
                Gt = 0
                for reward in rewards[::-1]:
                    Gt = reward + gamma * Gt  # discounted return G_t = r_t + gamma * G_{t+1}
                    returns.append(Gt)
returns = returns[::-1]
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + 1e-9)
a2c.update(returns, log_probs, state_values, episode)
break
a2c.save_model(log_path)
tracker.plot(log_path)
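# Worked example of the discounted-return computation above (illustrative
# numbers): with rewards [1, 0, 2] and gamma = 0.99, iterating backwards gives
# G2 = 2, G1 = 0 + 0.99 * 2 = 1.98, G0 = 1 + 0.99 * 1.98 = 2.9602; the list is
# then reversed to [2.9602, 1.98, 2] and standardized before the policy update.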
def deploy_a2c(n_episodes, wait):
env_name = "Cabworld-v0"
env = gym.make(env_name)
n_states = env.observation_space.shape[1]
n_actions = env.action_space.n
a2c = PolicyNetwork(n_states, n_actions)
current_folder = get_last_folder("a2c")
if not current_folder:
print("No model")
return
current_model = os.path.join(current_folder, "a2c.pth")
print(current_model)
a2c.load_model(current_model)
for _ in range(n_episodes):
state = env.reset()
episode_reward = 0
done = False
while not done:
action = a2c.get_action(state)
state, reward, done, _ = env.step(action)
episode_reward += reward
env.render()
time.sleep(wait)
if done:
print(f"Reward {episode_reward}")
break
| [
"torch.tensor"
] | 1.7.1 | nikolim/cablab | 1dcf0d7da01ed3988f84309acfb31cc9a9893de1 |
1.7 | import os
import numpy as np
import sklearn
from sklearn import preprocessing
import torch
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from utils import load_w2v_feature
import settings
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class ChunkSampler(Sampler):
"""
Samples elements sequentially from some offset.
Arguments:
num_samples: # of desired data points
start: offset where we should start selecting from
"""
def __init__(self, num_samples, start=0):
self.num_samples = num_samples
self.start = start
def __iter__(self):
return iter(range(self.start, self.start + self.num_samples))
def __len__(self):
return self.num_samples
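# Illustrative usage (hypothetical sizes): ChunkSampler(num_samples=5, start=10)
# yields indices 10..14, so passing it as `sampler=` to a DataLoader restricts
# iteration to that contiguous slice of the dataset.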
class InfluenceDataset(Dataset):
def __init__(self, file_dir, seed, shuffle):
self.vertices = np.load(os.path.join(file_dir, "vertex_id.npy"))
logger.info("vertex ids loaded!")
embedding_path = os.path.join(file_dir, "prone.emb2")
max_vertex_idx = np.max(self.vertices)
embedding = load_w2v_feature(embedding_path, max_vertex_idx)
self.embedding = torch.FloatTensor(embedding)
logger.info("global prone embedding loaded")
vertex_features = np.load(os.path.join(file_dir, "vertex_feature.npy"))
vertex_features = preprocessing.scale(vertex_features)
self.vertex_features_dim = vertex_features.shape[1]
vertex_features = np.concatenate((vertex_features, np.zeros(shape=(1, self.vertex_features_dim))), axis=0)
self.vertex_features = torch.FloatTensor(vertex_features)
del vertex_features
logger.info("global vertex features loaded!")
self.graphs = np.load(os.path.join(file_dir, "adjacency_matrix.npy")).astype(np.float32)
# self-loop trick, the input graphs should have no self-loop
identity = np.identity(self.graphs.shape[2], dtype=np.bool_)
self.graphs += identity
self.graphs[self.graphs != False] = True
logger.info("graphs loaded!")
# whether a user has been influenced
# whether he/she is the ego user
self.influence_features = np.load(
os.path.join(file_dir, "influence_feature.npy")).astype(np.float32)
logger.info("influence features loaded!")
self.labels = np.load(os.path.join(file_dir, "label.npy"))
logger.info("labels loaded!")
if shuffle:
self.graphs, self.influence_features, self.labels, self.vertices = \
sklearn.utils.shuffle(
self.graphs, self.influence_features,
self.labels, self.vertices,
random_state=seed
)
self.N = len(self.graphs)
if self.N > settings.TEST_SIZE:
self.graphs = self.graphs[: settings.TEST_SIZE]
self.influence_features = self.influence_features[: settings.TEST_SIZE]
self.labels = self.labels[: settings.TEST_SIZE]
self.vertices = self.vertices[: settings.TEST_SIZE]
self.N = settings.TEST_SIZE
logger.info("%d ego networks loaded, each with size %d" % (self.N, self.graphs.shape[1]))
n_classes = self.get_num_class()
class_weight = self.N / (n_classes * np.bincount(self.labels))
self.class_weight = torch.FloatTensor(class_weight)
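    # Illustrative class-weight computation (hypothetical counts): with
    # N = 1000 samples, 2 classes and label counts [800, 200], the weights are
    # 1000 / (2 * [800, 200]) = [0.625, 2.5], i.e. the minority class is
    # up-weighted when this tensor is used in the loss.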
def get_embedding(self):
return self.embedding
def get_vertex_features(self):
return self.vertex_features
def get_feature_dimension(self):
return self.influence_features.shape[-1]
def get_num_class(self):
return np.unique(self.labels).shape[0]
def get_class_weight(self):
return self.class_weight
def __len__(self):
return self.N
def __getitem__(self, idx):
return self.graphs[idx], self.influence_features[idx], self.labels[idx], self.vertices[idx]
| [
"torch.FloatTensor"
] | 1.7.0 | zfjsail/wechat-wow-analysis | 9fc678f7931f0eac2e8b326d141b60ca7039eece |
0.4 | import torch
from scipy import optimize
import torch.nn.functional as F
import math
import numpy as np
from functools import reduce
from collections import OrderedDict
def branin(x, a=1., b=5.1/(4.*math.pi**2), c=5./math.pi, r=6., s=10.,
t=1./(8*math.pi)):
# branin function, global min of 0.397887 at
# x = (-pi, 12.275), (math.pi, 2.275) and (9.42478, 2.475)
x1, x2 = x[0], x[1]
    return a*(x2 - b*x1**2 + c*x1 - r)**2 + s*(1.-t)*torch.cos(x1) + s
class PyTorchObjective(object):
"""PyTorch objective function, wrapped to be called by scipy.optimize."""
def __init__(self, obj_module):
self.f = obj_module # some pytorch module, that produces a scalar loss
# make an x0 from the parameters in this module
parameters = OrderedDict(obj_module.named_parameters())
self.param_shapes = {n:parameters[n].size() for n in parameters}
# ravel and concatenate all parameters to make x0
self.x0 = np.concatenate([parameters[n].data.cpu().numpy().ravel()
for n in parameters])
def unpack_parameters(self, x):
"""optimize.minimize will supply 1D array, chop it up for each parameter."""
i = 0
named_parameters = OrderedDict()
for n in self.param_shapes:
param_len = reduce(lambda x,y: x*y, self.param_shapes[n])
# slice out a section of this length
param = x[i:i+param_len]
# reshape according to this size, and cast to torch
param = param.reshape(*self.param_shapes[n])
named_parameters[n] = torch.from_numpy(param)
# update index
i += param_len
return named_parameters
def pack_grads(self):
"""pack all the gradients from the parameters in the module into a
numpy array."""
grads = []
for p in self.f.parameters():
grad = p.grad.data.cpu().numpy()
grads.append(grad.ravel())
return np.concatenate(grads)
def is_new(self, x):
# if this is the first thing we've seen
if not hasattr(self, 'cached_x'):
return True
else:
# compare x to cached_x to determine if we've been given a new input
x, self.cached_x = np.array(x), np.array(self.cached_x)
error = np.abs(x - self.cached_x)
return error.max() > 1e-4
def cache(self, x):
# unpack x and load into module
state_dict = self.unpack_parameters(x)
self.f.load_state_dict(state_dict)
# store the raw array as well
self.cached_x = x
# zero the gradient
self.f.zero_grad()
# use it to calculate the objective
obj = self.f()
# backprop the objective
obj.backward()
self.cached_f = obj.item()
self.cached_jac = self.pack_grads()
def fun(self, x):
if self.is_new(x):
self.cache(x)
return self.cached_f
def jac(self, x):
if self.is_new(x):
self.cache(x)
return self.cached_jac
if __name__ == '__main__':
    class BraninModule(torch.nn.Module):  # branin has no parameters, so wrap its input
        def __init__(self):
            super().__init__()
            self.x = torch.nn.Parameter(torch.rand(2))
        def forward(self):
            return branin(self.x)
    obj = PyTorchObjective(BraninModule())
    xL = optimize.minimize(obj.fun, obj.x0, method='BFGS', jac=obj.jac)
    print(obj.x0, xL)
| [
"torch.cos",
"torch.from_numpy"
] | 0.4.1 | gngdb/pytorch-acdc | 60044f39b018cfe7190381c08e9adff546c3bc66 |
1.0 | #!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import configargparse
import codecs
import os
import math
import torch
from itertools import count
from onmt.utils.misc import tile
import onmt.model_builder
import onmt.translate.beam
import onmt.inputters as inputters
import onmt.opts as opts
import onmt.decoders.ensemble
def build_translator(opt, report_score=True, logger=None, out_file=None):
if out_file is None:
out_file = codecs.open(opt.output, 'w+', 'utf-8')
dummy_parser = configargparse.ArgumentParser(description='train.py')
opts.model_opts(dummy_parser)
dummy_opt = dummy_parser.parse_known_args([])[0]
load_test_model = onmt.decoders.ensemble.load_test_model \
if len(opt.models) > 1 else onmt.model_builder.load_test_model
fields, model, model_opt = load_test_model(opt, dummy_opt.__dict__)
scorer = onmt.translate.GNMTGlobalScorer(opt)
translator = Translator(
model,
fields,
opt,
model_opt,
global_scorer=scorer,
out_file=out_file,
report_score=report_score,
logger=logger
)
return translator
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
        global_scorer (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(
self,
model,
fields,
opt,
model_opt,
global_scorer=None,
out_file=None,
report_score=True,
logger=None
):
self.model = model
self.fields = fields
self.gpu = opt.gpu
self.cuda = opt.gpu > -1
self.n_best = opt.n_best
self.max_length = opt.max_length
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
self.beam_size = opt.beam_size
self.random_sampling_temp = opt.random_sampling_temp
self.sample_from_topk = opt.random_sampling_topk
self.min_length = opt.min_length
self.stepwise_penalty = opt.stepwise_penalty
self.dump_beam = opt.dump_beam
self.block_ngram_repeat = opt.block_ngram_repeat
self.ignore_when_blocking = set(opt.ignore_when_blocking)
self.sample_rate = opt.sample_rate
self.window_size = opt.window_size
self.window_stride = opt.window_stride
self.window = opt.window
self.image_channel_size = opt.image_channel_size
self.replace_unk = opt.replace_unk
self.data_type = opt.data_type
self.verbose = opt.verbose
self.report_bleu = opt.report_bleu
self.report_rouge = opt.report_rouge
self.fast = opt.fast
self.copy_attn = model_opt.copy_attn
self.global_scorer = global_scorer
self.out_file = out_file
self.report_score = report_score
self.logger = logger
self.use_filter_pred = False
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def translate(
self,
src,
tgt=None,
src_dir=None,
batch_size=None,
attn_debug=False
):
"""
        Translate the content of `src` and get gold scores if `tgt` is set.
        Note: batch_size must not be None
        Args:
            src: source data (path or list of examples)
            tgt: target data or None
src_dir (str): source directory path
(used for Audio and Image datasets)
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
assert src is not None
if batch_size is None:
raise ValueError("batch_size must be set")
data = inputters.build_dataset(
self.fields,
self.data_type,
src=src,
tgt=tgt,
src_dir=src_dir,
sample_rate=self.sample_rate,
window_size=self.window_size,
window_stride=self.window_stride,
window=self.window,
use_filter_pred=self.use_filter_pred,
image_channel_size=self.image_channel_size,
dynamic_dict=self.copy_attn
)
cur_device = "cuda" if self.cuda else "cpu"
data_iter = inputters.OrderedIterator(
dataset=data,
device=cur_device,
batch_size=batch_size,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
builder = onmt.translate.TranslationBuilder(
data, self.fields, self.n_best, self.replace_unk, tgt
)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
for batch in data_iter:
batch_data = self.translate_batch(
batch, data, attn_debug, fast=self.fast
)
translations = builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if attn_debug:
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(attns[0]))]
header_format = "{:>10.10} " + "{:>10.7} " * len(srcs)
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
output = header_format.format("", *srcs) + '\n'
for word, row in zip(preds, attns):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
os.write(1, output.encode('utf-8'))
if self.report_score:
msg = self._report_score('PRED', pred_score_total,
pred_words_total)
if self.logger:
self.logger.info(msg)
else:
print(msg)
if tgt is not None:
msg = self._report_score('GOLD', gold_score_total,
gold_words_total)
if self.logger:
self.logger.info(msg)
else:
print(msg)
if self.report_bleu:
msg = self._report_bleu(tgt)
if self.logger:
self.logger.info(msg)
else:
print(msg)
if self.report_rouge:
msg = self._report_rouge(tgt)
if self.logger:
self.logger.info(msg)
else:
print(msg)
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions
def sample_with_temperature(self, logits, sampling_temp, keep_topk):
if sampling_temp == 0.0 or keep_topk == 1:
# For temp=0.0, take the argmax to avoid divide-by-zero errors.
# keep_topk=1 is also equivalent to argmax.
topk_scores, topk_ids = logits.topk(1, dim=-1)
else:
logits = torch.div(logits, sampling_temp)
if keep_topk > 0:
top_values, top_indices = torch.topk(logits, keep_topk, dim=1)
kth_best = top_values[:, -1].view([-1, 1])
kth_best = kth_best.repeat([1, logits.shape[1]])
kth_best = kth_best.type(torch.cuda.FloatTensor)
# Set all logits that are not in the top-k to -1000.
# This puts the probabilities close to 0.
keep = torch.ge(logits, kth_best).type(torch.cuda.FloatTensor)
logits = (keep * logits) + ((1-keep) * -10000)
dist = torch.distributions.Multinomial(
logits=logits, total_count=1)
topk_ids = torch.argmax(dist.sample(), dim=1, keepdim=True)
topk_scores = logits.gather(dim=1, index=topk_ids)
return topk_ids, topk_scores
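    # Illustrative behaviour (hypothetical values): with sampling_temp=0.7 and
    # keep_topk=2, the logits are first divided by 0.7, every entry outside the
    # per-row top-2 is pushed down to roughly -10000, and one token id per row
    # is drawn from the resulting multinomial; sampling_temp=0.0 or keep_topk=1
    # fall back to a plain argmax. Note the top-k masking path assumes CUDA tensors.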
def _translate_random_sampling(
self,
batch,
data,
max_length,
min_length=0,
sampling_temp=1.0,
keep_topk=-1,
return_attention=False
):
"""Alternative to beam search. Do random sampling at each step."""
assert self.beam_size == 1
# TODO: support these blacklisted features.
assert self.block_ngram_repeat == 0
batch_size = batch.batch_size
vocab = self.fields["tgt"].vocab
start_token = vocab.stoi[self.fields["tgt"].init_token]
end_token = vocab.stoi[self.fields["tgt"].eos_token]
# Encoder forward.
src, enc_states, memory_bank, src_lengths = self._run_encoder(
batch, data.data_type)
self.model.decoder.init_state(src, memory_bank, enc_states)
use_src_map = data.data_type == 'text' and self.copy_attn
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["attention"] = [[] for _ in range(batch_size)] # noqa: F812
results["batch"] = batch
if "tgt" in batch.__dict__:
results["gold_score"] = self._score_target(
batch,
memory_bank,
src_lengths,
data,
batch.src_map if use_src_map else None
)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
results["gold_score"] = [0] * batch_size
memory_lengths = src_lengths
src_map = batch.src_map if use_src_map else None
if isinstance(memory_bank, tuple):
mb_device = memory_bank[0].device
else:
mb_device = memory_bank.device
# seq_so_far contains chosen tokens; on each step, dim 1 grows by one.
seq_so_far = torch.full(
[batch_size, 1], start_token, dtype=torch.long, device=mb_device)
alive_attn = None
for step in range(max_length):
decoder_input = seq_so_far[:, -1].view(1, -1, 1)
log_probs, attn = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
data,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=torch.arange(batch_size, dtype=torch.long)
)
if step < min_length:
log_probs[:, end_token] = -1e20
# Note that what this code calls log_probs are actually logits.
topk_ids, topk_scores = self.sample_with_temperature(
log_probs, sampling_temp, keep_topk)
# Append last prediction.
seq_so_far = torch.cat([seq_so_far, topk_ids.view(-1, 1)], -1)
if return_attention:
current_attn = attn
if alive_attn is None:
alive_attn = current_attn
else:
alive_attn = torch.cat([alive_attn, current_attn], 0)
predictions = seq_so_far.view(-1, 1, seq_so_far.size(-1))
attention = (
alive_attn.view(
alive_attn.size(0), -1, 1, alive_attn.size(-1))
if alive_attn is not None else None)
for i in range(topk_scores.size(0)):
# Store finished hypotheses for this batch. Unlike in beam search,
# there will only ever be 1 hypothesis per example.
score = topk_scores[i, 0]
pred = predictions[i, 0, 1:] # Ignore start_token.
m_len = memory_lengths[i]
attn = attention[:, i, 0, :m_len] if attention is not None else []
results["scores"][i].append(score)
results["predictions"][i].append(pred)
results["attention"][i].append(attn)
return results
def translate_batch(self, batch, data, attn_debug, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
if self.beam_size == 1:
return self._translate_random_sampling(
batch,
data,
self.max_length,
min_length=self.min_length,
sampling_temp=self.random_sampling_temp,
keep_topk=self.sample_from_topk,
return_attention=attn_debug or self.replace_unk)
if fast:
return self._fast_translate_batch(
batch,
data,
self.max_length,
min_length=self.min_length,
n_best=self.n_best,
return_attention=attn_debug or self.replace_unk)
else:
return self._translate_batch(batch, data)
def _run_encoder(self, batch, data_type):
src = inputters.make_features(batch, 'src', data_type)
src_lengths = None
if data_type == 'text':
_, src_lengths = batch.src
elif data_type == 'audio':
src_lengths = batch.src_lengths
enc_states, memory_bank, src_lengths = self.model.encoder(
src, src_lengths)
if src_lengths is None:
assert not isinstance(memory_bank, tuple), \
'Ensemble decoding only supported for text data'
src_lengths = torch.Tensor(batch.batch_size) \
.type_as(memory_bank) \
.long() \
.fill_(memory_bank.size(0))
return src, enc_states, memory_bank, src_lengths
def _decode_and_generate(
self,
decoder_in,
memory_bank,
batch,
data,
memory_lengths,
src_map=None,
step=None,
batch_offset=None
):
unk_idx = self.fields["tgt"].vocab.stoi[self.fields["tgt"].unk_token]
if self.copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(
decoder_in.gt(len(self.fields["tgt"].vocab) - 1), unk_idx
)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = self.model.decoder(
decoder_in, memory_bank, memory_lengths=memory_lengths, step=step
)
# Generator forward.
if not self.copy_attn:
attn = dec_attn["std"]
log_probs = self.model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn["copy"]
scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),
attn.view(-1, attn.size(2)),
src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(batch.batch_size, -1, scores.size(-1))
else:
scores = scores.view(-1, self.beam_size, scores.size(-1))
scores = data.collapse_copy_scores(
scores,
batch,
self.fields["tgt"].vocab,
data.src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
def _fast_translate_batch(
self,
batch,
data,
max_length,
min_length=0,
n_best=1,
return_attention=False
):
# TODO: support these blacklisted features.
assert not self.dump_beam
assert not self.use_filter_pred
assert self.block_ngram_repeat == 0
assert self.global_scorer.beta == 0
beam_size = self.beam_size
batch_size = batch.batch_size
vocab = self.fields["tgt"].vocab
start_token = vocab.stoi[self.fields["tgt"].init_token]
end_token = vocab.stoi[self.fields["tgt"].eos_token]
# Encoder forward.
src, enc_states, memory_bank, src_lengths = self._run_encoder(
batch, data.data_type)
self.model.decoder.init_state(src, memory_bank, enc_states)
use_src_map = data.data_type == 'text' and self.copy_attn
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["attention"] = [[] for _ in range(batch_size)] # noqa: F812
results["batch"] = batch
if "tgt" in batch.__dict__:
results["gold_score"] = self._score_target(
batch,
memory_bank,
src_lengths,
data,
batch.src_map if use_src_map else None
)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
results["gold_score"] = [0] * batch_size
# Tile states and memory beam_size times.
self.model.decoder.map_state(
lambda state, dim: tile(state, beam_size, dim=dim))
if isinstance(memory_bank, tuple):
memory_bank = tuple(tile(x, beam_size, dim=1) for x in memory_bank)
mb_device = memory_bank[0].device
else:
memory_bank = tile(memory_bank, beam_size, dim=1)
mb_device = memory_bank.device
memory_lengths = tile(src_lengths, beam_size)
src_map = (tile(batch.src_map, beam_size, dim=1)
if use_src_map else None)
top_beam_finished = torch.zeros([batch_size], dtype=torch.uint8)
batch_offset = torch.arange(batch_size, dtype=torch.long)
beam_offset = torch.arange(
0, batch_size * beam_size, step=beam_size, dtype=torch.long,
device=mb_device)
alive_seq = torch.full(
[batch_size * beam_size, 1], start_token, dtype=torch.long,
device=mb_device)
alive_attn = None
# Give full probability to the first beam on the first step.
topk_log_probs = torch.tensor(
[0.0] + [float("-inf")] * (beam_size - 1), device=mb_device
).repeat(batch_size)
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
for step in range(max_length):
decoder_input = alive_seq[:, -1].view(1, -1, 1)
log_probs, attn = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
data,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=batch_offset
)
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
topk_beam_index = topk_ids.div(vocab_size)
topk_ids = topk_ids.fmod(vocab_size)
# Map beam_index to batch_index in the flat representation.
batch_index = (
topk_beam_index
+ beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat(
[alive_seq.index_select(0, select_indices),
topk_ids.view(-1, 1)], -1)
if return_attention:
current_attn = attn.index_select(1, select_indices)
if alive_attn is None:
alive_attn = current_attn
else:
alive_attn = alive_attn.index_select(1, select_indices)
alive_attn = torch.cat([alive_attn, current_attn], 0)
is_finished = topk_ids.eq(end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# Save finished hypotheses.
if is_finished.any():
# Penalize beams that finished.
topk_log_probs.masked_fill_(is_finished, -1e10)
is_finished = is_finished.to('cpu')
top_beam_finished |= is_finished[:, 0].eq(1)
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
attention = (
alive_attn.view(
alive_attn.size(0), -1, beam_size, alive_attn.size(-1))
if alive_attn is not None else None)
non_finished_batch = []
for i in range(is_finished.size(0)):
b = batch_offset[i]
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((
topk_scores[i, j],
predictions[i, j, 1:], # Ignore start_token.
attention[:, i, j, :memory_lengths[i]]
if attention is not None else None))
# End condition is the top beam finished and we can return
# n_best hypotheses.
if top_beam_finished[i] and len(hypotheses[b]) >= n_best:
best_hyp = sorted(
hypotheses[b], key=lambda x: x[0], reverse=True)
for n, (score, pred, attn) in enumerate(best_hyp):
if n >= n_best:
break
results["scores"][b].append(score)
results["predictions"][b].append(pred)
results["attention"][b].append(
attn if attn is not None else [])
else:
non_finished_batch.append(i)
non_finished = torch.tensor(non_finished_batch)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
top_beam_finished = top_beam_finished.index_select(
0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
non_finished = non_finished.to(topk_ids.device)
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
select_indices = batch_index.view(-1)
alive_seq = predictions.index_select(0, non_finished) \
.view(-1, alive_seq.size(-1))
if alive_attn is not None:
alive_attn = attention.index_select(1, non_finished) \
.view(alive_attn.size(0),
-1, alive_attn.size(-1))
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices)
for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
return results
def _translate_batch(self, batch, data):
# (0) Prep each of the components of the search.
# And helper method for reducing verbosity.
beam_size = self.beam_size
batch_size = batch.batch_size
data_type = data.data_type
vocab = self.fields["tgt"].vocab
# Define a set of tokens to exclude from ngram-blocking
exclusion_tokens = {vocab.stoi[t] for t in self.ignore_when_blocking}
pad = vocab.stoi[self.fields['tgt'].pad_token]
eos = vocab.stoi[self.fields['tgt'].eos_token]
bos = vocab.stoi[self.fields['tgt'].init_token]
beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
cuda=self.cuda,
global_scorer=self.global_scorer,
pad=pad, eos=eos, bos=bos,
min_length=self.min_length,
stepwise_penalty=self.stepwise_penalty,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=exclusion_tokens)
for __ in range(batch_size)]
# (1) Run the encoder on the src.
src, enc_states, memory_bank, src_lengths = self._run_encoder(
batch, data_type)
self.model.decoder.init_state(src, memory_bank, enc_states)
results = {}
results["predictions"] = []
results["scores"] = []
results["attention"] = []
results["batch"] = batch
if "tgt" in batch.__dict__:
results["gold_score"] = self._score_target(
batch, memory_bank, src_lengths, data, batch.src_map
if data_type == 'text' and self.copy_attn else None)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
results["gold_score"] = [0] * batch_size
# (2) Repeat src objects `beam_size` times.
# We use now batch_size x beam_size (same as fast mode)
src_map = (tile(batch.src_map, beam_size, dim=1)
if data.data_type == 'text' and self.copy_attn else None)
self.model.decoder.map_state(
lambda state, dim: tile(state, beam_size, dim=dim))
if isinstance(memory_bank, tuple):
memory_bank = tuple(tile(x, beam_size, dim=1) for x in memory_bank)
else:
memory_bank = tile(memory_bank, beam_size, dim=1)
memory_lengths = tile(src_lengths, beam_size)
# (3) run the decoder to generate sentences, using beam search.
for i in range(self.max_length):
if all((b.done() for b in beam)):
break
# (a) Construct batch x beam_size nxt words.
# Get all the pending current beam words and arrange for forward.
inp = torch.stack([b.get_current_state() for b in beam])
inp = inp.view(1, -1, 1)
# (b) Decode and forward
out, beam_attn = self._decode_and_generate(
inp, memory_bank, batch, data, memory_lengths=memory_lengths,
src_map=src_map, step=i
)
out = out.view(batch_size, beam_size, -1)
beam_attn = beam_attn.view(batch_size, beam_size, -1)
# (c) Advance each beam.
select_indices_array = []
# Loop over the batch_size number of beam
for j, b in enumerate(beam):
b.advance(out[j, :],
beam_attn.data[j, :, :memory_lengths[j]])
select_indices_array.append(
b.get_current_origin() + j * beam_size)
select_indices = torch.cat(select_indices_array)
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
# (4) Extract sentences from beam.
for b in beam:
scores, ks = b.sort_finished(minimum=self.n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:self.n_best]):
hyp, att = b.get_hyp(times, k)
hyps.append(hyp)
attn.append(att)
results["predictions"].append(hyps)
results["scores"].append(scores)
results["attention"].append(attn)
return results
def _score_target(self, batch, memory_bank, src_lengths, data, src_map):
tgt_in = inputters.make_features(batch, 'tgt')[:-1]
log_probs, attn = self._decode_and_generate(
tgt_in, memory_bank, batch, data,
memory_lengths=src_lengths, src_map=src_map)
tgt_pad = self.fields["tgt"].vocab.stoi[self.fields["tgt"].pad_token]
log_probs[:, :, tgt_pad] = 0
gold = batch.tgt[1:].unsqueeze(2)
gold_scores = log_probs.gather(2, gold)
gold_scores = gold_scores.sum(dim=0).view(-1)
return gold_scores
def _report_score(self, name, score_total, words_total):
if words_total == 0:
msg = "%s No words predicted" % (name,)
else:
msg = ("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / words_total,
name, math.exp(-score_total / words_total)))
return msg
def _report_bleu(self, tgt_path):
import subprocess
base_dir = os.path.abspath(__file__ + "/../../..")
# Rollback pointer to the beginning.
self.out_file.seek(0)
print()
res = subprocess.check_output(
"perl %s/tools/multi-bleu.perl %s" % (base_dir, tgt_path),
stdin=self.out_file, shell=True
).decode("utf-8")
msg = ">> " + res.strip()
return msg
def _report_rouge(self, tgt_path):
import subprocess
path = os.path.split(os.path.realpath(__file__))[0]
msg = subprocess.check_output(
"python %s/tools/test_rouge.py -r %s -c STDIN" % (path, tgt_path),
shell=True, stdin=self.out_file
).decode("utf-8").strip()
return msg
| [
"torch.zeros",
"torch.cat",
"torch.arange",
"torch.no_grad",
"torch.full",
"torch.tensor",
"torch.div",
"torch.ge",
"torch.Tensor",
"torch.distributions.Multinomial",
"torch.topk"
] | 1.0 | hkhpub/opennmt-hkh | 63dceae7f8737a1780e91f2e727c7875ec0ebfdf |
1.6 | '''
Model file and non-differentially private file
'''
import time
import torch
import torch.nn.functional as F
from torch import nn, optim
import data
import utils
class EmbeddingNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
# Embedding dimension: vocab_size + <unk>, <pad>, <eos>, <sos>
self.emb = nn.Embedding(vocab_size + 4, 16)
self.fc1 = nn.Linear(16, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.mean(1) # batch_size, embed_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class LSTMNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
# Embedding dimension: vocab_size + <unk>, <pad>, <eos>, <sos>
self.emb = nn.Embedding(vocab_size + 4, 100)
self.lstm = nn.LSTM(100, 100)
self.fc1 = nn.Linear(100, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.transpose(0, 1) # seq_len, batch_size, embed_dim
x, _ = self.lstm(x) # seq_len, batch_size, lstm_dim
x = x.mean(0) # batch_size, lstm_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class MNISTNet(nn.Module):
def __init__(self, **_):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
class FFNN(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
class Logistic(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 1)
def forward(self, x):
out = self.fc1(x)
out = F.sigmoid(out)
return out
class CIFAR10Model(nn.Module):
def __init__(self, **_):
super().__init__()
self.layer_list = nn.ModuleList([
nn.Sequential(nn.Conv2d(3, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(32, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(32, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(64, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(64, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(128, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(128, 256, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Conv2d(256, 10, (3, 3), padding=1, stride=(1, 1)),
])
def forward(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return torch.mean(x, dim=(2, 3))
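# The model above is fully convolutional: the final Conv2d produces 10 channels
# and torch.mean over dims (2, 3) acts as global average pooling, so for a
# batch of CIFAR-10 images of shape [B, 3, 32, 32] the forward pass returns
# [B, 10] class scores without any fully connected head.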
model_dict = {
'mnist': MNISTNet,
'lstm': LSTMNet,
'embed': EmbeddingNet,
'ffnn': FFNN,
'logreg': Logistic,
'cifar10': CIFAR10Model,
}
def get_data(args):
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NCHW',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
train_data, _ = data_fn(**kwargs)
for d in train_data: # train_data, train_labels
d = torch.from_numpy(d)
if d.dtype == torch.int32:
d = d.long()
if args.experiment == 'logreg' and d.dtype != torch.float32:
d = d.float()
yield d
def main(args):
print(args)
assert not args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
optimizer.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
args = parser.parse_args()
main(args)
| [
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.LSTM",
"torch.nn.AvgPool2d",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.mean",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.Embedding"
] | 1.6.0 | jessijzhao/fast-dpsgd | 143370bd1a11076a461e1d06e235db9b1d6b5de5 |
1.6 | from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, l = 3):
val = val if isinstance(val, tuple) else (val,)
return (*val, *((val[-1],) * max(l - len(val), 0)))
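# Illustrative behaviour of cast_tuple (hypothetical values): cast_tuple(64)
# returns (64, 64, 64) and cast_tuple((32, 64), 3) returns (32, 64, 64) -- a
# scalar or short tuple is padded with its last value up to `l` entries, which
# is how `dim`, `depth` and `heads` below may be given per stage or just once.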
def always(val):
return lambda *args, **kwargs: val
# classes
class FeedForward(nn.Module):
def __init__(self, dim, mult, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(dim, dim * mult, 1),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(dim * mult, dim, 1),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, fmap_size, heads = 8, dim_key = 32, dim_value = 64, dropout = 0., dim_out = None, downsample = False):
super().__init__()
inner_dim_key = dim_key * heads
inner_dim_value = dim_value * heads
dim_out = default(dim_out, dim)
self.heads = heads
self.scale = dim_key ** -0.5
self.to_q = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, stride = (2 if downsample else 1), bias = False), nn.BatchNorm2d(inner_dim_key))
self.to_k = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, bias = False), nn.BatchNorm2d(inner_dim_key))
self.to_v = nn.Sequential(nn.Conv2d(dim, inner_dim_value, 1, bias = False), nn.BatchNorm2d(inner_dim_value))
self.attend = nn.Softmax(dim = -1)
self.to_out = nn.Sequential(
nn.GELU(),
nn.Conv2d(inner_dim_value, dim_out, 1),
nn.BatchNorm2d(dim_out),
nn.Dropout(dropout)
)
# positional bias
self.pos_bias = nn.Embedding(fmap_size * fmap_size, heads)
q_range = torch.arange(0, fmap_size, step = (2 if downsample else 1))
k_range = torch.arange(fmap_size)
q_pos = torch.stack(torch.meshgrid(q_range, q_range), dim = -1)
k_pos = torch.stack(torch.meshgrid(k_range, k_range), dim = -1)
q_pos, k_pos = map(lambda t: rearrange(t, 'i j c -> (i j) c'), (q_pos, k_pos))
rel_pos = (q_pos[:, None, ...] - k_pos[None, :, ...]).abs()
x_rel, y_rel = rel_pos.unbind(dim = -1)
pos_indices = (x_rel * fmap_size) + y_rel
self.register_buffer('pos_indices', pos_indices)
def apply_pos_bias(self, fmap):
bias = self.pos_bias(self.pos_indices)
bias = rearrange(bias, 'i j h -> () h i j')
return fmap + bias
def forward(self, x):
b, n, *_, h = *x.shape, self.heads
q = self.to_q(x)
y = q.shape[2]
qkv = (q, self.to_k(x), self.to_v(x))
q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
dots = self.apply_pos_bias(dots)
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', h = h, y = y)
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult = 2, dropout = 0., dim_out = None, downsample = False):
super().__init__()
dim_out = default(dim_out, dim)
self.layers = nn.ModuleList([])
self.attn_residual = (not downsample) and dim == dim_out
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim, fmap_size = fmap_size, heads = heads, dim_key = dim_key, dim_value = dim_value, dropout = dropout, downsample = downsample, dim_out = dim_out),
FeedForward(dim_out, mlp_mult, dropout = dropout)
]))
def forward(self, x):
for attn, ff in self.layers:
attn_res = (x if self.attn_residual else 0)
x = attn(x) + attn_res
x = ff(x) + x
return x
class LeViT(nn.Module):
def __init__(
self,
*,
image_size,
num_classes,
dim,
depth,
heads,
mlp_mult,
stages = 3,
dim_key = 32,
dim_value = 64,
dropout = 0.,
num_distill_classes = None
):
super().__init__()
dims = cast_tuple(dim, stages)
depths = cast_tuple(depth, stages)
layer_heads = cast_tuple(heads, stages)
        assert all(map(lambda t: len(t) == stages, (dims, depths, layer_heads))), 'dimensions, depths, and heads must be tuples of length equal to the number of stages'
self.conv_embedding = nn.Sequential(
nn.Conv2d(3, 32, 3, stride = 2, padding = 1),
nn.Conv2d(32, 64, 3, stride = 2, padding = 1),
nn.Conv2d(64, 128, 3, stride = 2, padding = 1),
nn.Conv2d(128, dims[0], 3, stride = 2, padding = 1)
)
fmap_size = image_size // (2 ** 4)
layers = []
for ind, dim, depth, heads in zip(range(stages), dims, depths, layer_heads):
is_last = ind == (stages - 1)
layers.append(Transformer(dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult, dropout))
if not is_last:
next_dim = dims[ind + 1]
layers.append(Transformer(dim, fmap_size, 1, heads * 2, dim_key, dim_value, dim_out = next_dim, downsample = True))
fmap_size = ceil(fmap_size / 2)
self.backbone = nn.Sequential(*layers)
self.pool = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Rearrange('... () () -> ...')
)
self.distill_head = nn.Linear(dim, num_distill_classes) if exists(num_distill_classes) else always(None)
self.mlp_head = nn.Linear(dim, num_classes)
def forward(self, img):
x = self.conv_embedding(img)
x = self.backbone(x)
x = self.pool(x)
out = self.mlp_head(x)
distill = self.distill_head(x)
if exists(distill):
return out, distill
return out
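# Minimal usage sketch (not part of the original file); the hyperparameter values below
# are illustrative assumptions, not a reference LeViT configuration.
if __name__ == '__main__':
    _levit = LeViT(
        image_size = 224,
        num_classes = 1000,
        stages = 3,                 # one entry per stage when tuples are given
        dim = (256, 384, 512),      # per-stage embedding dimensions
        depth = 4,                  # a scalar is broadcast to every stage by cast_tuple
        heads = (4, 6, 8),
        mlp_mult = 2,
        dropout = 0.1
    )
    _img = torch.randn(1, 3, 224, 224)
    _logits = _levit(_img)          # shape (1, 1000); a distill head is returned only if num_distill_classes is set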
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.AdaptiveAvgPool2d",
"torch.arange",
"torch.nn.ModuleList",
"torch.einsum",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.meshgrid",
"torch.nn.GELU",
"torch.nn.Embedding"
] | 1.6 | jhvics1/vit-pytorch | bad4b94e7b4baa544ca36149431f7912eccd4b49 |
1.7 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import random
import warnings
from loguru import logger
import os
import torch
import torch.backends.cudnn as cudnn
from yolox.core import Trainer, launch
from yolox.exp import get_exp
from yolox.utils import configure_nccl
os.environ['CUDA_VISIBLE_DEVICES']='1,2'
def make_parser():
parser = argparse.ArgumentParser("YOLOX train parser")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
# distributed
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--dist-url", default=None, type=str, help="url used to set up distributed training"
)
parser.add_argument("-b", "--batch-size", type=int, default=32, help="batch size")
parser.add_argument(
"-d", "--devices", default=2, type=int, help="device for training"
)
parser.add_argument(
"--local_rank", default=0, type=int, help="local rank for dist training"
)
parser.add_argument(
"-f",
"--exp_file",
default='/home/meprint/sunanlin_folder/YOLOX-main/yolox/exp/yolox_base.py',
type=str,
help="plz input your expriment description file",
)
parser.add_argument(
"--resume", default=False, action="store_true", help="resume training"
)
parser.add_argument("-c", "--ckpt", default='yoloxl_.pth.tar', type=str, help="pre checkpoint file")
parser.add_argument(
"-e", "--start_epoch", default=None, type=int, help="resume training start epoch"
)
parser.add_argument(
"--num_machine", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"--fp16",
dest="fp16",
default=True,
action="store_true",
help="Adopting mix precision training.",
)
parser.add_argument(
"-o",
"--occupy",
dest="occupy",
default=True,
action="store_true",
help="occupy GPU memory first for training.",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name
if exp.seed is not None:
random.seed(exp.seed)
torch.manual_seed(exp.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! You may see unexpected behavior "
"when restarting from checkpoints."
)
# set environment variables for distributed training
configure_nccl()
cudnn.benchmark = True
trainer = Trainer(exp, args)
trainer.train()
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
num_gpu = torch.cuda.device_count() if args.devices is None else args.devices
assert num_gpu <= torch.cuda.device_count()
dist_url = "auto" if args.dist_url is None else args.dist_url
launch(
main, num_gpu, args.num_machine, backend=args.dist_backend,
dist_url=dist_url, args=(exp, args)
)
| [
"torch.manual_seed",
"torch.cuda.device_count"
] | 1.7 | 1298998/YOLOX-train-your-data | be50386e5cab7614924796bf6b6bde581d14d4aa |
1.2 | from typing import Dict, Any
import torch
from allennlp.training.optimizers import Optimizer
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.common.params import Params
class CosineWithRestartsTest(AllenNlpTestCase):
def setUp(self):
super().setUp()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
# We use these cases to verify that the scheduler works as expected.
        # Each case consists of 4 parameters:
# - epochs: the total # of epochs to run for.
# - params: parameters passed to initialize the scheduler.
# - learning rate checks: a list of tuples, each of which specifies an epoch
# number and the expected value of the learning rate at that epoch.
# - checkpoints: a list of epoch numbers at which to save the scheduler
# state, and then restore from the saved state and resume.
self.cosine_schedule_cases = [
(
30,
{"t_initial": 30, "t_mul": 1.0},
[(0, 1.0), (15, 0.5000000000000001), (29, 0.0027390523158632996)],
[10, 14],
),
(10, {"t_initial": 1, "t_mul": 2.0}, [(0, 1.0), (1, 1.0), (2, 0.5), (3, 1.0)], [1, 3]),
(30, {"t_initial": 1, "t_mul": 1.0}, [(0, 1.0), (15, 1.0), (29, 1.0)], []),
(
60,
{"t_initial": 30, "t_mul": 1.0},
[
(0, 1.0),
(15, 0.5000000000000001),
(29, 0.0027390523158632996),
(30, 1.0),
(45, 0.5000000000000001),
(59, 0.0027390523158632996),
],
[30, 35],
),
(
60,
{"t_initial": 30, "t_mul": 1.0, "eta_mul": 0.5},
[(0, 1.0), (15, 0.5000000000000001), (29, 0.0027390523158632996), (30, 0.5)],
[],
),
(
100,
{"t_initial": 30, "t_mul": 1.5},
[(0, 1.0), (29, 0.0027390523158632996), (30, 1.0), (74, 0.0012179748700879012)],
[],
),
(
210,
{"t_initial": 30, "t_mul": 2},
[
(0, 1.0),
(29, 0.0027390523158632996),
(30, 1.0),
(89, 0.0006852326227130834),
(90, 1.0),
(209, 0.00017133751222137006),
],
[],
),
(
210,
{"t_initial": 30, "t_mul": 2, "eta_mul": 0.5},
[(0, 1.0), (30, 0.5), (90, 0.25)],
[29, 90],
),
(
150,
{"t_initial": 30, "t_mul": 1},
[
(0, 1.0),
(29, 0.0027390523158632996),
(30, 1.0),
(59, 0.0027390523158632996),
(60, 1.0),
(89, 0.0027390523158632996),
(90, 1.0),
],
[],
),
(10, {"t_initial": 1, "t_mul": 1, "eta_mul": 0.5}, [(0, 1.0), (1, 0.5), (2, 0.25)], []),
]
def _get_optimizer(self, lr: float = 1.0):
return Optimizer.from_params(
self.model.named_parameters(), Params({"type": "sgd", "lr": lr})
)
def test_from_params(self):
"""Make sure `from_params` initializes an instance properly."""
optim = self._get_optimizer()
sched = LearningRateScheduler.from_params(optim, Params({"type": "cosine", "t_initial": 5}))
assert sched.t_initial == 5
assert sched.last_epoch == -1
# Learning should be unchanged after initializing scheduler.
assert optim.param_groups[0]["lr"] == 1.0
with self.assertRaises(TypeError):
# t_initial is required.
LearningRateScheduler.from_params(optim, Params({"type": "cosine"}))
def test_schedules(self):
"""Make sure the math is correct."""
for epochs, params, lr_checks, _ in self.cosine_schedule_cases:
optimizer = self._get_optimizer()
params["type"] = "cosine"
scheduler = LearningRateScheduler.from_params(optimizer, Params(params))
lrs = [optimizer.param_groups[0]["lr"]]
for epoch in range(epochs):
scheduler.step(epoch)
lrs.append(optimizer.param_groups[0]["lr"])
for it, lr in lr_checks:
assert lrs[it] == lr, f"Iteration {it}: {lrs[it]} != {lr}"
def test_schedules_with_save_and_resume(self):
"""Make sure scheduler will resume with the right state."""
def init_and_restore_scheduler(
optimizer: torch.optim.Optimizer,
params: Dict[str, Any],
state_dict: Dict[str, Any] = None,
):
"""
Initialize a new scheduler and optionally restore its state from
a checkpoint.
"""
params["type"] = "cosine"
scheduler = LearningRateScheduler.from_params(optimizer, Params(params))
if state_dict is not None:
scheduler.load_state_dict(state_dict)
return scheduler
for epochs, params, lr_checks, checkpoints in self.cosine_schedule_cases:
optimizer = self._get_optimizer()
scheduler = init_and_restore_scheduler(optimizer, params)
state = scheduler.state_dict()
lrs = [optimizer.param_groups[0]["lr"]]
for epoch in range(epochs):
if epoch in checkpoints:
# Restore scheduler from state dict.
scheduler = init_and_restore_scheduler(optimizer, params, state_dict=state)
# Take step and record learning rate.
scheduler.step(1, epoch)
lrs.append(optimizer.param_groups[0]["lr"])
# Save state again.
state = scheduler.state_dict()
for it, lr in lr_checks:
assert lrs[it] == lr, f"Iteration {it}: {lrs[it]} != {lr}"
| [
"torch.nn.Linear"
] | 1.2.0 | avinashsai/allennlp | 6da30e9d53bd2c8199848addc78ff0e29d79b542 |
1.9 | from torch import nn
from src.retrieval_core.models.pooling.GeM import GeM
class PoolFactory(nn.Module):
def __init__(self, pool='max'):
super(PoolFactory, self).__init__()
pool_type = {
'avg': nn.AdaptiveAvgPool2d(1),
'max': nn.AdaptiveMaxPool2d(1),
'gem': GeM()
}
if pool not in pool_type.keys():
raise ValueError('Unknown pooling methods for {}'.format(pool))
else:
self.pool = pool_type[pool]
def forward(self, x):
return self.pool(x)
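# Minimal usage sketch (comment only, not part of the original file); the feature-map
# shape below is an arbitrary assumption:
#
#   import torch
#   pool = PoolFactory(pool='max')
#   feats = torch.randn(2, 512, 7, 7)
#   pooled = pool(feats)   # (2, 512, 1, 1) for 'avg'/'max'; 'gem' dispatches to the GeM layer imported above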
| [
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.AdaptiveMaxPool2d"
] | 1.9.0 | RImbriaco/OML | 4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b |
1.4 | import torch.nn as nn
from models.blocks import GlobalAvgPool2d
class _VanillaConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super(_VanillaConvBlock, self).__init__()
self._block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self._block(x)
class VanillaCnn(nn.Module):
def __init__(self, class_count=10, use_softmax=True):
super(VanillaCnn, self).__init__()
self._features = nn.Sequential(_VanillaConvBlock(in_channels=3, out_channels=8, kernel_size=3),
_VanillaConvBlock(in_channels=8, out_channels=16, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=16, out_channels=32, kernel_size=3),
_VanillaConvBlock(in_channels=32, out_channels=64, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=64, out_channels=128, kernel_size=3),
_VanillaConvBlock(in_channels=128, out_channels=256, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2))
classifier_layers = [
GlobalAvgPool2d(),
nn.Conv2d(256, class_count, kernel_size=1)
]
if use_softmax:
classifier_layers.append(nn.Softmax(dim=1))
self._classifier = nn.Sequential(*classifier_layers)
def forward(self, x):
y = self._features(x)
return self._classifier(y)[:, :, 0, 0]
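# Minimal usage sketch (not part of the original file); the CIFAR-10-style input shape
# is an assumption based on the 3-channel stem and the 10-class default.
if __name__ == '__main__':
    import torch
    _model = VanillaCnn(class_count=10, use_softmax=True)
    _x = torch.rand(4, 3, 32, 32)
    _probs = _model(_x)   # shape (4, 10); rows sum to 1 because of the softmax head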
| [
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.4.0 | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel
from .layer import AttentionLayer as AL, GlobalAttentionLayer as GoAL, \
StructAttentionLayer as SAL, ResAttentionLayer as RAL, ContAttentionLayer as CAL
from .dataset import get_lm_path
class TranHGAT(nn.Module):
def __init__(self, attr_num, device='cpu', finetuning=True, lm='bert', lm_path=None):
super().__init__()
# load the model or model checkpoint
path = get_lm_path(lm, lm_path)
self.lm = lm
if lm == 'bert':
from transformers import BertModel
self.bert = BertModel.from_pretrained(path)
elif lm == 'distilbert':
from transformers import DistilBertModel
self.bert = DistilBertModel.from_pretrained(path)
elif lm == 'roberta' or lm == 'roberta-large':
from transformers import RobertaModel
self.bert = RobertaModel.from_pretrained(path)
elif lm == 'xlnet':
from transformers import XLNetModel
self.bert = XLNetModel.from_pretrained(path)
self.device = device
self.finetuning = finetuning
hidden_size = self.bert.config.hidden_size
hidden_dropout_prob = 0.1
self.inits = nn.ModuleList([
GoAL(hidden_size, 0.2)
for _ in range(attr_num)])
self.alls = nn.ModuleList([
GoAL(hidden_size, 0.2)
for _ in range(attr_num)])
self.oves = nn.ModuleList([
CAL(hidden_size + hidden_size, 0.2)
for _ in range(attr_num)])
self.conts = nn.ModuleList([
AL(hidden_size + hidden_size, 0.2, device)
for _ in range(attr_num)])
self.out = SAL(hidden_size * (attr_num + 1), 0.2)
self.res = RAL(hidden_size, 0.2, 1/17)
self.softmax = nn.Softmax(dim=2)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.fc = nn.Linear(hidden_size, 2)
def forward(self, xs, left_xs, right_xs, zs, y, token_attr_adjs):
# Token Sequence
xs = xs.to(self.device)
left_xs = left_xs.to(self.device)
right_xs = right_xs.to(self.device)
xs = xs.permute(1, 0, 2) #[Attributes, Batch, Tokens]
left_xs = left_xs.permute(1, 0, 2)
right_xs = right_xs.permute(1, 0, 2)
zs = zs.to(self.device)
y = y.to(self.device)
# Token-Attribute Graph Adjacency Matrix
token_attr_adjs = token_attr_adjs.to(self.device)
token_attr_adjs = token_attr_adjs.permute(0, 2, 1) # [Batch, All Tokens, Attributes]
if self.training and self.finetuning:
self.bert.train()
# Get Context
attns, contexts = self.get_context(xs, zs, token_attr_adjs)
entity_embs = []
attr_comp_embs = []
for x, left_x, right_x in zip(xs, left_xs, right_xs):
# Hierarchical Aggregation
entity_embs.append(self.hier_aggr(left_x, right_x, attns, contexts))
# Attribute Comparison
attr_comp_embs.append(self.hier_attr_comp(x, attns, contexts))
entity_outputs = torch.stack(entity_embs).permute(1, 0, 2)
attr_outputs = torch.stack(attr_comp_embs).permute(1, 0, 2)
# Entity Comparison
entity_output = self.hier_ent_comp(attr_outputs, entity_outputs)
# Entity Alignment
entity_output = self.res(entity_output)
else:
self.bert.eval()
with torch.no_grad():
# Get Context
attns, contexts = self.get_context(xs, zs, token_attr_adjs)
entity_embs = []
attr_comp_embs = []
for x, left_x, right_x in zip(xs, left_xs, right_xs):
# Hierarchical Aggregation
entity_embs.append(self.hier_aggr(left_x, right_x, attns, contexts))
# Attribute Comparison
attr_comp_embs.append(self.hier_attr_comp(x, attns, contexts))
entity_outputs = torch.stack(entity_embs).permute(1, 0, 2)
attr_outputs = torch.stack(attr_comp_embs).permute(1, 0, 2)
# Entity Comparison
entity_output = self.hier_ent_comp(attr_outputs, entity_outputs)
# Entity Alignment
entity_output = self.res(entity_output)
logits = self.fc(entity_output)
y_hat = logits.argmax(-1)
return logits, y, y_hat
# `adjs` is the Token-Attribute graph adjacency matrix
def get_context(self, xs, zs, adjs):
attr_outputs = []
attns = []
for x, init, cont in zip(xs, self.inits, self.conts):
# Get Attribute Context Embedding
attr_embeddings = init(self.bert.get_input_embeddings()(x)) # [Batch, Hidden]
attr_outputs.append(attr_embeddings)
# Get Token-Attribute Attention
attn = cont(x, self.bert.get_input_embeddings(), attr_embeddings) # [Batch, All Tokens]
attns.append(attn)
ent_outputs = []
for z, all in zip(zs, self.alls):
# Get Entity Context Embedding
            ent_embedding = all(self.bert.get_input_embeddings()(z)) # [1, Hidden]
ent_outputs.append(ent_embedding)
context_outputs = []
for attr, ent, ove in zip(attr_outputs, ent_outputs, self.oves):
context_outputs.append(ove(attr, ent))
attns = self.softmax(torch.stack(attns).permute(1, 2, 0)) * adjs # [Batch, All Tokens, Attributes]
context_outputs = torch.stack(context_outputs).permute(1, 0, 2) # [Batch, Attributes, Hidden]
return attns, context_outputs
def context_embedding(self, x, attns, attr_outputs):
if self.lm == 'distilbert':
words_emb = self.bert.embeddings(x)
else:
words_emb = self.bert.get_input_embeddings()(x)
# Add Context Embedding
for i in range(words_emb.size()[0]): # i is index of batch
words_emb[i] += torch.matmul(attns[i][x[i]], attr_outputs[i])
return words_emb
def hier_aggr(self, left_x, right_x, attns, attr_contexts):
left_attr_emb = self.transform(self.context_embedding(left_x, attns, attr_contexts))
right_attr_emb = self.transform(self.context_embedding(right_x, attns, attr_contexts))
entity_emb = torch.cat([left_attr_emb, right_attr_emb])
return entity_emb
def hier_attr_comp(self, x, attns, attr_contexts):
return self.transform(self.context_embedding(x, attns, attr_contexts))
def hier_ent_comp(self, attr_comp_emb, en_sum_emb):
# Currently, we only support aligned attributes
# So the entity is connected to all attributes
# For simplicity, we omit this particular adjacency matrix
return self.out(attr_comp_emb, en_sum_emb)
def transform(self, emb):
output = self.bert(inputs_embeds=emb)
pooled_output = output[0][:, 0, :]
pooled_output = self.dropout(pooled_output)
return pooled_output
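# Construction sketch (comment only, not part of the original file); `attr_num=5` and the
# 'bert' language model are illustrative assumptions, and instantiation downloads pretrained weights:
#
#   net = TranHGAT(attr_num=5, device='cpu', finetuning=True, lm='bert')
#   logits, y, y_hat = net(xs, left_xs, right_xs, zs, y, token_attr_adjs)
#   # input tensor layouts follow the shape comments inside forward() and get_context() above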
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.stack",
"torch.nn.Softmax",
"torch.no_grad",
"torch.matmul"
] | 1.4.0 | CGCL-codes/HierGAT | df0353e589a00b2b9ac6c8eae87396233fe850ee |
1.5 | import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data.dataset import Dataset
from PIL import Image, ImageFilter
from Utils import utils
import random
import posixpath
def get_list_from_filenames(file_path):
# input: relative path to .txt file with file names
# output: list of relative path names
with open(file_path) as f:
lines = f.read().splitlines()
return lines
class Synhead(Dataset):
def __init__(self, data_dir, csv_path, transform, test=False):
column_names = ['path', 'bbox_x_min', 'bbox_y_min', 'bbox_x_max', 'bbox_y_max', 'yaw', 'pitch', 'roll']
tmp_df = pd.read_csv(csv_path, sep=',', names=column_names, index_col=False, encoding="utf-8-sig")
self.data_dir = data_dir
self.transform = transform
self.X_train = tmp_df['path']
self.y_train = tmp_df[['bbox_x_min', 'bbox_y_min', 'bbox_x_max', 'bbox_y_max', 'yaw', 'pitch', 'roll']]
self.length = len(tmp_df)
self.test = test
def __getitem__(self, index):
path = os.path.join(self.data_dir, self.X_train.iloc[index]).strip('.jpg') + '.png'
img = Image.open(path)
img = img.convert('RGB')
x_min, y_min, x_max, y_max, yaw, pitch, roll = self.y_train.iloc[index]
x_min = float(x_min); x_max = float(x_max)
y_min = float(y_min); y_max = float(y_max)
yaw = -float(yaw); pitch = float(pitch); roll = float(roll)
# k = 0.2 to 0.40
k = np.random.random_sample() * 0.2 + 0.2
x_min -= 0.6 * k * abs(x_max - x_min)
y_min -= 2 * k * abs(y_max - y_min)
x_max += 0.6 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
width, height = img.size
# Crop the face
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
# Flip?
rnd = np.random.random_sample()
if rnd < 0.5:
yaw = -yaw
roll = -roll
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# Blur?
rnd = np.random.random_sample()
if rnd < 0.05:
img = img.filter(ImageFilter.BLUR)
# Bin values
bins = np.array(range(-99, 102, 3))
binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
labels = torch.LongTensor(binned_pose)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
return self.length
class Pose_300W_LP(Dataset):
# Head pose from 300W-LP dataset
def __init__(self,
data_dir,
filename_path,
transform,
img_ext='.jpg',
annot_ext='.mat',
image_mode='RGB',
train_percent=100.,
use_train=True,
seed=17):
assert 0. <= train_percent <= 100.
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
# allowing a sub sample of the dataset for training and validation
filename_list = get_list_from_filenames(filename_path)
self.length = int(len(filename_list) * train_percent // 100)
random.seed(seed)
train_inds = set(random.sample(range(len(filename_list)), self.length))
validation_inds = set(range(len(filename_list))) - train_inds
if use_train:
filename_list = [filename_list[i] for i in train_inds]
else:
filename_list = [filename_list[i] for i in validation_inds]
self.length = len(filename_list)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
# self.length = len(filename_list)
def __getitem__(self, index):
#NOAM
path = os.path.join(self.data_dir, self.X_train[index] + self.img_ext)
path = posixpath.join(*path.split('\\'))
img = Image.open(path)
img = img.convert(self.image_mode)
mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
mat_path = posixpath.join(*mat_path.split('\\'))
# Crop the face loosely
pt2d = utils.get_pt2d_from_mat(mat_path)
x_min = min(pt2d[0,:])
y_min = min(pt2d[1,:])
x_max = max(pt2d[0,:])
y_max = max(pt2d[1,:])
# k = 0.2 to 0.40
k = np.random.random_sample() * 0.2 + 0.2
x_min -= 0.6 * k * abs(x_max - x_min)
y_min -= 2 * k * abs(y_max - y_min)
x_max += 0.6 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
# We get the pose in radians
pose = utils.get_ypr_from_mat(mat_path)
# And convert to degrees.
pitch = pose[0] * 180 / np.pi
yaw = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
# Flip?
rnd = np.random.random_sample()
if rnd < 0.5:
yaw = -yaw
roll = -roll
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# Blur?
rnd = np.random.random_sample()
if rnd < 0.05:
img = img.filter(ImageFilter.BLUR)
# Bin values
bins = np.array(range(-99, 102, 3))
binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
# Get target tensors
labels = binned_pose
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# 122,450
return self.length
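# Worked example of the bin encoding used throughout these datasets (comment only, not
# part of the original file): bins = range(-99, 102, 3) yields 67 edges, so
# np.digitize(...) - 1 maps a continuous angle in [-99, 99) to one of 66 three-degree
# classes, e.g. yaw = -99.0 -> class 0, yaw = 0.0 -> class 33, yaw = 98.9 -> class 65.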
class Pose_300W_LP_random_ds(Dataset):
# 300W-LP dataset with random downsampling
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
path = os.path.join(self.data_dir, self.X_train[index] + self.img_ext).replace(r"\\", "/")
img = Image.open(path)
img = img.convert(self.image_mode)
mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# Crop the face loosely
pt2d = utils.get_pt2d_from_mat(mat_path)
x_min = min(pt2d[0,:])
y_min = min(pt2d[1,:])
x_max = max(pt2d[0,:])
y_max = max(pt2d[1,:])
# k = 0.2 to 0.40
k = np.random.random_sample() * 0.2 + 0.2
x_min -= 0.6 * k * abs(x_max - x_min)
y_min -= 2 * k * abs(y_max - y_min)
x_max += 0.6 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
# We get the pose in radians
pose = utils.get_ypr_from_mat(mat_path)
pitch = pose[0] * 180 / np.pi
yaw = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
ds = 1 + np.random.randint(0,4) * 5
original_size = img.size
        img = img.resize((img.size[0] // ds, img.size[1] // ds), resample=Image.NEAREST)  # PIL requires integer sizes
img = img.resize((original_size[0], original_size[1]), resample=Image.NEAREST)
# Flip?
rnd = np.random.random_sample()
if rnd < 0.5:
yaw = -yaw
roll = -roll
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# Blur?
rnd = np.random.random_sample()
if rnd < 0.05:
img = img.filter(ImageFilter.BLUR)
# Bin values
bins = np.array(range(-99, 102, 3))
binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
# Get target tensors
labels = binned_pose
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# 122,450
return self.length
class AFLW2000(Dataset):
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
img = img.convert(self.image_mode)
mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# Crop the face loosely
pt2d = utils.get_pt2d_from_mat(mat_path)
x_min = min(pt2d[0,:])
y_min = min(pt2d[1,:])
x_max = max(pt2d[0,:])
y_max = max(pt2d[1,:])
k = 0.20
x_min -= 2 * k * abs(x_max - x_min)
y_min -= 2 * k * abs(y_max - y_min)
x_max += 2 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
# We get the pose in radians
pose = utils.get_ypr_from_mat(mat_path)
# And convert to degrees.
pitch = pose[0] * 180 / np.pi
yaw = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
# Bin values
bins = np.array(range(-99, 102, 3))
labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# 2,000
return self.length
class AFLW2000_ds(Dataset):
# AFLW2000 dataset with fixed downsampling
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
img = img.convert(self.image_mode)
mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# Crop the face loosely
pt2d = utils.get_pt2d_from_mat(mat_path)
x_min = min(pt2d[0,:])
y_min = min(pt2d[1,:])
x_max = max(pt2d[0,:])
y_max = max(pt2d[1,:])
k = 0.20
x_min -= 2 * k * abs(x_max - x_min)
y_min -= 2 * k * abs(y_max - y_min)
x_max += 2 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
ds = 3 # downsampling factor
original_size = img.size
        img = img.resize((img.size[0] // ds, img.size[1] // ds), resample=Image.NEAREST)  # PIL requires integer sizes
img = img.resize((original_size[0], original_size[1]), resample=Image.NEAREST)
# We get the pose in radians
pose = utils.get_ypr_from_mat(mat_path)
# And convert to degrees.
pitch = pose[0] * 180 / np.pi
yaw = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
# Bin values
bins = np.array(range(-99, 102, 3))
labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# 2,000
return self.length
class AFLW_aug(Dataset):
# AFLW dataset with flipping
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
img = img.convert(self.image_mode)
txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# We get the pose in radians
annot = open(txt_path, 'r')
line = annot.readline().split(' ')
pose = [float(line[1]), float(line[2]), float(line[3])]
# And convert to degrees.
yaw = pose[0] * 180 / np.pi
pitch = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
# Fix the roll in AFLW
roll *= -1
# Augment
# Flip?
rnd = np.random.random_sample()
if rnd < 0.5:
yaw = -yaw
roll = -roll
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# Bin values
bins = np.array(range(-99, 102, 3))
labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# train: 18,863
# test: 1,966
return self.length
class AFLW(Dataset):
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
img = img.convert(self.image_mode)
txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# We get the pose in radians
annot = open(txt_path, 'r')
line = annot.readline().split(' ')
pose = [float(line[1]), float(line[2]), float(line[3])]
# And convert to degrees.
yaw = pose[0] * 180 / np.pi
pitch = pose[1] * 180 / np.pi
roll = pose[2] * 180 / np.pi
# Fix the roll in AFLW
roll *= -1
# Bin values
bins = np.array(range(-99, 102, 3))
labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# train: 18,863
# test: 1,966
return self.length
class AFW(Dataset):
def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
img_name = self.X_train[index].split('_')[0]
img = Image.open(os.path.join(self.data_dir, img_name + self.img_ext))
img = img.convert(self.image_mode)
txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
# We get the pose in degrees
annot = open(txt_path, 'r')
line = annot.readline().split(' ')
yaw, pitch, roll = [float(line[1]), float(line[2]), float(line[3])]
# Crop the face loosely
k = 0.32
x1 = float(line[4])
y1 = float(line[5])
x2 = float(line[6])
y2 = float(line[7])
x1 -= 0.8 * k * abs(x2 - x1)
y1 -= 2 * k * abs(y2 - y1)
x2 += 0.8 * k * abs(x2 - x1)
y2 += 1 * k * abs(y2 - y1)
img = img.crop((int(x1), int(y1), int(x2), int(y2)))
# Bin values
bins = np.array(range(-99, 102, 3))
labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# Around 200
return self.length
class BIWI(Dataset):
def __init__(self, data_dir, filename_path, transform, img_ext='.png', annot_ext='.txt', image_mode='RGB'):
self.data_dir = data_dir
self.transform = transform
self.img_ext = img_ext
self.annot_ext = annot_ext
filename_list = get_list_from_filenames(filename_path)
self.X_train = filename_list
self.y_train = filename_list
self.image_mode = image_mode
self.length = len(filename_list)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_dir, self.X_train[index] + '_rgb' + self.img_ext))
img = img.convert(self.image_mode)
pose_path = os.path.join(self.data_dir, self.y_train[index] + '_pose' + self.annot_ext)
y_train_list = self.y_train[index].split('/')
bbox_path = os.path.join(self.data_dir, y_train_list[0] + '/dockerface-' + y_train_list[-1] + '_rgb' + self.annot_ext)
# Load bounding box
bbox = open(bbox_path, 'r')
line = bbox.readline().split(' ')
if len(line) < 4:
x_min, y_min, x_max, y_max = 0, 0, img.size[0], img.size[1]
else:
x_min, y_min, x_max, y_max = [float(line[1]), float(line[2]), float(line[3]), float(line[4])]
bbox.close()
# Load pose in degrees
pose_annot = open(pose_path, 'r')
R = []
for line in pose_annot:
line = line.strip('\n').split(' ')
l = []
if line[0] != '':
for nb in line:
if nb == '':
continue
l.append(float(nb))
R.append(l)
R = np.array(R)
T = R[3,:]
R = R[:3,:]
pose_annot.close()
R = np.transpose(R)
roll = -np.arctan2(R[1][0], R[0][0]) * 180 / np.pi
yaw = -np.arctan2(-R[2][0], np.sqrt(R[2][1] ** 2 + R[2][2] ** 2)) * 180 / np.pi
pitch = np.arctan2(R[2][1], R[2][2]) * 180 / np.pi
# Loosely crop face
k = 0.35
x_min -= 0.6 * k * abs(x_max - x_min)
y_min -= k * abs(y_max - y_min)
x_max += 0.6 * k * abs(x_max - x_min)
y_max += 0.6 * k * abs(y_max - y_min)
img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
# Bin values
bins = np.array(range(-99, 102, 3))
binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
labels = torch.LongTensor(binned_pose)
cont_labels = torch.FloatTensor([yaw, pitch, roll])
if self.transform is not None:
img = self.transform(img)
return img, labels, cont_labels, self.X_train[index]
def __len__(self):
# 15,667
return self.length
| [
"torch.FloatTensor",
"torch.LongTensor"
] | 1.5.0 | noamzilo/deep-head-pose | 31969b8cbeeea5423ab7d326945f7871c5aed57e |
0.4 | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright Microsoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import io
import warnings
import cloudpickle
from horovod.common.util import check_extension
try:
check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',
__file__, 'mpi_lib_v2')
except:
check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',
__file__, 'mpi_lib', '_mpi_lib')
from horovod.torch.compression import Compression
from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
from horovod.torch.mpi_ops import allgather, allgather_async
from horovod.torch.mpi_ops import broadcast, broadcast_async, broadcast_, broadcast_async_
from horovod.torch.mpi_ops import join
from horovod.torch.mpi_ops import poll, synchronize
from horovod.torch.mpi_ops import init, shutdown
from horovod.torch.mpi_ops import size, local_size, rank, local_rank
from horovod.torch.mpi_ops import mpi_threads_supported, mpi_enabled, mpi_built
from horovod.torch.mpi_ops import gloo_enabled, gloo_built
from horovod.torch.mpi_ops import nccl_built, ddl_built, ccl_built
from horovod.torch.mpi_ops import Average, Sum, Adasum
import torch
import collections
# Please run this function in a subprocess
def _check_has_gpu():
import torch
return torch.cuda.is_available()
class _DistributedOptimizer(torch.optim.Optimizer):
def __init__(self, params, named_parameters, compression,
backward_passes_per_step=1, op=Average):
super(self.__class__, self).__init__(params)
self._compression = compression
if named_parameters is not None:
named_parameters = list(named_parameters)
else:
named_parameters = [('allreduce.noname.%s' % i, v)
for param_group in self.param_groups
for i, v in enumerate(param_group['params'])]
# make sure that named_parameters are tuples
if any([not isinstance(p, tuple) for p in named_parameters]):
raise ValueError('named_parameters should be a sequence of '
'tuples (name, parameter), usually produced by '
'model.named_parameters().')
dups = _DistributedOptimizer.find_duplicates([k for k, _ in named_parameters])
if len(dups) > 0:
raise ValueError('Parameter names in named_parameters must be unique. '
'Found duplicates: %s' % ', '.join(dups))
all_param_ids = {id(v)
for param_group in self.param_groups
for v in param_group['params']}
named_param_ids = {id(v) for k, v in named_parameters}
unnamed_param_ids = all_param_ids - named_param_ids
if len(unnamed_param_ids):
raise ValueError('named_parameters was specified, but one or more model '
'parameters were not named. Python object ids: '
'%s' % ', '.join(str(id) for id in unnamed_param_ids))
self._parameter_names = {v: k for k, v in sorted(named_parameters)}
self.backward_passes_per_step = backward_passes_per_step
self._allreduce_delay = {v: self.backward_passes_per_step
for _, v in sorted(named_parameters)}
self.op = op
self._handles = {}
self._grad_accs = []
self._requires_update = set()
self._synchronized = False
self._should_synchronize = True
if size() > 1:
self._register_hooks()
@staticmethod
def find_duplicates(lst):
seen = set()
dups = set()
for el in lst:
if el in seen:
dups.add(el)
seen.add(el)
return dups
def set_backward_passes_per_step(self, passes):
self.backward_passes_per_step = passes
for p in self._allreduce_delay:
self._allreduce_delay[p] = self.backward_passes_per_step
def _register_hooks(self):
for param_group in self.param_groups:
for p in param_group['params']:
if p.requires_grad:
p.grad = p.data.new(p.size()).zero_()
self._requires_update.add(p)
p_tmp = p.expand_as(p)
grad_acc = p_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_hook(p))
self._grad_accs.append(grad_acc)
def _allreduce_grad_async(self, p):
name = self._parameter_names.get(p)
tensor = p.grad
tensor_compressed, ctx = self._compression.compress(tensor)
handle = allreduce_async_(tensor_compressed, name=name, op=self.op)
return handle, ctx
def _make_hook(self, p):
def hook(*ignore):
if p in self._handles and self._handles[p][0] is not None:
if self._allreduce_delay[p] <= 0:
raise AssertionError(
"Gradients were computed more than "
"backward_passes_per_step times before call "
"to step(). Increase backward_passes_per_step to "
"accumulate gradients locally.")
assert not p.grad.requires_grad
assert self._allreduce_delay[p] > 0
handle, ctx = None, None
self._allreduce_delay[p] -= 1
if self._allreduce_delay[p] == 0:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
return hook
def synchronize(self):
missing_p = self._requires_update - set(self._handles.keys())
for p in missing_p:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
for p, value in self._handles.items():
handle, ctx = value
if handle is None:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
        for p, (handle, ctx) in self._handles.items():
output = synchronize(handle)
self._allreduce_delay[p] = self.backward_passes_per_step
p.grad.set_(self._compression.decompress(output, ctx))
self._handles.clear()
self._synchronized = True
@contextmanager
def skip_synchronize(self):
"""
A context manager used to specify that optimizer.step() should
not perform synchronization.
        It's typically used in the following pattern:
.. code-block:: python
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
"""
self._should_synchronize = False
try:
yield
finally:
self._should_synchronize = True
def step(self, closure=None):
if self._should_synchronize:
if self._synchronized:
warnings.warn("optimizer.step() called without "
"optimizer.skip_synchronize() context after "
"optimizer.synchronize(). This can cause training "
"slowdown. You may want to consider using "
"optimizer.skip_synchronize() context if you use "
"optimizer.synchronize() in your code.")
self.synchronize()
self._synchronized = False
return super(self.__class__, self).step(closure)
def zero_grad(self):
if self._handles:
raise AssertionError("optimizer.zero_grad() was called after loss.backward() "
"but before optimizer.step() or optimizer.synchronize(). "
"This is prohibited as it can cause a race condition.")
return super(self.__class__, self).zero_grad()
class _DistributedAdasumOptimizer(torch.optim.Optimizer):
def __init__(self, params, named_parameters, compression,
backward_passes_per_step=1):
super(self.__class__, self).__init__(params)
self._compression = compression
if named_parameters is not None:
named_parameters = list(named_parameters)
else:
named_parameters = [('allreduce.noname.%s' % i, v)
for param_group in self.param_groups
for i, v in enumerate(param_group['params'])]
# make sure that named_parameters are tuples
if any([not isinstance(p, tuple) for p in named_parameters]):
raise ValueError('named_parameters should be a sequence of '
'tuples (name, parameter), usually produced by '
'model.named_parameters().')
dups = _DistributedOptimizer.find_duplicates([k for k, _ in named_parameters])
if len(dups) > 0:
raise ValueError('Parameter names in named_parameters must be unique. '
'Found duplicates: %s' % ', '.join(dups))
all_param_ids = {id(v)
for param_group in self.param_groups
for v in param_group['params']}
named_param_ids = {id(v) for k, v in named_parameters}
unnamed_param_ids = all_param_ids - named_param_ids
if len(unnamed_param_ids):
raise ValueError('named_parameters was specified, but one or more model '
'parameters were not named. Python object ids: '
'%s' % ', '.join(str(id) for id in unnamed_param_ids))
self._parameter_names = {v: k for k, v in sorted(named_parameters)}
self.backward_passes_per_step = backward_passes_per_step
self._allreduce_delay = {v: self.backward_passes_per_step
for _, v in sorted(named_parameters)}
self._handles = {}
self._grad_accs = []
self._requires_update = set()
self._synchronized = False
self._should_synchronize = True
self._starting_models = {
p : torch.zeros_like(p, requires_grad=False)
for _, p in named_parameters
}
self._register_hooks()
def set_backward_passes_per_step(self, passes):
self.backward_passes_per_step = passes
for p in self._allreduce_delay:
self._allreduce_delay[p] = self.backward_passes_per_step
def _register_hooks(self):
for param_group in self.param_groups:
for p in param_group['params']:
if p.requires_grad:
p.grad = p.data.new(p.size()).zero_()
self._requires_update.add(p)
p_tmp = p.expand_as(p)
grad_acc = p_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_hook(p))
self._grad_accs.append(grad_acc)
def _allreduce_grad_async(self, p):
# Delta optimizer implements this logic:
# start = current.copy()
# step() -> computes 'current - \alpha.f(g)' where f is
# optimizer logic and g is the gradient
# delta = current-start
# allreduce_(delta)
# start += delta
# current = start
        # In order to support this logic using a function hook to improve performance,
# we do:
# delta = (start - \alpha.f(g)) - start
# = -\alpha.f(g)
# set start to zero and step computes -\alpha.f(g)
# where f is the underlying optimizer logic
name = self._parameter_names.get(p)
start = self._starting_models[p]
stashed_params = []
for group in self.param_groups:
stashed_params.append(group['params'])
# only want to step on p
if any([p is v for v in group['params']]):
group['params'] = [p]
else:
group['params'] = []
start.data.copy_(p)
super(self.__class__, self).step()
# compute delta = curr - start
p.data.sub_(start)
# allreduce as before
tensor_compressed, ctx = self._compression.compress(p)
handle = allreduce_async_(tensor_compressed.data, name=name, op=Adasum)
# reset stashed parameters
for stashed, group in zip(stashed_params, self.param_groups):
group['params'] = stashed
return handle, ctx
def _make_hook(self, p):
def hook(*ignore):
if p in self._handles and self._handles[p][0] is not None:
if self._allreduce_delay[p] <= 0:
raise AssertionError(
"Gradients were computed more than "
"backward_passes_per_step times before call "
"to step(). Increase backward_passes_per_step to "
"accumulate gradients locally.")
assert not p.grad.requires_grad
assert self._allreduce_delay[p] > 0
handle, ctx = None, None
self._allreduce_delay[p] -= 1
if self._allreduce_delay[p] == 0:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
return hook
def synchronize(self):
pass
@contextmanager
def skip_synchronize(self):
raise AssertionError("Skipping synchronization is not supported when using Adasum optimizer.")
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
missing_p = self._requires_update - set(self._handles.keys())
for p in missing_p:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
for p, (handle, ctx) in self._handles.items():
# This means step() is called before backward_passes_per_steps finished.
            # We do a synchronous allreduce here.
if not handle:
handle, ctx = self._allreduce_grad_async(p)
self._handles[p] = (handle, ctx)
delta = synchronize(handle)
delta = self._compression.decompress(delta, ctx)
start = self._starting_models[p]
start.data.add_(delta.data)
p.data.copy_(start)
self._allreduce_delay[p] = self.backward_passes_per_step
self._handles.clear()
return loss
def zero_grad(self):
if self._handles:
raise AssertionError("optimizer.zero_grad() was called after loss.backward() "
"but before optimizer.step() or optimizer.synchronize(). "
"This is prohibited as it can cause a race condition.")
return super(self.__class__, self).zero_grad()
def DistributedOptimizer(optimizer, named_parameters=None,
compression=Compression.none,
backward_passes_per_step=1,
op=Average):
"""
An optimizer that wraps another torch.optim.Optimizer, using an allreduce to
combine gradient values before applying gradients to model weights.
Allreduce operations are executed after each gradient is computed by ``loss.backward()``
in parallel with each other. The ``step()`` method ensures that all allreduce operations are
finished before applying gradients to the model.
DistributedOptimizer exposes the ``synchronize()`` method, which forces allreduce operations
to finish before continuing the execution. It's useful in conjunction with gradient
clipping, or other operations that modify gradients in place before ``step()`` is executed.
Make sure to use ``optimizer.skip_synchronize()`` if you're calling ``synchronize()``
in your code.
Example of gradient clipping:
.. code-block:: python
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.synchronize()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
with optimizer.skip_synchronize():
optimizer.step()
Arguments:
optimizer: Optimizer to use for computing gradients and applying updates.
named_parameters: A mapping between parameter names and values. Used for naming of
allreduce operations. Typically just ``model.named_parameters()``.
compression: Compression algorithm used during allreduce to reduce the amount
                     of data sent during each parameter update step. Defaults to
not using compression.
backward_passes_per_step: Number of expected backward passes to perform
before calling step()/synchronize(). This
allows accumulating gradients over multiple
mini-batches before reducing and applying them.
op: The reduction operation to use when combining gradients across different ranks.
"""
# We dynamically create a new class that inherits from the optimizer that was passed in.
# The goal is to override the `step()` method with an allreduce implementation.
if op != Adasum or size() == 1:
cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
dict(_DistributedOptimizer.__dict__))
return cls(optimizer.param_groups, named_parameters, compression, backward_passes_per_step, op)
else:
cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
dict(_DistributedAdasumOptimizer.__dict__))
return cls(optimizer.param_groups, named_parameters, compression, backward_passes_per_step)
def broadcast_parameters(params, root_rank):
"""
Broadcasts the parameters from root rank to all other processes.
Typical usage is to broadcast the ``model.state_dict()``,
``model.named_parameters()``, or ``model.parameters()``.
Arguments:
params: One of the following:
- list of parameters to broadcast
- dict of parameters to broadcast
root_rank: The rank of the process from which parameters will be
broadcasted to all other processes.
"""
if isinstance(params, dict):
params = sorted(params.items())
elif isinstance(params, list):
# support both named_parameters() and regular parameters()
params = [p if isinstance(p, tuple) else (None, p) for p in params]
else:
raise ValueError('invalid params of type: %s' % type(params))
# Run asynchronous broadcasts.
handles = []
for name, p in params:
handle = broadcast_async_(p, root_rank, name)
handles.append(handle)
# Wait for completion.
for handle in handles:
synchronize(handle)
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError('cannot broadcast torch.optim.LBFGS state')
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict['state']) == 0:
for group in optimizer.param_groups:
for p in group['params']:
if p.requires_grad and id(p) not in state_dict['state']:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict['state']) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict['state'][pid][name] = t(p.cpu().numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(option_tensor.cpu().numpy()[0], dtypes)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict['param_groups']):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == 'params':
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = '%s.%d' % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtypes)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group['params']:
param_state = state_dict['state'][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = '%s.%d' % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
# Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
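# Typical call-sequence sketch (comment only, not part of this file), assuming the package
# is imported as `import horovod.torch as hvd` and that `model` / `optimizer` are an ordinary
# torch module and optimizer constructed identically on every rank:
#
#   hvd.init()
#   optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
#   hvd.broadcast_parameters(model.state_dict(), root_rank=0)
#   hvd.broadcast_optimizer_state(optimizer, root_rank=0)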
def broadcast_object(obj, root_rank, name=None):
"""
Serializes and broadcasts an object from root rank to all other processes.
Typical usage is to broadcast the `optimizer.state_dict()`, for example:
.. code-block:: python
state_dict = broadcast_object(optimizer.state_dict(), 0)
if hvd.rank() > 0:
optimizer.load_state_dict(state_dict)
Arguments:
obj: An object capable of being serialized without losing any context.
root_rank: The rank of the process from which parameters will be
broadcasted to all other processes.
name: Optional name to use during broadcast, will default to the class
type.
Returns:
The object that was broadcast from the `root_rank`.
"""
if name is None:
name = str(type(obj))
if rank() == root_rank:
b = io.BytesIO()
cloudpickle.dump(obj, b)
t = torch.ByteTensor(bytearray(b.getvalue()))
sz = torch.IntTensor([t.shape[0]])
broadcast_(sz, root_rank, name + '.sz')
else:
sz = torch.IntTensor([0])
broadcast_(sz, root_rank, name + '.sz')
t = torch.ByteTensor(sz.tolist()[0])
broadcast_(t, root_rank, name + '.t')
if rank() != root_rank:
buf = io.BytesIO(t.numpy().tobytes())
obj = cloudpickle.load(buf)
return obj
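# Note (added for clarity, not part of the original source): the broadcast above is
# done in two phases -- the payload byte-length is broadcast first so that non-root
# ranks can allocate a correctly sized ByteTensor, and only then is the cloudpickled
# payload itself broadcast and deserialized on the receiving ranks.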
| [
"torch.IntTensor",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.Tensor"
] | 0.4.0 | pragupta/horovod | 7d7a9df9ffb77a121030207f1659ec6c9afaa8ed |
1.1 | import numpy as np
import torch
from bonsai.pruning.abstract_pruners import WeightBasedPruner, ActivationBasedPruner, GradBasedPruner
class WeightL2Prunner(WeightBasedPruner):
@staticmethod
def compute_single_layer_ranks(module, *args, **kwargs):
size = module.weights.size()
weights = module.weights.contiguous().view(size[0], np.prod(size[1:]))
return torch.sqrt(torch.sum(weights ** 2, dim=1))
class ActivationL2Prunner(ActivationBasedPruner):
@staticmethod
def compute_single_layer_ranks(module, *args, **kwargs):
# activation map size is (batch_size x out_channels x width x height)
activation = module.activation.detach().transpose(0, 1)
size = activation.size()
activation = activation.contiguous().view(size[0], np.prod(size[1:]))
return torch.sqrt(torch.sum(activation ** 2, dim=1))
class TaylorExpansionPrunner(GradBasedPruner):
@staticmethod
def compute_single_layer_ranks(module, *args, **kwargs):
# activation map and grad sizes are (batch_size X out_channels X width X height)
activation = module.activation.detach().transpose(0, 1)
grad = module.grad.detach().transpose(0, 1)
ranks = activation * grad
size = ranks.size()
ranks = ranks.contiguous().view(size[0], np.prod(size[1:]))
ranks = torch.mean(ranks, dim=1)
return ranks
| [
"torch.mean",
"torch.sum"
] | 1.1.0 | ItamarWilf/pytorch-bonsai | d8091cfa731d5168ce9a0a1d98e555f7d1364244 |
0.4 | #!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import SimpleITK as sitk
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
from copy import deepcopy
import torch
import torch.nn.functional as F
from utils.model_utils import dice_val
from utils.model_utils import get_one_hot_encoding
def save_seg_result(cf,epoch,pid,seg_map,mask_map,fusion_map):
if cf.test_last_epoch == False:
pth = cf.plot_dir + '3D_result_epoch{}/'.format(epoch)
else:
pth = cf.plot_dir + '3D_result_lastepoch{}/'.format(epoch)
if not os.path.exists(pth):
os.mkdir(pth)
seg_map = np.squeeze(seg_map).astype(np.uint8)
mask_map = np.squeeze(mask_map).astype(np.uint8)
fusion_map = np.squeeze(fusion_map).astype(np.uint8)
seg_map_pth = pth + '{}_epoch{}_segmap.nii.gz'.format(pid,epoch)
mask_map_pth = pth + '{}_epoch{}_maskmap.nii.gz'.format(pid,epoch)
fusion_map_pth = pth + '{}_epoch{}_fusionmap.nii.gz'.format(pid,epoch)
seg_map = sitk.GetImageFromArray(seg_map)
sitk.WriteImage(seg_map,seg_map_pth)
mask_map = sitk.GetImageFromArray(mask_map)
sitk.WriteImage(mask_map,mask_map_pth)
fusion_map = sitk.GetImageFromArray(fusion_map)
sitk.WriteImage(fusion_map,fusion_map_pth)
def savedice_csv(cf,epoch,pidlist,seg_dice,mask_dice,fusion_dice):
if cf.test_last_epoch == True:
pth = cf.test_dir + 'dice_lastepoch{}.csv'.format(epoch)
else:
pth = cf.test_dir + 'dice_epoch{}.csv'.format(epoch)
print('saving csv to',pth)
f = open(pth,'w+')
f.write('%s,%s,%s,%s\n'%('patient','maskdice','segdice','fusiondice'))
for ii,pid in enumerate(pidlist):
print('pid',pid)
f.write('%s,%.2f,%.2f,%.2f\n'%(pid,(mask_dice[ii]),(seg_dice[ii]),(fusion_dice[ii])))
f.flush()
maskdice = sum(mask_dice)/float(len(mask_dice))
segdice = sum(seg_dice)/float(len(seg_dice))
fusiondice = sum(fusion_dice)/float(len(fusion_dice))
f.write('%s,%.2f,%.2f,%.2f\n'%('average',(maskdice),(segdice),(fusiondice)))
f.flush()
f.close()
def save_test_image(results_list,results_list_mask,results_list_seg,results_list_fusion, epoch,cf,pth,mode = 'test'):
print('in save_test_image')
if cf.test_last_epoch == False:
pth = pth + 'epoch_{}/'.format(epoch)
else:
pth = pth + 'lastepoch_{}/'.format(epoch)
if not os.path.exists(pth):
os.mkdir(pth)
mask_dice,seg_dice,fusion_dice,pidlist =[], [],[],[]
for ii,box_pid in enumerate(results_list_seg):
pid = box_pid[1]
pidlist.append(pid)
#boxes = box_pid[0][0]
boxes = results_list[ii][0][0]#box_pid[0][0]
img = np.load(cf.pp_test_data_path + pid + '_img.npy')
img = np.transpose(img,axes = (1,2,0))[np.newaxis]
data = np.transpose(img, axes=(3, 0, 1, 2))#128,1,64,128
seg = np.load(cf.pp_test_data_path + pid + '_rois.npy')
seg = np.transpose(seg,axes = (1,2,0))[np.newaxis]
this_batch_seg_label = np.expand_dims(seg,axis=0)#seg[np.newaxis,:,:,:,:]
this_batch_seg_label = get_one_hot_encoding(this_batch_seg_label, cf.num_seg_classes+1)
seg = np.transpose(seg, axes=(3, 0, 1, 2))#128,1,64,128
mask_map = np.squeeze(results_list_mask[ii][0])
mask_map = np.transpose(mask_map,axes = (0,1,2))[np.newaxis]
mask_map_ = np.expand_dims(mask_map,axis=0)
print('pid',pid)
print('mask_map',mask_map_.shape)
print('this_batch_seg_label',this_batch_seg_label.shape)
this_batch_dice_mask = dice_val(torch.from_numpy(mask_map_),torch.from_numpy(this_batch_seg_label))
mask_map = np.transpose(mask_map, axes=(3, 0, 1, 2))#128,1,64,128
mask_map[mask_map>0.5] = 1
mask_map[mask_map<1] = 0
seg_map = np.squeeze(results_list_seg[ii][0])
seg_map = np.transpose(seg_map,axes = (0,1,2))[np.newaxis]
seg_map_ = np.expand_dims(seg_map,axis=0)
this_batch_dice_seg = dice_val(torch.from_numpy(seg_map_),torch.from_numpy(this_batch_seg_label))
seg_map = np.transpose(seg_map, axes=(3, 0, 1, 2))#128,1,64,128
seg_map[seg_map>0.5] = 1
seg_map[seg_map<1] = 0
fusion_map = np.squeeze(results_list_fusion[ii][0])
fusion_map = np.transpose(fusion_map,axes = (0,1,2))[np.newaxis]
fusion_map_ = np.expand_dims(fusion_map,axis=0)
this_batch_dice_fusion = dice_val(torch.from_numpy(fusion_map_),torch.from_numpy(this_batch_seg_label))
fusion_map = np.transpose(fusion_map, axes=(3, 0, 1, 2))#128,1,64,128
fusion_map[fusion_map>0.5] = 1
fusion_map[fusion_map<1] = 0
save_seg_result(cf,epoch,pid,seg_map,mask_map,fusion_map)
mask_dice.append(this_batch_dice_mask)
seg_dice.append(this_batch_dice_seg)
fusion_dice.append(this_batch_dice_fusion)
gt_boxes = [box['box_coords'] for box in boxes if box['box_type'] == 'gt']
slice_num = 5
if len(gt_boxes) > 0:
center = int((gt_boxes[0][5]-gt_boxes[0][4])/2+gt_boxes[0][4])
z_cuts = [np.max((center - slice_num, 0)), np.min((center + slice_num, data.shape[0]))]#max len = 10
else:
z_cuts = [data.shape[0]//2 - slice_num, int(data.shape[0]//2 + np.min([slice_num, data.shape[0]//2]))]
roi_results = [[] for _ in range(data.shape[0])]
for box in boxes:#box is a list
b = box['box_coords']
# dismiss negative anchor slices.
slices = np.round(np.unique(np.clip(np.arange(b[4], b[5] + 1), 0, data.shape[0]-1)))
for s in slices:
roi_results[int(s)].append(box)
roi_results[int(s)][-1]['box_coords'] = b[:4]#change 3d box to 2d
roi_results = roi_results[z_cuts[0]: z_cuts[1]]#extract slices to show
data = data[z_cuts[0]: z_cuts[1]]
seg = seg[z_cuts[0]:z_cuts[1]]
seg_map = seg_map[z_cuts[0]:z_cuts[1]]
mask_map = mask_map[z_cuts[0]:z_cuts[1]]
fusion_map = fusion_map[z_cuts[0]:z_cuts[1]]
pids = [pid] * data.shape[0]
kwargs={'linewidth':0.2,
'alpha':1,
}
show_arrays = np.concatenate([data,data,data,data], axis=1).astype(float)#10,2,79,219
approx_figshape = (4*show_arrays.shape[0], show_arrays.shape[1])
fig = plt.figure(figsize=approx_figshape)
gs = gridspec.GridSpec(show_arrays.shape[1] + 1, show_arrays.shape[0])
gs.update(wspace=0.1, hspace=0.1)
for b in range(show_arrays.shape[0]):#10(0...9)
for m in range(show_arrays.shape[1]):#4(0,1,2,3)
ax = plt.subplot(gs[m, b])
ax.axis('off')
arr = show_arrays[b, m]#get image to be shown
cmap = 'gray'
vmin = None
vmax = None
if m == 1:
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
ax.contour(np.squeeze(mask_map[b][0:1,:,:]),colors = 'yellow',linewidth=1,alpha=1)
if m == 2:
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
ax.contour(np.squeeze(seg_map[b][0:1,:,:]),colors = 'lime',linewidth=1,alpha=1)
if m == 3:
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
ax.contour(np.squeeze(fusion_map[b][0:1,:,:]),colors = 'orange',linewidth=1,alpha=1)
if m == 0:
plt.title('{}'.format(pids[b][:10]), fontsize=8)
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
ax.contour(np.squeeze(seg[b][0:1,:,:]),colors = 'red',linewidth=1,alpha=1)
plot_text = False
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
for box in roi_results[b]:
coords = box['box_coords']
#print('coords',coords)
#print('type',box['box_type'])
if box['box_type'] == 'det':
#print('score',box['box_score'])
if box['box_score'] > 0.1:# and box['box_score'] > cf.source_th:#detected box
plot_text = True
#score = np.max(box['box_score'])
score = box['box_score']
score_text = '{:.2f}'.format(score*100)#'{}|{:.0f}'.format(box['box_pred_class_id'], score*100)
score_font_size = 7
text_color = 'w'
text_x = coords[1] #+ 10*(box['box_pred_class_id'] -1) #avoid overlap of scores in plot.
text_y = coords[2] + 10
#else:#background and small score don't show
# continue
color_var = 'box_type'#'extra_usage' if 'extra_usage' in list(box.keys()) else 'box_type'
color = cf.box_color_palette[box[color_var]]
ax.plot([coords[1], coords[3]], [coords[0], coords[0]], color=color, linewidth=1, alpha=1) # up
ax.plot([coords[1], coords[3]], [coords[2], coords[2]], color=color, linewidth=1, alpha=1) # down
ax.plot([coords[1], coords[1]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # left
ax.plot([coords[3], coords[3]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # right
if plot_text:
ax.text(text_x, text_y, score_text, fontsize=score_font_size, color=text_color)
if cf.test_last_epoch == False:
outfile = pth+'result_{}_{}_{}.png'.format(mode,pid,epoch)
else:
outfile = pth+'result_{}_{}_lastepoch_{}.png'.format(mode,pid,epoch)
print('outfile',outfile)
try:
plt.savefig(outfile)
except:
raise Warning('failed to save plot.')
savedice_csv(cf,epoch,pidlist,seg_dice,mask_dice,fusion_dice)
def save_monitor_valuse(cf,test_df,epoch,flag = 'val'):
pth = cf.exp_dir
filename = flag+'_{}'.format(epoch)+'.csv'
print('pth',pth+filename)
test_df.to_csv(pth+filename)
def plot_batch_prediction(batch, results_dict, cf, mode,outfile= None):
"""
plot the input images, ground truth annotations, and output predictions of a batch. If 3D batch, plots a 2D projection
of one randomly sampled element (patient) in the batch. Since plotting all slices of patient volume blows up costs of
time and space, only a section containing a randomly sampled ground truth annotation is plotted.
:param batch: dict with keys: 'data' (input image), 'seg' (pixelwise annotations), 'pid'
:param results_dict: list over batch element. Each element is a list of boxes (prediction and ground truth),
where every box is a dictionary containing box_coords, box_score and box_type.
"""
#print('in ploting image')
data = batch['data']
pids = batch['pid']
segs = batch['seg']
# for 3D, repeat pid over batch elements.
if len(set(pids)) == 1:
pids = [pids] * data.shape[0]
if mode == 'val_patient':
mask_map = results_dict['seg_preds']#.cpu().detach().numpy()
seg_map = results_dict['seg_logits']#.cpu().detach().numpy()
fusion_map = results_dict['fusion_map']#.cpu().detach().numpy()
else:
#mask_map = torch.tensor(results_dict['seg_preds']).cuda()
if cf.fusion_feature_method == 'after':
mask_map = results_dict['seg_preds'][:,1:2,:,:,:].cpu().detach().numpy()
else:
mask_map = F.softmax(results_dict['seg_preds'], dim=1)[:,1:2,:,:,:].cpu().detach().numpy()# N,2,64,128,128
if cf.fusion_feature_method == 'after':
seg_map = results_dict['seg_logits'][:,1:2,:,:,:].cpu().detach().numpy()
else:
seg_map = F.softmax(results_dict['seg_logits'], dim=1)[:,1:2,:,:,:].cpu().detach().numpy()
fusion_map = results_dict['fusion_map'][:,1:2,:,:,:].cpu().detach().numpy()
we_layer_seg = results_dict['we_layer'][:,1:2,:,:,:].cpu().detach().numpy()
we_layer_mask = results_dict['we_layer'][:,3:4,:,:,:].cpu().detach().numpy()
roi_results = deepcopy(results_dict['boxes'])#len == batch size
    # Randomly sample one patient of the batch and project its data into 2D slices for plotting.
if cf.dim == 3:
patient_ix = np.random.choice(data.shape[0])
data = np.transpose(data[patient_ix], axes=(3, 0, 1, 2))#128,1,64,128
# select interesting foreground section to plot.
gt_boxes = [box['box_coords'] for box in roi_results[patient_ix] if box['box_type'] == 'gt']
if len(gt_boxes) > 0:
center = int((gt_boxes[0][5]-gt_boxes[0][4])/2+gt_boxes[0][4])
z_cuts = [np.max((center - 5, 0)), np.min((center + 5, data.shape[0]))]#max len = 10
else:
z_cuts = [data.shape[0]//2 - 5, int(data.shape[0]//2 + np.min([5, data.shape[0]//2]))]
p_roi_results = roi_results[patient_ix]
roi_results = [[] for _ in range(data.shape[0])]#len = 128
# iterate over cubes and spread across slices.
for box in p_roi_results:#box is a list
b = box['box_coords']
# dismiss negative anchor slices.
slices = np.round(np.unique(np.clip(np.arange(b[4], b[5] + 1), 0, data.shape[0]-1)))
for s in slices:
roi_results[int(s)].append(box)
roi_results[int(s)][-1]['box_coords'] = b[:4]#change 3d box to 2d
roi_results = roi_results[z_cuts[0]: z_cuts[1]]#extract slices to show
data = data[z_cuts[0]: z_cuts[1]]
segs = np.transpose(segs[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]#gt
mask_map = np.transpose(mask_map[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]#pred seg
seg_map = np.transpose(seg_map[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]#pred seg
fusion_map = np.transpose(fusion_map[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]#pred seg
we_layer_seg = np.transpose(we_layer_seg[patient_ix],axes=(3,0,1,2))[z_cuts[0]:z_cuts[1]]
we_layer_mask = np.transpose(we_layer_mask[patient_ix],axes=(3,0,1,2))[z_cuts[0]:z_cuts[1]]
pids = [pids[patient_ix]] * data.shape[0]
try:
# all dimensions except for the 'channel-dimension' are required to match
for i in [0, 2, 3]:
assert data.shape[i] == segs.shape[i] == mask_map.shape[i]
except:
        raise Warning('Shapes of arrays to plot not in agreement! '
                      'Shapes {} vs. {} vs {}'.format(data.shape, segs.shape, mask_map.shape))
show_arrays = np.concatenate([data[:,0][:,None], segs, mask_map, seg_map, fusion_map,we_layer_mask,we_layer_seg], axis=1).astype(float)
approx_figshape = (4 * show_arrays.shape[0], 4 * show_arrays.shape[1])
fig = plt.figure(figsize=approx_figshape)
gs = gridspec.GridSpec(show_arrays.shape[1] + 1, show_arrays.shape[0])
gs.update(wspace=0.1, hspace=0.1)
for b in range(show_arrays.shape[0]):
for m in range(show_arrays.shape[1]):
ax = plt.subplot(gs[m, b])
ax.axis('off')
arr = show_arrays[b, m]#get image to be shown
if m == 0:
cmap = 'gray'
vmin = None
vmax = None
else:
cmap = 'jet'
vmin = 0
vmax = 1#cf.num_seg_classes - 1
ax.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
if m == 0:
plot_text = False
plt.title('{}'.format(pids[b][:10]), fontsize=8)
for box in roi_results[b]:
coords = box['box_coords']
if box['box_type'] == 'det':
# dont plot background preds or low confidence boxes.
if box['box_score'] > cf.show_det_source_th:#detected box
plot_text = True
score = box['box_score']
score_text = '{:.2f}'.format(score*100)
score_font_size = 7
text_color = 'w'
text_x = coords[1] #+ 10*(box['box_pred_class_id'] -1) #avoid overlap of scores in plot.
text_y = coords[2] + 5
color_var = 'box_type'#'extra_usage' if 'extra_usage' in list(box.keys()) else 'box_type'
color = cf.box_color_palette[box[color_var]]
ax.plot([coords[1], coords[3]], [coords[0], coords[0]], color=color, linewidth=1, alpha=1) # up
ax.plot([coords[1], coords[3]], [coords[2], coords[2]], color=color, linewidth=1, alpha=1) # down
ax.plot([coords[1], coords[1]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # left
ax.plot([coords[3], coords[3]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # right
if plot_text:
ax.text(text_x, text_y, score_text, fontsize=score_font_size, color=text_color)
return fig
class TrainingPlot_2Panel():
def __init__(self, cf):
self.file_name = cf.plot_dir + '/monitor_{}'.format(cf.fold)
#print('file_name monitor',self.file_name)
self.exp_name = cf.fold_dir
self.do_validation = cf.do_validation
self.separate_values_dict = cf.assign_values_to_extra_figure#{}
self.figure_list = []
for n in range(cf.n_monitoring_figures):#1
self.figure_list.append(plt.figure(figsize=(10, 6)))
self.figure_list[-1].ax1 = plt.subplot(111)
self.figure_list[-1].ax1.set_xlabel('epochs')
self.figure_list[-1].ax1.set_ylabel('loss / metrics')
self.figure_list[-1].ax1.set_xlim(0, cf.num_epochs)
self.figure_list[-1].ax1.grid()
self.figure_list[0].ax1.set_ylim(0, 1.5)
self.color_palette = ['b', 'c', 'r', 'purple', 'm', 'y', 'k', 'tab:gray']
def update_and_save(self, metrics, epoch):
for figure_ix in range(len(self.figure_list)):
fig = self.figure_list[figure_ix]
detection_monitoring_plot(fig.ax1, metrics, self.exp_name, self.color_palette, epoch, figure_ix,
self.separate_values_dict,
self.do_validation)
fig.savefig(self.file_name + '_{}'.format(figure_ix))
def detection_monitoring_plot(ax1, metrics, exp_name, color_palette, epoch, figure_ix, separate_values_dict, do_validation):
monitor_values_keys = metrics['train']['monitor_values'][1][0].keys()
separate_values = [v for fig_ix in separate_values_dict.values() for v in fig_ix]
if figure_ix == 0:
plot_keys = [ii for ii in monitor_values_keys if ii not in separate_values]
plot_keys += [k for k in metrics['train'].keys() if k != 'monitor_values']
else:
plot_keys = separate_values_dict[figure_ix]
x = np.arange(1, epoch + 1)
for kix, pk in enumerate(plot_keys):
if pk in metrics['train'].keys():
y_train = metrics['train'][pk][1:]
if do_validation:
y_val = metrics['val'][pk][1:]
else:
y_train = [np.mean([er[pk] for er in metrics['train']['monitor_values'][e]]) for e in x]
if do_validation:
y_val = [np.mean([er[pk] for er in metrics['val']['monitor_values'][e]]) for e in x]
ax1.plot(x, y_train, label='train_{}'.format(pk), linestyle='--', color=color_palette[kix])
if do_validation:
ax1.plot(x, y_val, label='val_{}'.format(pk), linestyle='-', color=color_palette[kix])
if epoch == 1:
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax1.set_title(exp_name)
def plot_prediction_hist(label_list, pred_list, type_list, outfile):
"""
plot histogram of predictions for a specific class.
:param label_list: list of 1s and 0s specifying whether prediction is a true positive match (1) or a false positive (0).
False negatives (missed ground truth objects) are artificially added predictions with score 0 and label 1.
:param pred_list: list of prediction-scores.
    :param type_list: list of prediction-types for statistics info in the title.
"""
#print('in plot_prediction_hist')
#print('label_list',label_list)
#print('pred_list',pred_list)
#print('type_list',type_list)
#print('outfile',outfile)
preds = np.array(pred_list)
labels = np.array(label_list)
title = outfile.split('/')[-1] + ' count:{}'.format(len(label_list))
plt.figure()
plt.yscale('log')
if 0 in labels:
plt.hist(preds[labels == 0], alpha=0.3, color='g', range=(0, 1), bins=50, label='false pos.')
if 1 in labels:
plt.hist(preds[labels == 1], alpha=0.3, color='b', range=(0, 1), bins=50, label='true pos. (false neg. @ score=0)')
if type_list is not None:
fp_count = type_list.count('det_fp')
fn_count = type_list.count('det_fn')
tp_count = type_list.count('det_tp')
pos_count = fn_count + tp_count
title += ' tp:{} fp:{} fn:{} pos:{}'. format(tp_count, fp_count, fn_count, pos_count)
plt.legend()
plt.title(title)
plt.xlabel('confidence score')
plt.ylabel('log n')
plt.savefig(outfile)
plt.close()
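# Illustrative call (hypothetical values, not from the original repo):
#   plot_prediction_hist(label_list=[1, 0, 1], pred_list=[0.92, 0.30, 0.0],
#                        type_list=['det_tp', 'det_fp', 'det_fn'],
#                        outfile='plots/fold0_hist')
# This would draw overlapping log-scale histograms of true- and false-positive scores
# and append the tp/fp/fn counts to the title.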
def plot_stat_curves(stats, outfile):
print('in plot_stat_curves')
print('outfile',outfile)
for c in ['roc', 'prc']:
plt.figure()
for s in stats:
if s[c] is not None:
plt.plot(s[c][0], s[c][1], label=s['name'] + '_' + c)
plt.title(outfile.split('/')[-1] + '_' + c)
plt.legend(loc=3 if c == 'prc' else 4)
plt.xlabel('precision' if c == 'prc' else '1-spec.')
plt.ylabel('recall')
plt.savefig(outfile + '_' + c)
plt.close()
| [
"torch.nn.functional.softmax",
"torch.from_numpy"
] | 0.4.1 | zhouyuegithub/medicaldetectiontoolkit | 283121228ab6012f3369c0a649c0e1aeae492283 |
1.6 | import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import segmentation_models_pytorch as smp
import utils
class FCN(nn.Module):
def __init__(self, num_input_channels, num_output_classes, num_filters=64):
super(FCN, self).__init__()
self.conv1 = nn.Conv2d(num_input_channels, num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.last = nn.Conv2d(num_filters, num_output_classes, kernel_size=1, stride=1, padding=0)
def forward(self, inputs):
x = F.relu(self.conv1(inputs))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = F.relu(self.conv6(x))
x = F.relu(self.conv7(x))
x = self.last(x)
return x
class Wakey_FCN(nn.Module):
def __init__(self, num_input_channels, num_output_classes, num_filters=64):
super(Wakey_FCN, self).__init__()
self.conv1 = nn.Conv2d(num_input_channels, num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.last = nn.Conv2d(num_filters, num_output_classes, kernel_size=1, stride=1, padding=0)
def forward(self, inputs):
x = F.relu(self.conv1(inputs))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = self.last(x)
return x
class Single_FCN(nn.Module):
def __init__(self, num_input_channels, num_output_classes, num_filters=64):
super(Single_FCN, self).__init__()
self.conv1 = nn.Conv2d(num_input_channels, num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.last = nn.Conv2d(num_filters, num_output_classes, kernel_size=1, stride=1, padding=0)
def forward(self, inputs):
x = F.relu(self.conv1(inputs))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = self.last(x)
return x
def get_unet():
return smp.Unet(
encoder_name='resnet18', encoder_depth=3, encoder_weights=None,
decoder_channels=(128, 64, 64), in_channels=4, classes=2)
def get_unet_plus_plus():
return smp.UnetPlusPlus(
encoder_name='resnet18', encoder_depth=3, encoder_weights=None,
decoder_channels=(128, 64, 64), in_channels=4, classes=2
)
def get_manet():
return smp.MAnet(
encoder_name='resnet18', encoder_depth=3, encoder_weights=None,
decoder_channels=(128, 64, 64), in_channels=4, classes=2
)
def get_deeplabv3():
return smp.DeepLabV3(
encoder_name='resnet18', encoder_depth=3, encoder_weights=None,
decoder_channels=128, in_channels=4, classes=2
)
def get_deeplabv3_plus():
return smp.DeepLabV3Plus(encoder_name='resnet34', encoder_depth=5, encoder_weights=None,
encoder_output_stride=16, decoder_channels=256, decoder_atrous_rates=(12, 24, 36),
in_channels=4, classes=2)
def get_fpn():
return smp.FPN(encoder_name='resnet34', encoder_depth=5, encoder_weights=None, decoder_pyramid_channels=256,
decoder_segmentation_channels=128, decoder_merge_policy='add', in_channels=4,
classes=2)
def get_linknet():
return smp.Linknet(encoder_name='resnet18', encoder_depth=3, encoder_weights=None, in_channels=4,
classes=2)
def get_pspnet():
return smp.PSPNet(encoder_name='resnet34', encoder_weights=None, encoder_depth=3, psp_out_channels=512,
psp_use_batchnorm=True, psp_dropout=0.2, in_channels=4, classes=2)
def get_pan():
return smp.PAN(encoder_name='resnet34', encoder_weights=None, encoder_dilation=True, decoder_channels=32,
in_channels=4, classes=2)
def get_fcn():
return FCN(num_input_channels=4, num_output_classes=len(utils.NLCD_CLASSES), num_filters=64)
def get_water_fcn():
return Single_FCN(num_input_channels=1, num_output_classes=2, num_filters=64)
def get_imprev_fcn():
return Single_FCN(num_input_channels=4, num_output_classes=2, num_filters=64)
def get_wakey_fcn():
return Wakey_FCN(num_input_channels=1, num_output_classes=2, num_filters=64)
| [
"torch.nn.Conv2d"
] | 1.6.0 | baoqianyue/DFC2021-Track-MSD | d707f7601c6caa0d0f0e6013d493e66059d23d49 |
1.1 | import argparse
import glob
import numpy as np
import os
import torch
import constants as c
def get_best_epoch_cell(ckpt_path, cell_type):
model_ckpt = torch.load(ckpt_path)
loss_history = model_ckpt['loss']
acc_history = model_ckpt['acc']
pp_acc_history = model_ckpt['pp_acc']
best_epoch = -1
best_loss = np.inf
best_acc = 0.0
best_pp_acc = 0.0
for i, loss_dict in enumerate(loss_history['valid']):
if loss_dict[cell_type] < best_loss:
best_loss = loss_dict[cell_type]
best_acc = acc_history['valid'][i][cell_type]
best_pp_acc = pp_acc_history['valid'][i][cell_type]
best_epoch = i
assert best_epoch != -1
return best_loss, best_acc, best_pp_acc
def get_args():
parser = argparse.ArgumentParser(description='Cellular Classification')
parser.add_argument('-c', '--checkpoint', type=str, required=True, help='Checkpoint file')
return parser.parse_args()
def main():
args = get_args()
path = os.path.join(args.checkpoint, 'best.tar')
if os.path.exists(path):
# base model
# model_ckpt = torch.load(path)
# loss_history = model_ckpt['loss']
# acc_history = model_ckpt['acc']
# if 'pp_acc' in model_ckpt:
# pp_acc_history = model_ckpt['pp_acc']
# else:
# pp_acc_history = dict()
max_epoch = -1
for path in glob.iglob(os.path.join(args.checkpoint, '*.tar')):
epoch = path.split('/')[-1].split('.')[0]
if epoch.isdigit():
max_epoch = max(max_epoch, int(epoch))
path = os.path.join(args.checkpoint, '%d.tar' % max_epoch)
for exp in c.EXPS:
best_loss, best_acc, best_pp_acc = get_best_epoch_cell(path, exp)
out = [exp, '', '', best_loss, best_acc, best_pp_acc]
# out = [exp, '', '', loss_history['valid'][-1][exp], acc_history['valid'][-1][exp],
# pp_acc_history.get('valid', [{exp: ''}])[-1][exp]]
print('\t'.join([str(x) for x in out]))
# if isinstance(loss_history['train'][-1], torch.Tensor):
# train_loss = loss_history['train'][-1].cpu().numpy()
# else:
# train_loss = loss_history['train'][-1]
# out = ['overall', train_loss, acc_history['train'][-1]]
# print('\t'.join([str(x) for x in out]))
else:
# finetune model
for exp in c.EXPS:
path = os.path.join(args.checkpoint, '%s_best.tar' % exp)
if os.path.exists(path):
model_ckpt = torch.load(path)
loss_history = model_ckpt['loss']
acc_history = model_ckpt['acc']
if 'pp_acc' in model_ckpt:
pp_acc_history = model_ckpt['pp_acc']
else:
pp_acc_history = dict()
if isinstance(loss_history['train'][-1], torch.Tensor):
train_loss = loss_history['train'][-1].cpu().numpy()
else:
train_loss = loss_history['train'][-1]
out = [exp, train_loss, acc_history['train'][-1],
loss_history['valid'][-1][exp], acc_history['valid'][-1][exp],
pp_acc_history.get('valid', [{exp: ''}])[-1][exp]]
print('\t'.join([str(x) for x in out]))
if __name__ == '__main__':
main()
| [
"torch.load"
] | 1.1.0 | ChihHsuLin/cellular_image_classification | 5ea81b4a0f42d17ecb95c41ff4349ef610841394 |
0.6 | import torch
class Decoder(torch.nn.Module):
# TODO: support learnable fusion modules
def __init__(self):
super().__init__()
self.FUSION_DIC = {"2to1_fusion": ["sum", "diff", "abs_diff"],
"2to2_fusion": ["concat"]}
def fusion(self, x1, x2, fusion_form="concat"):
"""Specify the form of feature fusion"""
if fusion_form == "concat":
x = torch.cat([x1, x2], dim=1)
elif fusion_form == "sum":
x = x1 + x2
elif fusion_form == "diff":
x = x2 - x1
elif fusion_form == "abs_diff":
x = torch.abs(x1 - x2)
else:
raise ValueError('the fusion form "{}" is not defined'.format(fusion_form))
return x
def aggregation_layer(self, fea1, fea2, fusion_form="concat", ignore_original_img=True):
"""aggregate features from siamese or non-siamese branches"""
start_idx = 1 if ignore_original_img else 0
aggregate_fea = [self.fusion(fea1[idx], fea2[idx], fusion_form)
for idx in range(start_idx, len(fea1))]
return aggregate_fea
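# Minimal usage sketch (assumed shapes, added for illustration):
#   dec = Decoder()
#   feats_t1 = [torch.randn(2, c, 64, 64) for c in (3, 16, 32)]
#   feats_t2 = [torch.randn(2, c, 64, 64) for c in (3, 16, 32)]
#   fused = dec.aggregation_layer(feats_t1, feats_t2, fusion_form="concat")
#   # "concat" doubles the channel dimension; "sum", "diff" and "abs_diff" keep it,
#   # and the first (original image) entry is skipped when ignore_original_img=True.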
| [
"torch.abs",
"torch.cat"
] | 0.6.3 | yjt2018/change_detection.pytorch | cbd6150708eeddbd66e30e311f2482d43334b738 |
1.10 | import torch
from cape import CAPE2d
def test_cape2d():
pos_emb = CAPE2d(d_model=512, max_global_shift=0.0, max_local_shift=0.0,
max_global_scaling=1.0, batch_first=False)
print("Checking correct dimensionality input/output (16x16) for batch_size = False...")
exp_shape = (16, 16, 32, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
print("Checking correct dimensionality input/output (24x16) for batch_size = False...")
exp_shape = (24, 16, 32, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
print("Checking correct dimensionality input/output (16x24) for batch_size = False...")
exp_shape = (16, 24, 32, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
print("Checking correct dimensionality input/output (16x16) for batch_size = True...")
pos_emb = CAPE2d(d_model=512, max_global_shift=0.0, max_local_shift=0.0,
max_global_scaling=1.0, batch_first=True)
exp_shape = (32, 16, 16, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
print("Checking correct dimensionality input/output (24x16) for batch_size = True...")
exp_shape = (32, 24, 16, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
print("Checking correct dimensionality input/output (16x24) for batch_size = True...")
exp_shape = (32, 16, 24, 512)
x = torch.randn(exp_shape)
x = pos_emb(x)
assert exp_shape == x.shape, f"""Error! Expected shape = {exp_shape}
| Received shape = {x.shape}"""
def test_augment_positions():
print("Checking that positions order is not altered after local shifting...")
def check_position_order(max_local_shift, batch_size=128,
patches_x=24, patches_y=24, expect_disorder=False):
if expect_disorder:
spotted_disorder = False
pos_emb = CAPE2d(d_model=512, max_global_shift=0.0, max_local_shift=max_local_shift,
max_global_scaling=1.0, batch_first=False)
x = torch.zeros([batch_size, patches_x, patches_y])
y = torch.zeros([batch_size, patches_x, patches_y])
x += torch.linspace(-1, 1, patches_x)[None, :, None]
y += torch.linspace(-1, 1, patches_y)[None, None, :]
x, y = pos_emb.augment_positions(x, y)
for b in range(batch_size):
for c in range(patches_y):
pos_x = x[b, :, c]
for t in range(patches_x - 1):
if not expect_disorder:
assert pos_x[t] < pos_x[t + 1], f"""Error! Pos x order has been altered
after local shifting with
max value {max_local_shift}.
Pos embedding = {pos_x}.
Index t = {t}
Index t + 1 = {t + 1}."""
else:
if pos_x[t] >= pos_x[t + 1]:
return
for b in range(batch_size):
for c in range(patches_x):
pos_y = y[b, c, :]
for t in range(patches_y - 1):
if not expect_disorder:
assert pos_y[t] < pos_y[t + 1], f"""Error! Pos y order has been altered
after local shifting with
max value {max_local_shift}.
Pos embedding = {pos_y}.
Index t = {t}
Index t + 1 = {t + 1}."""
else:
if pos_y[t] >= pos_y[t + 1]:
return
if expect_disorder:
assert spotted_disorder, f"""Error! Expected position disorder with
max local shift = {max_local_shift}.
However, haven't spotted any."""
check_position_order(max_local_shift=0.00, patches_x=24, patches_y=24)
check_position_order(max_local_shift=0.25, patches_x=24, patches_y=24)
check_position_order(max_local_shift=0.50, patches_x=24, patches_y=24)
check_position_order(max_local_shift=0.00, patches_x=24, patches_y=64)
check_position_order(max_local_shift=0.25, patches_x=24, patches_y=64)
check_position_order(max_local_shift=0.50, patches_x=24, patches_y=64)
check_position_order(max_local_shift=0.00, patches_x=64, patches_y=24)
check_position_order(max_local_shift=0.25, patches_x=64, patches_y=24)
check_position_order(max_local_shift=0.50, patches_x=64, patches_y=24)
check_position_order(max_local_shift=0.55, batch_size=1024,
patches_x=24, patches_y=24, expect_disorder=True)
check_position_order(max_local_shift=1.00, batch_size=128,
patches_x=24, patches_y=24, expect_disorder=True)
| [
"torch.zeros",
"torch.linspace",
"torch.randn"
] | 1.10.0 | gcambara/cape | c18c3c5e33f24a85506f30c399bf88b45f8a1787 |
0.7 | import numpy as np
import random
import os
import time
import importlib
import cv2
from PIL import Image
import math
import pickle
import torch
from torch import distributed as dist
from torch.utils.data.sampler import Sampler
def load_module(module_type, module_name):
m = importlib.import_module(f'{module_type}.{module_name}')
return m
def return_empty_dict_if_none(x):
return {} if x is None else x
def get_data_sampler(dataset, shuffle=False, is_distributed=False):
if is_distributed:
return torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return torch.utils.data.RandomSampler(dataset)
else:
return torch.utils.data.SequentialSampler(dataset)
def dict2device(d, device, dtype=None):
if isinstance(d, np.ndarray):
d = torch.from_numpy(d)
if torch.is_tensor(d):
d = d.to(device)
if dtype is not None:
d = d.type(dtype)
return d
if isinstance(d, dict):
for k, v in d.items():
d[k] = dict2device(v, device, dtype=dtype)
return d
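# Illustrative behaviour (example values, not part of the original module):
#   dict2device({'img': np.zeros((3, 4))}, 'cpu', dtype=torch.float32)
# returns the dict with the numpy array converted to a float32 tensor on the
# requested device; nested dicts are converted recursively in place.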
def setup_environment(seed):
# random
random.seed(seed)
# numpy
np.random.seed(seed)
# cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
# pytorch
os.environ['OMP_NUM_THREADS'] = '1'
torch.set_num_threads(1)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def squeeze_metrics(d):
metrics = dict()
for k, v in d.items():
if torch.is_tensor(v):
metrics[k] = v.mean().item()
elif isinstance(v, float):
metrics[k] = v
else:
raise NotImplementedError("Unknown datatype for metric: {}".format(type(v)))
return metrics
def reduce_metrics(metrics):
metrics_dict = dict()
for k in metrics[0].keys():
metrics_dict[k] = np.mean([item[k] for item in metrics])
return metrics_dict
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
def flatten_parameters(parameters):
list_of_flat_parameters = [torch.flatten(p) for p in parameters]
flat_parameters = torch.cat(list_of_flat_parameters).view(-1, 1)
return flat_parameters
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def itt(img):
    tensor = torch.FloatTensor(img)
if len(tensor.shape) == 3:
tensor = tensor.permute(2, 0, 1)
else:
tensor = tensor.unsqueeze(0)
return tensor
def tti(tensor):
tensor = tensor.detach().cpu()
tensor = tensor[0].permute(1, 2, 0)
image = tensor.numpy()
if image.shape[-1] == 1:
image = image[..., 0]
return image
def to_tanh(t):
return t * 2 - 1.
def to_sigm(t):
return (t + 1) / 2
def get_rotation_matrix(angle, axis='x'):
if axis == 'x':
return np.array([
[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]
])
elif axis == 'y':
return np.array([
[np.cos(angle), 0, -np.sin(angle)],
[0, 1, 0],
[np.sin(angle), 0, np.cos(angle)]
])
elif axis == 'z':
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
else:
raise ValueError(f"Unkown axis {axis}")
def rotate_verts(vertices, angle, K, K_inv, axis='y', mean_point=None):
rot_mat = get_rotation_matrix(angle, axis)
rot_mat = torch.FloatTensor(rot_mat).to(vertices.device).unsqueeze(0)
vertices_world = torch.bmm(vertices, K_inv.transpose(1, 2))
if mean_point is None:
mean_point = vertices_world.mean(dim=1)
vertices_rot = vertices_world - mean_point
vertices_rot = torch.bmm(vertices_rot, rot_mat.transpose(1, 2))
vertices_rot = vertices_rot + mean_point
vertices_rot_cam = torch.bmm(vertices_rot, K.transpose(1, 2))
return vertices_rot_cam, mean_point
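# Note (added for clarity): `vertices` are expected as BxNx3 points in camera space and
# K / K_inv as Bx3x3 (inverse) intrinsics -- the points are unprojected with K_inv,
# rotated about their mean (or a supplied `mean_point`), and re-projected with K,
# so the rotation is applied in world space rather than in image space.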
def json2kps(openpose_dict):
list2kps = lambda x: np.array(x).reshape(-1, 3)
keys_to_save = ['pose_keypoints_2d', 'face_keypoints_2d', 'hand_right_keypoints_2d', 'hand_left_keypoints_2d']
kps = openpose_dict['people']
if len(kps) == 0:
kp_stacked = np.ones((137, 2)) * -1
return kp_stacked
kps = kps[0]
kp_parts = [list2kps(kps[key]) for key in keys_to_save]
kp_stacked = np.concatenate(kp_parts, axis=0)
kp_stacked[kp_stacked[:, 2] < 0.1, :] = -1
kp_stacked = kp_stacked[:, :2]
return kp_stacked
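# Note (added for clarity): 137 keypoints = 25 body + 70 face + 21 + 21 hand points in
# the OpenPose JSON; when no person is detected, an all -1 placeholder of the same
# shape is returned, and low-confidence points (< 0.1) are likewise set to -1.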
def segment_img(img, segm):
img = to_sigm(img) * segm
img = to_tanh(img)
return img
def segm2mask(segm):
segm = torch.sum(segm, dim=1, keepdims=True) # Bx3xHxW -> Bx1xHxW
segm = (segm > 0.0).type(torch.float32)
    return segm
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.RandomSampler",
"torch.stack",
"torch.distributed.reduce",
"torch.sum",
"torch.is_tensor",
"torch.FloatTensor",
"torch.manual_seed",
"torch.distributed.is_initialized",
"torch.distributed.get_rank",
"torch.set_num_threads",
"torch.cuda.manual_seed_all",
"torch.utils.data.SequentialSampler",
"torch.distributed.is_available",
"torch.no_grad",
"torch.from_numpy",
"torch.utils.data.distributed.DistributedSampler",
"torch.flatten"
] | 0.7.0 | saic-vul/style-people | a48418ace25b99a50801a54a9e282cd986c305ba |
1.11 | """
libraries
"""
import os
import gc
import re
import ast
import sys
import copy
import json
import time
import math
import string
import pickle
import random
import joblib
import itertools
import warnings
from IPython.core.display_functions import display
import scipy as sp
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from torch.optim import Adam, SGD, AdamW
from torch.utils.data import DataLoader, Dataset
import tokenizers
import transformers
from transformers import AutoTokenizer, AutoModel, AutoConfig
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
import wandb
from wandb_creds import *
from transformers import AutoModel, DistilBertTokenizerFast
"""
constants & options
"""
SEED = 42
OUTPUT_DIR = 'EXPERIMENT_1_' # increment for each iteration
MODEL = AutoModel.from_pretrained('distilbert-base-uncased')
TOKENIZER = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
"""
CONFIGURATION
"""
class CONFIGURATION:
wandb = False
competition = 'NBME'
_wandb_kernel = 'mkingo'
debug = False
apex = True
print_freq = 100
num_workers = 4
model = MODEL
tokenizer = TOKENIZER
scheduler = 'cosine' # ['linear', 'cosine']
batch_scheduler = True
num_cycles = 0.5
num_warmup_steps = 0
epochs = 5
encoder_lr = 2e-5
decoder_lr = 2e-5
min_lr = 1e-6
eps = 1e-6
betas = (0.9, 0.999)
batch_size = 4
fc_dropout = 0.2
max_len = 512
weight_decay = 0.01
gradient_accumulation_steps = 1
max_grad_norm = 1000
seed = 42
n_fold = 5
trn_fold = [0]
train = True
if CONFIGURATION.debug:
CONFIGURATION.epochs = 2
CONFIGURATION.trn_fold = [0]
"""
wandb
"""
if CONFIGURATION.wandb:
wandb.login(key=API_KEY)
def class2dict(f):
"""
        :param f: configuration class whose public attributes should be collected
        :return: dict mapping attribute names to their values
"""
return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__'))
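    # e.g. class2dict(CONFIGURATION) -> {'epochs': 5, 'batch_size': 4, ...}
    # (illustrative; only attributes not starting with '__' are included)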
run = wandb.init(
project=CONFIGURATION.competition,
name=CONFIGURATION.model,
config=class2dict(CONFIGURATION),
group=CONFIGURATION.model,
job_type="train",
)
| [
"torch.cuda.is_available"
] | 1.11.0 | mkingopng/NBME_score_clinical_patient_notes | 4ca9816be2665d7585ab0d168376a340aa800088 |
1.3 | from functools import reduce
import torch
import torch.nn.functional as F
from .td import generalized_lambda_returns
from ding.hpc_rl import hpc_wrapper
def tb_cross_entropy(logit, label, mask=None):
assert (len(label.shape) >= 2)
T, B = label.shape[:2]
# Special 2D case
if len(label.shape) > 2:
assert len(label.shape) == 3
s, n = logit.shape[-2:]
logit = logit.reshape(-1, n)
label = label.reshape(-1)
ce = -F.cross_entropy(logit, label, reduction='none')
ce = ce.view(T * B, -1)
if mask is not None:
ce *= mask.reshape(-1, s)
ce = ce.sum(dim=1)
ce = ce.reshape(T, B)
else:
label = label.reshape(-1)
logit = logit.reshape(-1, logit.shape[-1])
ce = -F.cross_entropy(logit, label, reduction='none')
ce = ce.reshape(T, B, -1)
ce = ce.mean(dim=2)
return ce
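# Shape note (added for clarity): with `logit` of shape (T, B, N) and `label` of shape
# (T, B) the result is a (T, B) tensor of negative cross-entropies; in the 3-D label
# case the per-unit terms are (optionally masked and) summed before reshaping to (T, B).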
def upgo_returns(rewards: torch.Tensor, bootstrap_values: torch.Tensor) -> torch.Tensor:
r"""
Overview:
Computing UPGO return targets. Also notice there is no special handling for the terminal state.
Arguments:
- rewards (:obj:`torch.Tensor`): the returns from time step 0 to T-1, \
of size [T_traj, batchsize]
- bootstrap_values (:obj:`torch.Tensor`): estimation of the state value at step 0 to T, \
of size [T_traj+1, batchsize]
Returns:
- ret (:obj:`torch.Tensor`): Computed lambda return value for each state from 0 to T-1, \
of size [T_traj, batchsize]
"""
# UPGO can be viewed as a lambda return! The trace continues for V_t (i.e. lambda = 1.0) if r_tp1 + V_tp2 > V_tp1.
# as the lambdas[-1, :] is ignored in generalized_lambda_returns, we don't care about bootstrap_values_tp2[-1]
lambdas = (rewards + bootstrap_values[1:]) >= bootstrap_values[:-1]
lambdas = torch.cat([lambdas[1:], torch.ones_like(lambdas[-1:])], dim=0)
return generalized_lambda_returns(bootstrap_values, rewards, 1.0, lambdas)
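# Illustrative sketch (hypothetical numbers, not part of the original module): after the
# shift, the trace at step t keeps lambda = 1 whenever r_{t+1} + V_{t+2} >= V_{t+1},
# i.e. it keeps bootstrapping through "good" transitions and falls back to the
# one-step target otherwise. For example:
#   rewards = torch.tensor([[1.0], [0.0], [0.0]])          # (T_traj=3, batch=1)
#   values = torch.tensor([[0.5], [0.2], [0.4], [0.1]])    # (T_traj+1, batch)
#   upgo_returns(rewards, values)                           # -> (3, 1) UPGO targets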
@hpc_wrapper(
shape_fn=lambda args: args[0].shape,
namedtuple_data=True,
include_args=5,
include_kwargs=['target_output', 'rhos', 'action', 'rewards', 'bootstrap_values']
)
def upgo_loss(
target_output: torch.Tensor,
rhos: torch.Tensor,
action: torch.Tensor,
rewards: torch.Tensor,
bootstrap_values: torch.Tensor,
mask=None
) -> torch.Tensor:
r"""
Overview:
        Computing UPGO loss given constant gamma and lambda. There is no special handling for the terminal state value;
        if the last state in the trajectory is terminal, just pass 0 as the last entry of ``bootstrap_values``.
Arguments:
- target_output (:obj:`torch.Tensor`): the output computed by the target policy network, \
of size [T_traj, batchsize, n_output]
- rhos (:obj:`torch.Tensor`): the importance sampling ratio, of size [T_traj, batchsize]
- action (:obj:`torch.Tensor`): the action taken, of size [T_traj, batchsize]
- rewards (:obj:`torch.Tensor`): the returns from time step 0 to T-1, of size [T_traj, batchsize]
- bootstrap_values (:obj:`torch.Tensor`): estimation of the state value at step 0 to T, \
of size [T_traj+1, batchsize]
Returns:
- loss (:obj:`torch.Tensor`): Computed importance sampled UPGO loss, averaged over the samples, of size []
"""
# discard the value at T as it should be considered in the next slice
with torch.no_grad():
returns = upgo_returns(rewards, bootstrap_values)
advantages = rhos * (returns - bootstrap_values[:-1])
metric = tb_cross_entropy(target_output, action, mask)
assert (metric.shape == action.shape[:2])
losses = advantages * metric
return -losses.mean()
| [
"torch.no_grad",
"torch.nn.functional.cross_entropy",
"torch.ones_like"
] | 1.3.1 | uuid0000/DI-engine | cc2713fa01e5288bae21cfeb595729d665e092d1 |
1.4 | import torch
import os
from PIL import Image
from xml.dom.minidom import parse
import numpy as np
import utilities.transforms as T
class FacialDataset(object):
def __init__(self, root, transforms, train=True):
self.root = root
self.transforms = transforms
# load all image files, sorting them to
# ensure that they are aligned
self.train = train
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
self.annotations = list(sorted(os.listdir(os.path.join(root, "annotations"))))
def __getitem__(self, idx):
# load images
img_path = os.path.join(self.root+"/images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotation_path = os.path.join(self.root+"/annotations", self.annotations[idx])
dom = parse(annotation_path)
root = dom.documentElement
objects = root.getElementsByTagName("object")
size = root.getElementsByTagName("size")[0]
image_width = int(size.getElementsByTagName("width")[0].childNodes[0].data)
image_height = int(size.getElementsByTagName("height")[0].childNodes[0].data)
masks = np.zeros((len(objects), image_width, image_height))
boxes = []
labels = []
box_num = 0
for box in objects:
cls_name = box.getElementsByTagName("name")[0].childNodes[0].data
xmin = int(box.getElementsByTagName("xmin")[0].childNodes[0].data)
ymin = int(box.getElementsByTagName("ymin")[0].childNodes[0].data)
xmax = int(box.getElementsByTagName("xmax")[0].childNodes[0].data)
ymax = int(box.getElementsByTagName("ymax")[0].childNodes[0].data)
boxes.append([xmin, ymin, xmax, ymax])
if cls_name=="without_mask":
labels.append(1)
else:
labels.append(2)
for i in range(xmin, min(xmax+1, image_width)):
for j in range(ymin, min(ymax+1, image_height)):
masks[box_num, i, j] = 1
box_num += 1
# convert everything into a torch.Tensor
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.as_tensor(labels, dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = torch.as_tensor((boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]), dtype=torch.float32)
# iscrowd is needed in evaluation, which converts everything into coco datatype
iscrowd = torch.zeros((len(objects),), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
def get_transform(horizontal_flip):
transforms = []
# converts the image, a PIL image, into a PyTorch Tensor
transforms.append(T.ToTensor())
if horizontal_flip:
transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
| [
"torch.as_tensor",
"torch.tensor"
] | 1.4.0 | jerinka/traffic_sign_frcnn | 3a6cd77af965ea88de61d6718a4f539f58b46a13 |
1.5 | from PIL import Image
import requests
import matplotlib.pyplot as plt
import torch
from torch import nn
from torchvision.models import resnet50
import torchvision.transforms as T
torch.set_grad_enabled(False);
import gradio as gr
import io
model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True)
# COCO classes
CLASSES = [
'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',
'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush'
]
# colors for visualization
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
[0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]
# standard PyTorch mean-std input image normalization
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
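# Worked example (made-up numbers, added for illustration): a normalized cxcywh box
# (0.5, 0.5, 0.2, 0.4) on a 640x480 image maps to xyxy pixel coordinates
# (256, 144, 384, 336) after rescale_bboxes.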
def fig2img(fig):
"""Convert a Matplotlib figure to a PIL Image and return it"""
buf = io.BytesIO()
fig.savefig(buf)
buf.seek(0)
return Image.open(buf)
def plot_results(pil_img, prob, boxes):
plt.figure(figsize=(16,10))
plt.imshow(pil_img)
ax = plt.gca()
colors = COLORS * 100
for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
fill=False, color=c, linewidth=3))
cl = p.argmax()
text = f'{CLASSES[cl]}: {p[cl]:0.2f}'
ax.text(xmin, ymin, text, fontsize=15,
bbox=dict(facecolor='yellow', alpha=0.5))
plt.axis('off')
return fig2img(plt)
def detr(im):
# mean-std normalize the input image (batch-size: 1)
img = transform(im).unsqueeze(0)
# propagate through the model
outputs = model(img)
# keep only predictions with 0.7+ confidence
probas = outputs['pred_logits'].softmax(-1)[0, :, :-1]
keep = probas.max(-1).values > 0.9
# convert boxes from [0; 1] to image scales
bboxes_scaled = rescale_bboxes(outputs['pred_boxes'][0, keep], im.size)
return plot_results(im, probas[keep], bboxes_scaled)
inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = gr.outputs.Image(type="pil",label="Output Image")
examples = [
['horses.jpg'],
['pandas.jpg']
]
title = "DETR"
description = "demo for Facebook DETR. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2005.12872'>End-to-End Object Detection with Transformers</a> | <a href='https://github.com/facebookresearch/detr'>Github Repo</a></p>"
gr.Interface(detr, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
| [
"torch.stack",
"torch.tensor",
"torch.set_grad_enabled",
"torch.hub.load"
] | 1.5.0 | AK391/detr | 112396eec6b70b6bd1bb180d91c6e3b0e391cdc6 |
1.1 | """
The trainer handles all the logic for running a val loop, training loop, distributing, etc.. .
"""
import os
import sys
import warnings
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
from torch.optim.optimizer import Optimizer
from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
from pytorch_lightning.trainer.distrib_parts import (
TrainerDPMixin,
parse_gpu_ids,
determine_root_gpu_device
)
from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin
from pytorch_lightning.trainer.trainer_io import TrainerIOMixin
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.utilities.debugging import MisconfigurationException
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
class Trainer(TrainerIOMixin,
TrainerDPMixin,
TrainerDDPMixin,
TrainerLoggingMixin,
TrainerModelHooksMixin,
TrainerTrainingTricksMixin,
TrainerDataLoadingMixin,
TrainerAMPMixin,
TrainerEvaluationLoopMixin,
TrainerTrainLoopMixin,
TrainerCallbackConfigMixin,
):
def __init__(
self,
logger=True,
checkpoint_callback=True,
early_stop_callback=True,
default_save_path=None,
gradient_clip_val=0,
gradient_clip=None, # backward compatible, todo: remove in v0.8.0
process_position=0,
nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0
num_nodes=1,
gpus=None,
log_gpu_memory=None,
show_progress_bar=True,
overfit_pct=0.0,
track_grad_norm=-1,
check_val_every_n_epoch=1,
fast_dev_run=False,
accumulate_grad_batches=1,
max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
max_num_epochs=1000,
min_num_epochs=1,
train_percent_check=1.0,
val_percent_check=1.0,
test_percent_check=1.0,
val_check_interval=1.0,
log_save_interval=100,
row_log_interval=10,
add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0
distributed_backend=None,
use_amp=False,
print_nan_grads=False,
weights_summary='full',
weights_save_path=None,
amp_level='O1',
nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0
num_sanity_val_steps=5,
truncated_bptt_steps=None,
resume_from_checkpoint=None,
):
"""
:param logger: Logger for experiment tracking
:param checkpoint_callback: Callback for checkpointing
:param early_stop_callback: Callback for early stopping
:param str default_save_path: Default path for logs+weights if no logger/ckpt_callback passed
:param int gradient_clip_val: 0 means don't clip.
:param int gradient_clip: 0 means don't clip. Deprecated.
:param process_position: shown in the tqdm bar
:param int num_nodes: number of GPU nodes
:param list|str|int gpus: int. (ie: 2 gpus) OR list to specify which GPUs [0, 1] OR '0,1'
OR '-1' / -1 to use all available gpus
:param str log_gpu_memory: None, 'min_max', 'all'
:param bool show_progress_bar: If true shows tqdm bar
:param float overfit_pct: uses this much of all datasets
:param int track_grad_norm: -1 no tracking. Otherwise tracks that norm
:param int check_val_every_n_epoch: check val every n train epochs
:param bool fast_dev_run: runs full iteration over everything to find bugs
:param int accumulate_grad_batches: Accumulates grads every k batches
:param int max_num_epochs:
:param int min_num_epochs:
:param int train_percent_check: How much of train set to check
:param int val_percent_check: How much of val set to check
:param int test_percent_check: How much of test set to check
        :param float|int val_check_interval: If float, fraction of the training epoch. If int, check every n batches
:param int log_save_interval: Writes logs to disk this often
:param int row_log_interval: How often to add logging rows
:param int add_row_log_interval: How often to add logging rows. Deprecated.
:param str distributed_backend: Options: 'dp', 'ddp', 'ddp2'.
:param bool use_amp: If true uses apex for 16bit precision
:param bool print_nan_grads: Prints nan gradients
:param str weights_summary: Options: 'full', 'top', None to not print.
:param bool weights_save_path: Where to save weights if on cluster
:param str amp_level: Check nvidia docs for level
:param int num_sanity_val_steps: How many val steps before a full train loop.
:param int truncated_bptt_steps: Enables multiple backward passes for each batch.
.. warning:: Following arguments become deprecated and they will be removed in v0.8.0:
- `gradient_clip`,
- `nb_gpu_nodes`,
- `max_nb_epochs`,
- `min_nb_epochs`,
- `add_row_log_interval`,
- `nb_sanity_val_steps`
"""
# Transfer params
if nb_gpu_nodes is not None: # Backward compatibility
warnings.warn("`nb_gpu_nodes` has been renamed to `num_nodes` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_nodes: # in case you did not set the proper value
num_nodes = nb_gpu_nodes
self.num_gpu_nodes = num_nodes
self.log_gpu_memory = log_gpu_memory
if gradient_clip is not None: # Backward compatibility
warnings.warn("`gradient_clip` has been renamed to `gradient_clip_val` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not gradient_clip_val: # in case you did not set the proper value
gradient_clip_val = gradient_clip
self.gradient_clip_val = gradient_clip_val
self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
self.process_position = process_position
self.weights_summary = weights_summary
if max_nb_epochs is not None: # Backward compatibility
warnings.warn("`max_nb_epochs` has been renamed to `max_num_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not max_num_epochs: # in case you did not set the proper value
max_num_epochs = max_nb_epochs
self.max_num_epochs = max_num_epochs
if min_nb_epochs is not None: # Backward compatibility
warnings.warn("`min_nb_epochs` has been renamed to `min_num_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not min_num_epochs: # in case you did not set the proper value
min_num_epochs = min_nb_epochs
self.min_num_epochs = min_num_epochs
if nb_sanity_val_steps is not None: # Backward compatibility
warnings.warn("`nb_sanity_val_steps` has been renamed to `num_sanity_val_steps` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_sanity_val_steps: # in case you did not set the proper value
num_sanity_val_steps = nb_sanity_val_steps
self.num_sanity_val_steps = num_sanity_val_steps
self.print_nan_grads = print_nan_grads
self.truncated_bptt_steps = truncated_bptt_steps
self.resume_from_checkpoint = resume_from_checkpoint
self.shown_warnings = set()
self.fast_dev_run = fast_dev_run
if self.fast_dev_run:
self.num_sanity_val_steps = 1
self.max_num_epochs = 1
m = '''
Running in fast_dev_run mode: will run a full train,
val loop using a single batch
'''
logging.info(m)
# set default save path if user didn't provide one
self.default_save_path = default_save_path
if self.default_save_path is None:
self.default_save_path = os.getcwd()
# training bookkeeping
self.total_batch_idx = 0
self.running_loss = []
self.avg_loss = 0
self.batch_idx = 0
self.tqdm_metrics = {}
self.callback_metrics = {}
self.num_val_batches = 0
self.num_training_batches = 0
self.num_test_batches = 0
self.get_train_dataloader = None
self.get_test_dataloaders = None
self.get_val_dataloaders = None
self.is_iterable_train_dataloader = False
# training state
self.model = None
self.testing = False
self.lr_schedulers = []
self.optimizers = None
self.global_step = 0
self.current_epoch = 0
self.total_batches = 0
# configure early stop callback
# creates a default one if none passed in
self.early_stop_callback = None
self.configure_early_stopping(early_stop_callback, logger)
self.reduce_lr_on_plateau_scheduler = None
# configure checkpoint callback
self.checkpoint_callback = checkpoint_callback
self.weights_save_path = weights_save_path
# accumulated grads
self.configure_accumulated_gradients(accumulate_grad_batches)
# allow int, string and gpu list
self.data_parallel_device_ids = parse_gpu_ids(gpus)
self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)
# distributed backend choice
self.use_ddp = False
self.use_ddp2 = False
self.use_dp = False
self.single_gpu = False
self.distributed_backend = distributed_backend
self.set_distributed_mode(distributed_backend, num_nodes)
# init flags for SLURM+ddp to work
self.proc_rank = 0
self.world_size = 1
self.node_rank = 0
self.configure_slurm_ddp(num_nodes)
# nvidia setup
self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)
# can't init progress bar here because starting a new process
# means the progress_bar won't survive pickling
self.show_progress_bar = show_progress_bar
# logging
self.log_save_interval = log_save_interval
self.val_check_interval = val_check_interval
if add_row_log_interval is not None:
# backward compatibility
warnings.warn("`add_row_log_interval` has been renamed to `row_log_interval` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not row_log_interval: # in case you did not set the proper value
row_log_interval = add_row_log_interval
self.row_log_interval = row_log_interval
# how much of the data to use
self.determine_data_use_amount(train_percent_check, val_percent_check,
test_percent_check, overfit_pct)
# 16 bit mixed precision training using apex
self.amp_level = amp_level
self.init_amp(use_amp)
@property
def slurm_job_id(self):
try:
job_id = os.environ['SLURM_JOB_ID']
job_id = int(job_id)
except Exception:
job_id = None
return job_id
def __parse_gpu_ids(self, gpus):
"""Parse GPUs id.
:param list|str|int gpus: input GPU ids
:return list(int):
"""
# if gpus = -1 then use all available devices
# otherwise, split the string using commas
if gpus is not None:
if isinstance(gpus, list):
gpus = gpus
elif isinstance(gpus, str):
if gpus == '-1':
gpus = list(range(0, torch.cuda.device_count()))
else:
gpus = [int(x.strip()) for x in gpus.split(',')]
elif isinstance(gpus, int):
gpus = gpus
else:
raise ValueError('`gpus` has to be a string, int or list of ints')
return gpus
def __set_root_gpu(self, gpus):
if gpus is None:
return None
# set root gpu
root_gpu = 0
if type(gpus) is list:
root_gpu = gpus[0]
return root_gpu
@property
def num_gpus(self):
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
else:
return len(gpus)
@property
def data_parallel(self):
return self.use_dp or self.use_ddp or self.use_ddp2
@property
def training_tqdm_dict(self):
"""Read-only for tqdm metrics.
:return:
"""
tqdm_dict = {
'loss': '{0:.3f}'.format(self.avg_loss),
'batch_idx': '{}'.format(self.batch_idx),
}
if self.truncated_bptt_steps is not None:
tqdm_dict['split_idx'] = self.split_idx
if self.logger is not None and self.logger.version is not None:
tqdm_dict['v_nb'] = self.logger.version
tqdm_dict.update(self.tqdm_metrics)
if self.on_gpu:
tqdm_dict['gpu'] = '{}'.format(torch.cuda.current_device())
return tqdm_dict
@property
def tng_tqdm_dic(self):
"""Read-only for tqdm metrics.
.. warning:: Deprecated in v0.5.0. use training_tqdm_dict instead.
:return:
"""
warnings.warn("`tng_tqdm_dic` has been renamed to `training_tqdm_dict` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
return self.training_tqdm_dict
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(self, model):
# when using multi-node or DDP within a node start each module in a separate process
if self.use_ddp2:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
elif self.use_ddp:
if self.is_slurm_managing_tasks:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
else:
mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
# 1 gpu or dp option triggers training using DP module
# easier to avoid NCCL issues
elif self.use_dp:
self.dp_train(model)
elif self.single_gpu:
self.single_gpu_train(model)
# ON CPU
else:
# run through amp wrapper
if self.use_amp:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
self.run_pretrain_routine(model)
# return 1 when finished
# used for testing or when we need to know that training succeeded
return 1
def init_optimizers(self, optimizers):
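# normalize whatever configure_optimizers() returned into (list of optimizers, list of lr schedulers)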
# single optimizer
if isinstance(optimizers, Optimizer):
return [optimizers], []
# two lists
elif len(optimizers) == 2 and isinstance(optimizers[0], list):
optimizers, lr_schedulers = optimizers
lr_schedulers, self.reduce_lr_on_plateau_scheduler = self.configure_schedulers(lr_schedulers)
return optimizers, lr_schedulers
# single list or tuple
elif isinstance(optimizers, list) or isinstance(optimizers, tuple):
return optimizers, []
def configure_schedulers(self, schedulers):
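# separate out the first ReduceLROnPlateau scheduler (it is stepped with a validation metric) from the regular schedulers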
for i, scheduler in enumerate(schedulers):
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
reduce_lr_on_plateau_scheduler = schedulers.pop(i)
return schedulers, reduce_lr_on_plateau_scheduler
return schedulers, None
def run_pretrain_routine(self, model):
"""Sanity check a few things before starting actual training.
:param model:
"""
ref_model = model
if self.data_parallel:
ref_model = model.module
# give model convenience properties
ref_model.trainer = self
# set local properties on the model
self.copy_trainer_model_properties(ref_model)
# link up experiment object
if self.logger is not None:
ref_model.logger = self.logger
# save exp to get started
if hasattr(ref_model, "hparams"):
self.logger.log_hyperparams(ref_model.hparams)
self.logger.save()
if self.use_ddp or self.use_ddp2:
dist.barrier()
# set up checkpoint callback
self.configure_checkpoint_callback()
# register auto-resubmit when on SLURM
self.register_slurm_signal_handlers()
# transfer data loaders from model
self.get_dataloaders(ref_model)
# print model summary
if self.proc_rank == 0 and self.weights_summary is not None:
if self.weights_summary in ['full', 'top']:
ref_model.summarize(mode=self.weights_summary)
else:
m = "weights_summary can be None, 'full' or 'top'"
raise MisconfigurationException(m)
# track model now.
# if cluster resets state, the model will update with the saved weights
self.model = model
# restore training and model before hpc call
self.restore_weights(model)
# when testing requested only run test and return
if self.testing:
self.run_evaluation(test=True)
return
# run tiny validation (if validation defined)
# to make sure program won't crash during val
ref_model.on_sanity_check_start()
if self.get_val_dataloaders() is not None and self.num_sanity_val_steps > 0:
# init progress bars for validation sanity check
pbar = tqdm.tqdm(desc='Validation sanity check', total=self.num_sanity_val_steps,
leave=False, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
self.main_progress_bar = pbar
# dummy validation progress bar
self.val_progress_bar = tqdm.tqdm(disable=True)
self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
# close progress bars
self.main_progress_bar.close()
self.val_progress_bar.close()
# init progress bar
pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
file=sys.stdout)
self.main_progress_bar = pbar
# clear cache before training
if self.on_gpu:
torch.cuda.empty_cache()
# CORE TRAINING LOOP
self.train()
def test(self, model=None):
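# run the test loop; when a model is given, fit() performs setup before evaluation runs in test mode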
self.testing = True
if model is not None:
self.fit(model)
else:
self.run_evaluation(test=True)
| [
"torch.multiprocessing.spawn",
"torch.cuda.current_device",
"torch.cuda.device_count",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.distributed.barrier"
] | 1.1 | YehCF/pytorch-lightning | 6666ca5af39aa2d3e5a483da3d7f6bb76514cc9f |
1.6 | import torch
from torch.utils.data import Dataset
from .data_utils import get_tokenizer, natural_sort, skip, FixedSizeOrderedDict
import random
import glob
import tensorflow as tf
import re
import logging
from itertools import cycle
import os
import subprocess
import simdjson as json
import hub
class HubAdapter(torch.utils.data.Dataset):
def __init__(self, ods):
self.ds = ods
@classmethod
def __instancecheck__(cls, instance):
return isinstance(instance, torch.utils.data.Dataset)
def __len__(self):
return len(self.ds)
# def __iter__(self):
# for i in range(len(self)):
# yield self[i]
def __getitem__(self, index):
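# return only the token ids of the sample, truncated to the first 1024 positions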
x = self.ds.__getitem__(index)
return x['text'][:1024]
def get_hub_dataset():
schema = hub.schema.SchemaDict({'text': hub.schema.Tensor(shape=(None,), dtype='int64', max_shape=(2049,))})
ds = hub.Dataset("snsi/pile_train0", schema=schema, shape=(100000,)).to_pytorch()
# ds = hub.Dataset("interneuron/pile_train0", shape=(None,)).to_pytorch()
return HubAdapter(ds)
class GPT2Dataset(Dataset):
def __init__(self, glob_pattern, seq_len, seed=1, shuffle_input_filenames=True, pretokenized=True,
filetype="tfrecords", mode="chunks", train=True, tokenizer=None, **kwargs):
super().__init__()
self.files = glob.glob(glob_pattern) # glob pattern pointing to files
self.seed = seed # random seed for shuffling
# shuffle or sort files
if shuffle_input_filenames:
random.seed(self.seed)
random.shuffle(self.files)
else:
self.files = natural_sort(self.files)
self.filetype = filetype # filetype ["tfrecords"]
implemented_filetypes = ["tfrecords"]
if self.filetype not in implemented_filetypes:
raise NotImplementedError
self.processed_files = FixedSizeOrderedDict(max=1) # storage for lazily loading data
# parses the length of the files, either by encoding in the filenames or by iterating over them
self._get_lens()
self.seq_len = seq_len # set sequence length
self.mode = mode # set mode ["chunks"]
implemented_modes = ["chunks"]
if self.mode not in implemented_modes:
raise NotImplementedError
self.pretokenized = pretokenized
if not self.pretokenized:
raise NotImplementedError # TODO: tokenize text data on the fly
self.train = train
def _get_number_of_documents(self, filename):
# extracts number of files from a filename formatted "<name>_<num_documents>.{filetype}."
# if no pattern is matched, returns None
match = re.search(r"_(\d{1,})\." + self.filetype + "$", filename)
return int(match.group(1)) if match is not None else match
def _get_number_of_documents_by_iteration(self, filename):
# extracts number of files from a tfrecord document in the event it doesn't have metadata in the filename
# this could be very slow.
logging.warning(
"Found no metadata found in filename - iterating through first tfrecord to find global length")
count = 0
if self.filetype == "tfrecords":
for _ in tf.io.tf_record_iterator(filename):
count += 1
return count
def _get_lens(self):
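# count documents per file, preferring the count encoded in the filename over a full iteration of the tfrecord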
lens = []
for f in self.files:
n_documents = self._get_number_of_documents(f)
if n_documents is None:
n_documents = self._get_number_of_documents_by_iteration(f)
lens.append(n_documents)
self.lens = lens
self._len = sum(self.lens)
def _parse_function(self, example_proto):
features = {
"text": tf.io.VarLenFeature(tf.int64)
}
parsed_features = tf.io.parse_single_example(example_proto, features)
return tf.sparse.to_dense(parsed_features["text"], parsed_features["text"].dense_shape[0])
def _process_tfrecord(self, tfrecords_file, resume_idx=None):
dataset = tf.data.TFRecordDataset([tfrecords_file])
dataset = dataset.map(self._parse_function, num_parallel_calls=1)
for example in dataset.as_numpy_iterator():
yield torch.tensor(example, dtype=torch.long)
def _maybe_process_tfrecord(self, file_idx):
if self.processed_files.get(file_idx) is None:
self.processed_files[file_idx] = list(self._process_tfrecord(self.files[file_idx]))
return self.processed_files[file_idx]
def _seek(self, idx):
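# walk the cumulative per-file document counts to find which file holds document idx and the remaining offset within it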
cumsum = 0
for count, (f, length) in cycle(enumerate(zip(self.files, self.lens))):
prev_cumsum = cumsum
cumsum += length
if cumsum == idx:
remainder = 0
skip_idx = count + 1
return skip_idx, remainder
elif cumsum > idx:
remainder = idx - prev_cumsum
skip_idx = count
return skip_idx, remainder
def __getitem__(self, idx):
# seek to correct chunk
seek_idx, remainder = self._seek(idx)
f = self.files[seek_idx]
if self.filetype == "tfrecords":
chunk = self._maybe_process_tfrecord(
seek_idx) # parses tfrecord file to a list *once* then stores in memory
else:
raise NotImplementedError
return chunk[remainder] # get item from current chunk
def __len__(self):
return self._len
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
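# crop a random window of seq_len + 1 tokens (inputs plus next-token targets) from the flat token tensor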
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
class DynamicDataset(Dataset):
def __init__(self, input_files, tokenizer, max_seq_len, target_field='text', seed=1, shuffle_files=True, **kwargs):
super().__init__()
self.files = []
self.setup_files(input_files)
if shuffle_files:
random.seed(seed)
random.shuffle(self.files)
self.create_pipeline()
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
self.target_field = target_field
self.parser = json.Parser()
self.idx = 0
def setup_files(self, input_files):
if isinstance(input_files, str):
if input_files.endswith('*'):
self.files = glob.glob(input_files)
elif os.path.isdir(input_files):
self.files = glob.glob(os.path.join(input_files, '*'))
elif isinstance(input_files, list):
for file_path in input_files:
if os.path.isfile(file_path) and os.path.exists(file_path):
self.files.append(file_path)
elif file_path.endswith('*'):
self.files.extend(glob.glob(file_path))
elif os.path.isdir(file_path):
self.files.extend(glob.glob(os.path.join(file_path, '*')))
self.total_files = len(self.files)
self.file_idx, self.total_lines = {}, 0
for file_path in self.files:
total_lines = self.total_lines_in_file(file_path)
self.file_idx[file_path] = total_lines
self.total_lines += total_lines
logging.info(f'Total Files: {self.total_files}. Total Lines: {self.total_lines}')
def create_pipeline(self):
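# stream raw text lines from all input files using TensorFlow's TextLineDataset with parallel reads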
self.pipeline = tf.data.TextLineDataset(self.files, num_parallel_reads=tf.data.experimental.AUTOTUNE).as_numpy_iterator()
def parse_json(self, line):
try:
return self.parser.parse(line).as_dict()
except ValueError:
return line
@classmethod
def total_lines_in_file(cls, file_path):
return int(subprocess.check_output(['wc', '-l', file_path]).split()[0])
def tokenize_example(self, ex):
self.idx += 1
return self.tokenizer(ex[self.target_field], max_length=self.max_seq_len, truncation=True, return_tensors='pt')['input_ids']
def __getitem__(self, idx):
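# idx is unused: read the next line from the sequential pipeline, rebuilding the pipeline once the files are exhausted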
try:
ex = next(self.pipeline)
except StopIteration:
del self.pipeline
self.create_pipeline()
ex = next(self.pipeline)
return self.tokenize_example(self.parse_json(ex))
def __len__(self):
return self.total_lines | [
"torch.tensor"
] | 1.6 | raijinspecial/gpt-neox | 89749e0b76938fa1ff84a3dd1cbcbe64521d861b |
1.0 | import json
import argparse
import os
import time
import torch
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
import chart_studio
import chart_studio.plotly as py
import plotly.io as pio
import plotly.graph_objects as go
import plotly.figure_factory as ff
chart_studio.tools.set_credentials_file(username=
'jxhe', api_key='Bm0QOgX4fQf3bULtkpzZ')
chart_studio.tools.set_config_file(world_readable=True,
sharing='public')
def read_input(keys, vals):
def parse_fname(fname):
x = '.'.join(fname.split('.')[:-1])
x = x.split('/')[-1]
x = x.split('.')
size = int(x[-2].split('size')[-1])
embed = int(x[-1].split('hid')[-1])
return size, embed
size, embed = parse_fname(keys)
keys = np.memmap(keys,
dtype=np.float32,
mode='r',
shape=(size, embed))
val_data = []
with open(vals, 'r') as fin:
for line in fin:
s = json.loads(line.strip())
if s[0] != ' ' and s != '\n':
s = f'@{s}'
val_data.append(s)
return keys, val_data
def plot_html(keys,
vals,
start_id,
length,
output_dir,
fid='',
vis_feat='l2',
extra=None):
vis_size = length
vis_shape = (vis_size // 20, 20)
num_vis = vis_shape[0] * vis_shape[1]
symbol = np.reshape(vals[:num_vis], vis_shape)
keys = keys[:num_vis]
def cosine_dist(keys):
dist = [cosine(keys[1], keys[0])]
for i in range(1, num_vis):
dist.append(cosine(keys[i], keys[i-1]))
return np.array(dist)
def l2_dist(keys):
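# L2 distance between consecutive key vectors; the first entry repeats the first difference as a placeholder for index 0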
diff = [keys[1] - keys[0]]
for i in range(1, num_vis):
diff.append(keys[i] - keys[i-1])
diff = np.array(diff)
return np.linalg.norm(diff, axis=-1)
def hyper_z(keys):
index = extra['model'].coef_[0].nonzero()[0]
print(f'{index.shape[0]} neurons')
w = extra['model'].coef_[0][index]
x = keys[:, index]
b = extra['model'].intercept_[0]
# the distance between x0 and hyperplane wx+b=0 is
# |wx0+b| / |w|
return (np.dot(x, w) + b) / np.sqrt(np.dot(w, w))
# import pdb; pdb.set_trace()
if vis_feat == 'hyper_z':
hyper_z = hyper_z(keys)
else:
hyper_z = None
l2_d = l2_dist(keys)
cosine_d = cosine_dist(keys)
norm = np.linalg.norm(keys, axis=-1)
shift_norm = np.zeros(norm.shape)
shift_norm[1:] = norm[:-1]
shift_norm[0] = shift_norm[1]
relative_l2 = l2_d / shift_norm
if vis_feat == 'hyper_z':
hyper_z = np.reshape(hyper_z, vis_shape)
else:
hyper_z = None
l2_d = np.reshape(l2_d, vis_shape)
cosine_d = np.reshape(cosine_d, vis_shape)
norm = np.reshape(norm, vis_shape)
relative_l2 = np.reshape(relative_l2, vis_shape)
# Display element name and atomic mass on hover
hover=[]
for i in range(vis_shape[0]):
local = []
for j in range(vis_shape[1]):
text = ''
text += f'norm: {norm[i, j]:.3f} <br>'
text += f'l2_d: {l2_d[i, j]:.3f} <br>'
text += f'relative_l2_d: {relative_l2[i, j]:.3f} <br>'
text += f'cosine_d: {cosine_d[i, j]:.3f} <br>'
text += f'hyper_z: {hyper_z[i, j]:.3f} <br>' if hyper_z is not None else 'none'
local.append(text)
hover.append(local)
# Invert Matrices
symbol = symbol[::-1]
hover = hover[::-1]
plot_args = {'colorscale': 'inferno'}
if vis_feat == 'l2':
z = l2_d
elif vis_feat == 'cosine':
z = cosine_d
z[z<0.15] = 0.15
elif vis_feat == 'norm':
z = norm
elif vis_feat == 'relative_l2':
z = relative_l2
z[z<0.1] = 1000
z[z==1000] = 1
elif vis_feat == 'hyper_z':
z = hyper_z
# z = z.flatten()
z = np.clip(z, -2.5, 2.5)
plot_args = {'colorscale': 'RdYlGn', 'font_colors': ['black']}
else:
raise ValueError
z = z[::-1]
# Set Colorscale
# colorscale=[[0.0, 'rgb(255,255,255)'], [.2, 'rgb(255, 255, 153)'],
# [.4, 'rgb(153, 255, 204)'], [.6, 'rgb(179, 217, 255)'],
# [.8, 'rgb(240, 179, 255)'],[1.0, 'rgb(255, 77, 148)']]
# Make Annotated Heatmap
fig = ff.create_annotated_heatmap(z, annotation_text=symbol, text=hover,
hoverinfo='text', **plot_args)
fig.update_layout(title_text=f'gpt2-large visualizing {vis_feat} from {fid}')
fid = fid.split('/')[-1]
fid = '.'.join(fid.split('.')[:-1])
if output_dir:
pio.write_html(fig, file=os.path.join(output_dir, f'gpt2_vis_{vis_feat}_{fid}.html'))
else:
py.plot(fig, filename=f'gpt2_vis_{vis_feat}_{fid}.html')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--vis-feat', type=str, default='l2',
choices=['l2', 'cosine', 'hyper_z'],
help='the feature used to reflect colors of the heatmap')
parser.add_argument('--key', type=str, default='features.jsonl',
help='the input jsonl file')
parser.add_argument('--val', type=str, default='features.jsonl',
help='the input jsonl file')
parser.add_argument('--max-tok', type=int, default=1e5,
help='maximum number of tokens to visualize')
parser.add_argument('--output_dir', type=str, default=None,
help='the output html directory. If not set, the figures would \
be uploaded to plotly chart studio')
parser.add_argument('--extra-path', type=str, default=None,
help='some extra files to load for visualization, depending \
on the specific mode of vis_feat')
# parser.add_argument('--prefix', type=str, default='',
# help='the prefix of outputs files, to distinguish in case')
args = parser.parse_args()
np.random.seed(22)
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
print('reading features')
start = time.time()
keys, vals = read_input(args.key, args.val)
print(f'reading features complete, costing {time.time() - start} seconds')
length = min(len(keys), args.max_tok)
keys = keys[:length]
vals = vals[:length]
extra = torch.load(args.extra_path) if args.extra_path is not None else None
plot_html(keys, vals, 0, length, args.output_dir,
vis_feat=args.vis_feat, fid=args.key, extra=extra)
| [
"torch.load"
] | 1.0 | MGheini/unify-parameter-efficient-tuning | 3222ce2c0079566a28043e22380eb4ab6ad14389 |
1.9 | import torch.nn as nn
from torch.autograd import Variable
from collections import defaultdict
from .layers.PRM import Residual as ResidualPyramid
from .layers.Residual import Residual as Residual
from common.arguments import sppe_args
opt = sppe_args()
class Hourglass(nn.Module):
def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C):
super(Hourglass, self).__init__()
self.ResidualUp = ResidualPyramid if n >= 2 else Residual
self.ResidualDown = ResidualPyramid if n >= 3 else Residual
self.depth = n
self.nModules = nModules
self.nFeats = nFeats
self.net_type = net_type
self.B = B
self.C = C
self.inputResH = inputResH
self.inputResW = inputResW
self.up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW)
self.low1 = nn.Sequential(
nn.MaxPool2d(2),
self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
)
if n > 1:
self.low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C)
else:
self.low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
self.low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2)
self.up2 = nn.UpsamplingNearest2d(scale_factor=2)
self.upperBranch = self.up1
self.lowerBranch = nn.Sequential(
self.low1,
self.low2,
self.low3,
self.up2
)
def _make_residual(self, resBlock, useConv, inputResH, inputResW):
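# stack nModules residual blocks of the requested type at a fixed spatial resolution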
layer_list = []
for i in range(self.nModules):
layer_list.append(resBlock(self.nFeats, self.nFeats, inputResH, inputResW,
stride=1, net_type=self.net_type, useConv=useConv,
baseWidth=self.B, cardinality=self.C))
return nn.Sequential(*layer_list)
def forward(self, x: Variable):
up1 = self.upperBranch(x)
up2 = self.lowerBranch(x)
out = up1 + up2
return out
class PyraNet(nn.Module):
def __init__(self):
super(PyraNet, self).__init__()
B, C = opt.baseWidth, opt.cardinality
self.inputResH = opt.inputResH / 4
self.inputResW = opt.inputResW / 4
self.nStack = opt.nStack
self.cnv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(True)
)
self.r1 = nn.Sequential(
ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2,
stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C),
nn.MaxPool2d(2)
)
self.r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW,
stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
self.r5 = ResidualPyramid(128, opt.nFeats, self.inputResH, self.inputResW,
stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
self.preact = nn.Sequential(
self.cnv1,
self.r1,
self.r4,
self.r5
)
# register the per-stack layers as submodules so their parameters are tracked and moved with the model
self.stack_layers = nn.ModuleDict({k: nn.ModuleList() for k in ('lin', 'out', 'lin_', 'out_')})
for i in range(self.nStack):
hg = Hourglass(4, opt.nFeats, opt.nResidual, self.inputResH, self.inputResW, 'preact', B, C)
lin = nn.Sequential(
hg,
nn.BatchNorm2d(opt.nFeats),
nn.ReLU(True),
nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(opt.nFeats),
nn.ReLU(True)
)
tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses, kernel_size=1, stride=1, padding=0)
self.stack_layers['lin'].append(lin)
self.stack_layers['out'].append(tmpOut)
if i < self.nStack - 1:
lin_ = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0)
tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats, kernel_size=1, stride=1, padding=0)
self.stack_layers['lin_'].append(lin_)
self.stack_layers['out_'].append(tmpOut_)
def forward(self, x: Variable):
out = []
inter = self.preact(x)
for i in range(self.nStack):
lin = self.stack_layers['lin'][i](inter)
tmpOut = self.stack_layers['out'][i](lin)
out.append(tmpOut)
if i < self.nStack - 1:
lin_ = self.stack_layers['lin_'][i](lin)
tmpOut_ = self.stack_layers['out_'][i](tmpOut)
inter = inter + lin_ + tmpOut_
return out
def createModel(**kw):
model = PyraNet()
return model
| [
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.UpsamplingNearest2d"
] | 1.9.0 | fsImageries/video-to-pose3D | 098c87ce19dc3331da03e6eac0b9744684eb66f6 |
1.6 | from typing import Iterable, Dict, List
import torch
from einops import rearrange, repeat
from torch import Tensor
from torch import nn
from torch.nn import Identity
from perceiver_pytorch.caching import cache_by_name_fn
from perceiver_pytorch.modalities import InputModality, modality_encoding
from perceiver_pytorch.perceiver_pytorch import PreNorm, Attention, FeedForward, cache_fn, fourier_encode, \
FeedForwardGELU
from perceiver_pytorch.common import build_perceiver_layers
# An implementation of Perceiver that can accept multiple data modalities in the same forward.
class MultiModalityPerceiver(nn.Module):
def __init__(
self,
*,
modalities: Iterable[InputModality],
depth,
num_latents=512,
latent_dim=512,
cross_heads=1,
latent_heads=8,
cross_dim_head=64,
latent_dim_head=64,
num_classes=None,
attn_dropout=0.,
ff_dropout=0.,
weight_tie_layers=False,
num_latent_blocks_per_layer=1,
use_gelu: bool = False,
):
"""
:param modalities:
:param depth: Number of times the perceiver will perform cross-attention between latent and input.
:param num_latents:
:param latent_dim:
:param cross_heads:
:param latent_heads:
:param cross_dim_head:
:param latent_dim_head:
:param num_classes: Number of classes to predict, or if None, return the hidden state (num latents x hidden_dim)
:param attn_dropout:
:param ff_dropout:
:param weight_tie_layers: True: share weights across layers, False no shared weights.
:param num_latent_blocks_per_layer: Number of blocks in the latent transformer.
:param use_gelu: Use GELU activation like the Perceiver preprint indicates. False,
with Lucidrains' GEGLU activation in feed forward instead.
"""
super().__init__()
self.modalities = {modality.name: modality for modality in modalities}
# we encode modality with one hot encoding, so need one dim per modality:
modality_encoding_dim = sum([1 for _ in modalities])
# input_dim is the maximum dimension over all input modalities:
input_dim = max(modality.input_dim for modality in modalities) + modality_encoding_dim
self.max_modality_dim = input_dim
self.latents = nn.Parameter(torch.randn(num_latents, latent_dim))
ff_type = FeedForwardGELU if use_gelu else FeedForward
get_cross_attn = lambda: PreNorm(latent_dim,
Attention(latent_dim, input_dim, heads=cross_heads, dim_head=cross_dim_head,
dropout=attn_dropout), context_dim=input_dim)
get_cross_ff = lambda: PreNorm(latent_dim, ff_type(latent_dim, dropout=ff_dropout))
get_latent_attn = lambda: PreNorm(latent_dim,
Attention(latent_dim, heads=latent_heads, dim_head=latent_dim_head,
dropout=attn_dropout))
get_latent_ff = lambda: PreNorm(latent_dim, ff_type(latent_dim, dropout=ff_dropout))
get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff = map(cache_by_name_fn, (
get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff))
self.layers = nn.ModuleList([])
build_perceiver_layers(self.layers, depth, get_cross_attn, get_cross_ff,
get_latent_attn, get_latent_ff,
weight_tie_layers,
num_latent_blocks_per_layer=num_latent_blocks_per_layer)
self.to_logits = nn.Sequential(
nn.LayerNorm(latent_dim),
nn.Linear(latent_dim, num_classes)
)
def forward(self, multi_modality_data: Dict[str, Tensor], mask=None):
"""
:param data: a dictionary where keys are modality names and Tensor contain a batch
of modality input data.
:param mask:
:return:
"""
batch_sizes = set()
num_modalities = len(multi_modality_data)
linearized_data = []
linearized_data_per_layer: Dict[int, List[Tensor]] = {}
for modality_index, modality_name in enumerate(sorted(multi_modality_data.keys())):
assert modality_name in self.modalities, f"modality {modality_name} was not defined in constructor"
data = multi_modality_data[modality_name]
modality = self.modalities[modality_name]
b, *axis, _, device = *data.shape, data.device
assert len(
axis) == modality.input_axis, f'input data must have the right number of axes for modality {modality_name}. ' \
f'Expected {modality.input_axis} while forward argument offered {len(axis)}'
batch_sizes.add(b)
assert len(batch_sizes) == 1, "batch size must be the same across all modalities"
# calculate fourier encoded positions in the range of [-1, 1], for all axes
# Figure out padding for this modality, given max dimension across all modalities:
padding_size = self.max_modality_dim - modality.input_dim - num_modalities
padding = torch.zeros(size=data.size()[0:-1] + (padding_size,)).to(device)
# concat to channels of data and flatten axis
modality_encodings = modality_encoding(b, axis, modality_index, num_modalities, device=device)
if modality.num_freq_bands > 0:
axis_pos = list(map(lambda size: torch.linspace(-1., 1., steps=size, device=device), axis))
pos = torch.stack(torch.meshgrid(*axis_pos), dim=-1)
enc_pos = fourier_encode(pos, modality.max_freq, modality.num_freq_bands, modality.freq_base)
enc_pos = rearrange(enc_pos, '... n d -> ... (n d)')
enc_pos = repeat(enc_pos, '... -> b ...', b=b)
else:
enc_pos = torch.zeros(data.shape).to(device)
to_concat = (data, padding, enc_pos, modality_encodings)
data = torch.cat(to_concat, dim=-1)
data = rearrange(data, 'b ... d -> b (...) d').to(device)
linearized_data.append(data)
b = batch_sizes.pop()
x = repeat(self.latents, 'n d -> b n d', b=b).to(device)
# Concatenate all the modalities:
data = torch.cat(linearized_data, dim=1)
for cross_attn, cross_ff, latent_transformer in self.layers:
x = cross_attn(x, context=data, mask=mask) + x
x = cross_ff(x) + x
x = latent_transformer(x) + x
x = self.pool(x)
return self.to_logits(x)
def pool(self, x):
"""
Perform pooling over latents.
:param x: batch x num_latents x latent_dim
:return: pooled x
"""
# implement global pooling
return x.mean(dim=-2)
class MultiModalityPerceiverNoPooling(MultiModalityPerceiver):
def __init__(self, *, modalities: Iterable[InputModality], depth,
num_latents=512, latent_dim=512, cross_heads=1,
latent_heads=8, cross_dim_head=64, latent_dim_head=64,
attn_dropout=0., ff_dropout=0.,
weight_tie_layers=False, num_latent_blocks_per_layer=1,
use_gelu: bool = True):
"""
Perceiver that returns hidden state. Makes it possible to configure pooling with
the result of forward.
:param modalities:
:param depth: Number of times the perceiver will perform cross-attention between latent and input.
:param num_latents:
:param latent_dim:
:param cross_heads:
:param latent_heads:
:param cross_dim_head:
:param latent_dim_head:
:param attn_dropout:
:param ff_dropout:
:param weight_tie_layers: True: share weights across layers, False no shared weights.
:param num_latent_blocks_per_layer: Number of blocks in the latent transformer.
:param use_gelu: Use GELU activation like the Perceiver preprint indicates. False,
with Lucidrains' GEGLU activation in feed forward instead.
"""
super().__init__(modalities=modalities, depth=depth, num_latents=num_latents, latent_dim=latent_dim,
cross_heads=cross_heads, latent_heads=latent_heads, cross_dim_head=cross_dim_head,
latent_dim_head=latent_dim_head, attn_dropout=attn_dropout, ff_dropout=ff_dropout,
weight_tie_layers=weight_tie_layers, num_latent_blocks_per_layer=num_latent_blocks_per_layer,
use_gelu=use_gelu, num_classes=1)
self.to_logits = Identity()
def pool(self, x):
"""
Do not pool.
:param x: batch x num_latents x latent_dim
:return: pooled x
"""
# no pooling
return x
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.nn.ModuleList",
"torch.linspace",
"torch.meshgrid",
"torch.randn"
] | 1.6 | frenkiboy/perceiver-pytorch | b07d5154c5dee63684c59f57d02a1b405701845f |
1.0 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModel
@require_torch
@require_sentencepiece
@require_tokenizers
class BortIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = AutoModel.from_pretrained("amazon/bort")
model.to(torch_device)
input_ids = torch.tensor(
[[0, 18077, 4082, 7804, 8606, 6195, 2457, 3321, 11, 10489, 16, 269, 2579, 328, 2]],
device=torch_device,
dtype=torch.long,
) # Schloß Nymphenburg in Munich is really nice!
output = model(input_ids)["last_hidden_state"]
expected_shape = torch.Size((1, 15, 1024))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0349, 0.0436, -1.8654], [-0.6964, 0.0835, -1.7393], [-0.9819, 0.2956, -0.2868]]],
device=torch_device,
dtype=torch.float,
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| [
"torch.Size",
"torch.allclose",
"torch.tensor"
] | 1.0 | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import os
import re
import numpy
from .file_utils import ExplicitEnum
from .utils import logging
logger = logging.get_logger(__name__)
class TransposeType(ExplicitEnum):
"""
Possible ...
"""
NO = "no"
SIMPLE = "simple"
CONV2D = "conv2d"
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove="", tf_weight_shape=None):
"""
Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModuleList)
return tuple with:
- pytorch model weight name
- transpose: `TransposeType` member indicating whether and how TF2.0 and PyTorch weights matrices should be
transposed with regards to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModuleList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
# Some weights have a single name without "/" such as final_logits_bias in BART
if len(tf_name) > 1:
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
if tf_name[-1] == "kernel" and tf_weight_shape is not None and tf_weight_shape.rank == 4:
# A simple heuristic to detect conv layer using weight array shape
transpose = TransposeType.CONV2D
elif bool(
tf_name[-1] in ["kernel", "pointwise_kernel", "depthwise_kernel"]
or "emb_projs" in tf_name
or "out_projs" in tf_name
):
transpose = TransposeType.SIMPLE
else:
transpose = TransposeType.NO
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# The SeparableConv1D TF layer contains two weights that are translated to PyTorch Conv1D here
if tf_name[-1] == "pointwise_kernel" or tf_name[-1] == "depthwise_kernel":
tf_name[-1] = tf_name[-1].replace("_kernel", ".weight")
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info(f"Loading PyTorch weights from {pt_path}")
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters")
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch checkpoints in a TF 2.0 model"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
"""Load pytorch state_dict in a TF 2.0 model."""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
missing_keys = []
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=symbolic_weight.shape
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
missing_keys.append(name)
continue
elif tf_model._keys_to_ignore_on_load_missing is not None:
# authorized missing keys don't have to be loaded
if any(re.search(pat, name) is not None for pat in tf_model._keys_to_ignore_on_load_missing):
continue
raise AttributeError(f"{name} not found in PyTorch model")
array = pt_state_dict[name].numpy()
if transpose is TransposeType.CONV2D:
# Conv2D weight:
# PT: (num_out_channel, num_in_channel, kernel[0], kernel[1])
# -> TF: (kernel[0], kernel[1], num_in_channel, num_out_channel)
array = numpy.transpose(array, axes=(2, 3, 1, 0))
elif transpose is TransposeType.SIMPLE:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
if list(symbolic_weight.shape) != list(array.shape):
try:
array = numpy.reshape(array, symbolic_weight.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning(f"Initialize TF weight {symbolic_weight.name}")
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info(f"Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.")
unexpected_keys = list(all_pytorch_weights)
if tf_model._keys_to_ignore_on_load_missing is not None:
for pat in tf_model._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if tf_model._keys_to_ignore_on_load_unexpected is not None:
for pat in tf_model._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the PyTorch model were not used when "
f"initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task "
f"or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect "
f"to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {tf_model.__class__.__name__} for predictions without further training."
)
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
"""
Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see
https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
from .modeling_tf_utils import load_tf_weights
logger.info(f"Loading TensorFlow weights from {tf_checkpoint_path}")
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
load_tf_weights(tf_model, tf_checkpoint_path)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
"""Load TF 2.0 model in a pytorch model"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
"""Load TF2.0 symbolic weights in a PyTorch model"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=tf_weight.shape
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
# Handle PyTorch shared weight (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError(f"{pt_weight_name} not found in TF 2.0 model")
array, transpose = tf_weights_map[pt_weight_name]
if transpose is TransposeType.CONV2D:
# Conv2D weight:
# TF: (kernel[0], kernel[1], num_in_channel, num_out_channel)
# -> PT: (num_out_channel, num_in_channel, kernel[0], kernel[1])
array = numpy.transpose(array, axes=(3, 2, 0, 1))
elif transpose is TransposeType.SIMPLE:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
if list(pt_weight.shape) != list(array.shape):
try:
array = numpy.reshape(array, pt_weight.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning(f"Initialize PyTorch weight {pt_weight_name}")
# Make sure we have a proper numpy array
if numpy.isscalar(array):
array = numpy.array(array)
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if pt_model._keys_to_ignore_on_load_missing is not None:
for pat in pt_model._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if pt_model._keys_to_ignore_on_load_unexpected is not None:
for pat in pt_model._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the TF 2.0 model were not used when "
f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect "
f"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model)."
)
else:
logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {pt_model.__class__.__name__} for predictions without further training."
)
logger.info(f"Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}")
return pt_model
| [
"torch.from_numpy",
"torch.load"
] | 1.0 | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 |
3 | import torch
import torch.nn as nn
import torch.nn.functional as F
import neural_renderer as nr
import numpy as np
import cv2
class DepthRenderer(nn.Module):
def __init__(self, cfg):
super(DepthRenderer, self).__init__()
min_depth = cfg.MODEL.MVSNET.MIN_DEPTH
max_depth = min_depth + (cfg.MODEL.MVSNET.DEPTH_INTERVAL \
* cfg.MODEL.MVSNET.NUM_DEPTHS)
self.renderer = nr.Renderer(
camera_mode='projection',
near=min_depth, far=max_depth,
anti_aliasing=False
)
fx, fy = cfg.MODEL.MVSNET.FOCAL_LENGTH
cx, cy = cfg.MODEL.MVSNET.PRINCIPAL_POINT
self.camera_k = torch.tensor(
[[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]],
dtype=torch.float32
)
self.dist_coeffs = torch.zeros(5, dtype=torch.float32)
# conversion from shapenet convention (East-Up_South)
# to renderer convention (East-Down-North)
# final rotation: R_renderer_shapenet * extrinsics
# inverse y and z, equivalent to inverse x, but gives positive z
rvec = np.array([np.pi, 0., 0.], dtype=np.float32)
R = cv2.Rodrigues(rvec)[0]
T = np.eye(4, dtype=np.float32)
T[:3, :3] = R
self.T_renderer_shapenet = torch.from_numpy(T)
self.T_shapenet_renderer = torch.inverse(self.T_renderer_shapenet)
def transform_to_renderer_frame(self, T_view_world):
"""
Args:
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (East-Up-South)
Returns:
- (batch x 4 x 4) transformation in renderer frame (East-Down-North)
"""
batch_size = T_view_world.size(0)
device = T_view_world.device
self.T_renderer_shapenet = self.T_renderer_shapenet.to(device)
self.T_shapenet_renderer = self.T_shapenet_renderer.to(device)
# change to correct shape (batched)
T_renderer_shapenet = self.T_renderer_shapenet \
.unsqueeze(0).expand(batch_size, -1, -1)
T_shapenet_renderer = self.T_shapenet_renderer \
.unsqueeze(0).expand(batch_size, -1, -1)
# inverse y and z, equivalent to inverse x, but gives positive z
T_view_world = torch.bmm(T_renderer_shapenet, T_view_world)
return T_view_world
def forward(self, coords, faces, extrinsics, image_shape):
"""
Multi-view rendering
Args:
- pred_coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape: shape of the depth image to be rendered
- extrinsics: (batch x view x 2 x 4 x 4) tensor
Returns:
- depth tensor batch x view x height x width
"""
batch_size = extrinsics.size(0)
num_views = extrinsics.size(1)
# augment views: size = (batch x view x vertices x 3)
coords_augmented = coords.unsqueeze(1).expand(-1, num_views, -1, -1) \
.contiguous()
# size = (batch x view x faces x 3)
faces_augmented = faces.unsqueeze(1).expand(-1, num_views, -1, -1) \
.contiguous()
depth_flattened = self.render_depth(
_flatten_batch_view(coords_augmented),
_flatten_batch_view(faces_augmented),
_flatten_batch_view(extrinsics),
image_shape
)
return _unflatten_batch_view(depth_flattened, batch_size)
def render_depth(self, coords, faces, T_view_world, image_shape):
"""
renders a batch of depths
Args:
- coords: (batch x vertices x 3) tensor
- faces: (batch x faces x 3) tensor
- image_shape: shape of the depth image to be rendered
- T_view_world: (batch x 4 x 4) transformation
in shapenet coordinates (EUS)
Returns:
- depth tensors of shape (batch x h x w)
"""
image_size = image_shape.max()
# This is not thread safe!
self.renderer.image_size = image_size
batch_size, num_points = coords.size()[:2]
# move to correct device
device = coords.device
self.camera_k = self.camera_k.to(device)
self.dist_coeffs = self.dist_coeffs.to(device)
faces = faces.type(torch.int32).to(device)
# change to correct shape (batches)
dist_coeffs = self.dist_coeffs.unsqueeze(0).expand(batch_size, -1)
# transformation stuffs
T_view_world = self.transform_to_renderer_frame(T_view_world)
R = T_view_world[:, :3, :3]
t = T_view_world[:, :3, 3].unsqueeze(1)
depth = self.renderer(
vertices=coords, faces=faces, mode='depth',
K=self.camera_k.unsqueeze(0), dist_coeffs=dist_coeffs,
R=R, t=t, orig_size=image_size
)
depth[depth <= self.renderer.near] = 0
depth[depth >= self.renderer.far] = 0
return depth
## Private utility functions
def _flatten_batch_view(tensor):
return tensor.view(-1, *(tensor.size()[2:]))
def _unflatten_batch_view(tensor, batch_size):
return tensor.view(batch_size, -1, *(tensor.size()[1:]))
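# Illustrative sketch (not part of the original module): how the two private
# helpers above cooperate for multi-view batching. Shapes are arbitrary examples.
def _example_flatten_unflatten():
    x = torch.zeros(2, 3, 4, 5)                 # (batch=2, views=3, H=4, W=5)
    flat = _flatten_batch_view(x)               # -> (6, 4, 5), fed to the renderer
    restored = _unflatten_batch_view(flat, 2)   # -> (2, 3, 4, 5)
    assert restored.shape == x.shape
    return restored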
| [
"torch.zeros",
"torch.inverse",
"torch.bmm",
"torch.from_numpy",
"torch.tensor"
] | 3 | rakeshshrestha31/meshmvs | e82cf0121ae49dd781d87d172218f41a882e3c04 |
0.3 | import argparse
import sys
import time
import math
import torch.nn as nn
import torch.optim as optim
from drnn import DRNN
from char_rnn.utils import *
from char_rnn.model import DRNN_Char
import warnings
warnings.filterwarnings("ignore") # Suppress the RunTimeWarning on unicode
parser = argparse.ArgumentParser(description='Sequence Modeling - Character Level Language Model')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size (default: 32)')
parser.add_argument('--cuda', action='store_false',
help='use CUDA (default: True)')
parser.add_argument('--dropout', type=float, default=0.1,
help='dropout applied to layers (default: 0.1)')
parser.add_argument('--emb_dropout', type=float, default=0.1,
help='dropout applied to the embedded layer (0 = no dropout) (default: 0.1)')
parser.add_argument('--clip', type=float, default=0.15,
help='gradient clip, -1 means no clip (default: 0.15)')
parser.add_argument('--epochs', type=int, default=100,
help='upper epoch limit (default: 100)')
parser.add_argument('--levels', type=int, default=3,
help='# of levels (default: 3)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval (default: 100)')
parser.add_argument('--lr', type=float, default=4,
help='initial learning rate (default: 4)')
parser.add_argument('--emsize', type=int, default=100,
help='dimension of character embeddings (default: 100)')
parser.add_argument('--optim', type=str, default='SGD',
help='optimizer to use (default: SGD)')
parser.add_argument('--nhid', type=int, default=450,
help='number of hidden units per layer (default: 450)')
parser.add_argument('--validseqlen', type=int, default=320,
help='valid sequence length (default: 320)')
parser.add_argument('--seq_len', type=int, default=400,
help='total sequence length, including effective history (default: 400)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed (default: 1111)')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to use (default: ptb)')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
print(args)
file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)
n_characters = len(corpus.dict)
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), 1, args)
test_data = batchify(char_tensor(corpus, testfile), 1, args)
print("Corpus size: ", n_characters)
model = DRNN_Char(input_size=args.emsize,
output_size=n_characters,
hidden_size=args.nhid,
num_layers=args.levels,
dropout=args.dropout,
emb_dropout=args.emb_dropout)
if args.cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)
def evaluate(source):
model.eval()
total_loss = 0
count = 0
source_len = source.size(1)
for batch, i in enumerate(range(0, source_len - 1, args.validseqlen)):
if i + args.seq_len - args.validseqlen >= source_len:
continue
inp, target = get_batch(source, i, args)
output = model(inp)
eff_history = args.seq_len - args.validseqlen
final_output = output[:, eff_history:].contiguous().view(-1, n_characters)
final_target = target[:, eff_history:].contiguous().view(-1)
loss = criterion(final_output, final_target)
total_loss += loss.data * final_output.size(0)
count += final_output.size(0)
val_loss = total_loss[0] / count * 1.0
return val_loss
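# Illustrative sketch (not part of the original script): with the default
# seq_len=400 and validseqlen=320, each window of 400 characters is scored only
# on its last 320 positions; the first 80 serve as warm-up history.
def _example_effective_history(seq_len=400, validseqlen=320):
    eff_history = seq_len - validseqlen   # 80 unscored context positions
    scored = seq_len - eff_history        # 320 positions that enter the loss
    return eff_history, scored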
def train(epoch):
model.train()
total_loss = 0
start_time = time.time()
losses = []
source = train_data
source_len = source.size(1)
for batch_idx, i in enumerate(range(0, source_len - 1, args.validseqlen)):
if i + args.seq_len - args.validseqlen >= source_len:
continue
inp, target = get_batch(source, i, args)
optimizer.zero_grad()
output = model(inp)
eff_history = args.seq_len - args.validseqlen
final_output = output[:, eff_history:].contiguous().view(-1, n_characters)
final_target = target[:, eff_history:].contiguous().view(-1)
loss = criterion(final_output, final_target)
loss.backward()
if args.clip > 0:
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optimizer.step()
total_loss += loss.data
if batch_idx % args.log_interval == 0 and batch_idx > 0:
cur_loss = total_loss[0] / args.log_interval
losses.append(cur_loss)
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
'loss {:5.3f} | bpc {:5.3f}'.format(
epoch, batch_idx, int((source_len-0.5) / args.validseqlen), lr,
elapsed * 1000 / args.log_interval, cur_loss, cur_loss / math.log(2)))
total_loss = 0
start_time = time.time()
# if batch % (200 * args.log_interval) == 0 and batch > 0:
# vloss = evaluate(val_data)
# print('-' * 89)
# print('| In epoch {:3d} | valid loss {:5.3f} | '
# 'valid bpc {:8.3f}'.format(epoch, vloss, vloss / math.log(2)))
# model.train()
return sum(losses) * 1.0 / len(losses)
def main():
global lr
try:
print("Training for %d epochs..." % args.epochs)
all_losses = []
best_vloss = 1e7
for epoch in range(1, args.epochs + 1):
loss = train(epoch)
vloss = evaluate(val_data)
print('-' * 89)
print('| End of epoch {:3d} | valid loss {:5.3f} | valid bpc {:8.3f}'.format(
epoch, vloss, vloss / math.log(2)))
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of epoch {:3d} | test loss {:5.3f} | test bpc {:8.3f}'.format(
epoch, test_loss, test_loss / math.log(2)))
print('=' * 89)
if epoch > 5 and vloss > max(all_losses[-3:]):
lr = lr / 10.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
all_losses.append(vloss)
if vloss < best_vloss:
print("Saving...")
save(model)
best_vloss = vloss
except KeyboardInterrupt:
print('-' * 89)
print("Saving before quit...")
save(model)
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.3f} | test bpc {:8.3f}'.format(
test_loss, test_loss / math.log(2)))
print('=' * 89)
# train_by_random_chunk()
if __name__ == "__main__":
main()
| [
"torch.nn.CrossEntropyLoss"
] | 0.3.0 | dnbaker/pytorch-dilated-rnn | 4d7e04bf00d2f144c4435cb7ad78ecee7de81dd1 |
1.7 | import copy
import numpy as np
import scipy.linalg
import scipy.stats
import torch
from matplotlib import pyplot as plt
# from ray import tune
# from ray.tune.suggest import ConcurrencyLimiter
# from ray.tune.suggest.hyperopt import HyperOptSearch
# import tqdm
from torch import nn
from torch.nn import functional as F
import autonomous_optimizer
class Variable(nn.Module):
"""A wrapper to turn a tensor of parameters into a module for optimization."""
def __init__(self, data: torch.Tensor):
"""Create Variable holding `data` tensor."""
super().__init__()
self.x = nn.Parameter(data)
def convex_quadratic():
"""
Generate a symmetric positive definite matrix A with eigenvalues
uniformly in [1, 30].
"""
num_vars = 2
# First generate an orthogonal matrix (of eigenvectors)
eig_vecs = torch.tensor(
scipy.stats.ortho_group.rvs(dim=(num_vars)), dtype=torch.float
)
# Now generate eigenvalues
eig_vals = torch.rand(num_vars) * 29 + 1
A = eig_vecs @ torch.diag(eig_vals) @ eig_vecs.T
b = torch.normal(0, 1 / np.sqrt(num_vars), size=(num_vars,))
x0 = torch.normal(0, 0.5 / np.sqrt(num_vars), size=(num_vars,))
def quadratic(var):
x = var.x
return 0.5 * x.T @ A @ x + b.T @ x
optimal_x = scipy.linalg.solve(A.numpy(), -b.numpy(), assume_a="pos")
optimal_val = quadratic(Variable(torch.tensor(optimal_x))).item()
return {
"model0": Variable(x0),
"obj_function": quadratic,
"optimal_x": optimal_x,
"optimal_val": optimal_val,
"A": A.numpy(),
"b": b.numpy(),
}
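# Illustrative sketch (not part of the original module): a quick numerical check
# of the construction above -- if A = V diag(lambda) V^T with orthonormal V, the
# eigenvalues of A are exactly the entries of lambda.
def _example_check_spd_construction():
    eig_vecs = torch.tensor(scipy.stats.ortho_group.rvs(dim=2), dtype=torch.float)
    eig_vals = torch.rand(2) * 29 + 1                    # eigenvalues in [1, 30)
    A = eig_vecs @ torch.diag(eig_vals) @ eig_vecs.T
    recovered = np.sort(np.linalg.eigvalsh(A.numpy()))   # ascending order
    return np.allclose(recovered, np.sort(eig_vals.numpy()), atol=1e-3)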
def rosenbrock():
num_vars = 2
# Initialization strategy: x_i = -1.5 if i is even, x_i = +1.5 if i is odd
x0 = torch.tensor([-1.5 if i % 2 == 0 else 1.5 for i in range(num_vars)])
def rosen(var):
x = var.x
return torch.sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
# Optimum at all x_i = 1, giving f(x) = 0
optimal_x = np.ones(num_vars)
optimal_val = 0
return {
"model0": Variable(x0),
"obj_function": rosen,
"optimal_x": optimal_x,
"optimal_val": optimal_val,
}
def logistic_regression():
num_vars = 3
g0 = torch.distributions.multivariate_normal.MultivariateNormal(
loc=torch.randn(num_vars),
scale_tril=torch.tril(torch.randn((num_vars, num_vars))),
)
g1 = torch.distributions.multivariate_normal.MultivariateNormal(
loc=torch.randn(num_vars),
scale_tril=torch.tril(torch.randn((num_vars, num_vars))),
)
x = torch.cat([g0.sample((50,)), g1.sample((50,))])
y = torch.cat([torch.zeros((50,)), torch.ones((50,))])
perm = torch.randperm(len(x))
x = x[perm]
y = y[perm]
model0 = nn.Sequential(nn.Linear(num_vars, 1), nn.Sigmoid())
def obj_function(model):
y_hat = model(x).view(-1)
weight_norm = model[0].weight.norm()
return F.binary_cross_entropy(y_hat, y) + 5e-4 / 2 * weight_norm
return {"model0": model0, "obj_function": obj_function, "data": (x, y)}
def robust_linear_regression():
num_vars = 3
# Create four gaussian distributions with random mean and covariance.
# For all points drawn from the same gaussian, their labels are
# generated by projecting them along the same random vector, adding
# the same random bias, and perturbing them with i.i.d. gaussian noise.
x = []
y = []
for _ in range(4):
gaussian = torch.distributions.multivariate_normal.MultivariateNormal(
loc=torch.randn(num_vars),
scale_tril=torch.tril(torch.randn((num_vars, num_vars))),
)
new_points = gaussian.sample((25,))
# y_i = true_vector `dot` x_i + true_bias + noise
true_vector = torch.randn(num_vars)
true_bias = torch.randn(1)
new_labels = new_points @ true_vector + true_bias + torch.randn(25)
x.append(new_points)
y.append(new_labels)
x = torch.cat(x)
y = torch.cat(y)
# Shuffle the dataset
perm = torch.randperm(len(x))
x = x[perm]
y = y[perm]
model0 = nn.Linear(num_vars, 1)
def geman_mcclure(model):
y_hat = model(x).view(-1)
squared_errors = (y - y_hat) ** 2
return (squared_errors / (1 + squared_errors)).mean()
return {"model0": model0, "obj_function": geman_mcclure}
def mlp():
num_vars = 2
# Create four gaussian distributions with random mean and covariance
gaussians = [
torch.distributions.multivariate_normal.MultivariateNormal(
loc=torch.randn(num_vars),
scale_tril=torch.tril(torch.randn((num_vars, num_vars))),
)
for _ in range(4)
]
# Randomly assign each of the four gaussians a 0-1 label
# Do again if all four gaussians have the same label (don't want that)
gaussian_labels = np.zeros((4,))
while (gaussian_labels == 0).all() or (gaussian_labels == 1).all():
gaussian_labels = torch.randint(0, 2, size=(4,))
# Generate a dataset of 100 points with 25 points drawn from each gaussian
# Label of the datapoint is the same as the label of the gaussian it came from
x = torch.cat([g.sample((25,)) for g in gaussians])
y = torch.cat([torch.full((25,), float(label)) for label in gaussian_labels])
perm = torch.randperm(len(x))
x = x[perm]
y = y[perm]
model0 = nn.Sequential(
nn.Linear(num_vars, 2), nn.ReLU(), nn.Linear(2, 1), nn.Sigmoid()
)
def obj_function(model):
y_hat = model(x).view(-1)
weight_norm = model[0].weight.norm() + model[2].weight.norm()
return F.binary_cross_entropy(y_hat, y) + 5e-4 / 2 * weight_norm
return {"model0": model0, "obj_function": obj_function, "dataset": (x, y)}
def run_optimizer(make_optimizer, problem, iterations, hyperparams):
# Initial solution
model = copy.deepcopy(problem["model0"])
obj_function = problem["obj_function"]
# Define optimizer
optimizer = make_optimizer(model.parameters(), **hyperparams)
# We will keep track of the objective values and weight trajectories
# throughout the optimization process.
values = []
trajectory = []
# Passed to optimizer. This setup is required to give the autonomous
# optimizer access to the objective value and not just its gradients.
def closure():
trajectory.append(copy.deepcopy(model))
optimizer.zero_grad()
obj_value = obj_function(model)
obj_value.backward()
values.append(obj_value.item())
return obj_value
# Minimize
for i in range(iterations):
optimizer.step(closure)
# Stop optimizing if we start getting nans as objective values
if np.isnan(values[-1]) or np.isinf(values[-1]):
break
return np.nan_to_num(values, 1e6), trajectory
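# Illustrative usage sketch (not part of the original module): running a baseline
# optimizer on one synthetic problem. The learning rate is an arbitrary example,
# not a tuned value from the repo.
def _example_run_sgd_on_quadratic():
    problem = convex_quadratic()
    values, trajectory = run_optimizer(torch.optim.SGD, problem,
                                       iterations=50, hyperparams={"lr": 0.05})
    return values.min(), len(trajectory)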
def accuracy(model, x, y):
return ((model(x).view(-1) > 0.5) == y).float().mean().item()
def run_all_optimizers(problem, iterations, tune_dict, policy):
# SGD
sgd_vals, sgd_traj = run_optimizer(
torch.optim.SGD, problem, iterations, tune_dict["sgd"]["hyperparams"]
)
print(f"SGD best loss: {sgd_vals.min()}")
# Momentum
momentum_vals, momentum_traj = run_optimizer(
torch.optim.SGD, problem, iterations, tune_dict["momentum"]["hyperparams"]
)
print(f"Momentum best loss: {momentum_vals.min()}")
# Adam
adam_vals, adam_traj = run_optimizer(
torch.optim.Adam, problem, iterations, tune_dict["adam"]["hyperparams"]
)
print(f"Adam best loss: {adam_vals.min()}")
# LBFGS
lbfgs_vals, lbfgs_traj = run_optimizer(
torch.optim.LBFGS, problem, iterations, tune_dict["lbfgs"]["hyperparams"]
)
print(f"LBFGS best loss: {lbfgs_vals.min()}")
# Autonomous optimizer
ao_vals, ao_traj = run_optimizer(
autonomous_optimizer.AutonomousOptimizer,
problem,
iterations,
{"policy": policy},
)
print(f"Autonomous Optimizer best loss: {ao_vals.min()}")
return {
"sgd": (sgd_vals, sgd_traj),
"momentum": (momentum_vals, momentum_traj),
"adam": (adam_vals, adam_traj),
"lbfgs": (lbfgs_vals, lbfgs_traj),
"ao": (ao_vals, ao_traj),
}
def plot_trajectories(trajectories, problem, get_weights, set_weights):
"""Plot optimization trajectories on top of a contour plot.
Parameters:
trajectories (List(nn.Module))
problem (dict)
get_weights (Callable[[], Tuple[float, float]])
set_weights (Callable[[float, float], None])
"""
data = {}
for name, traj in trajectories.items():
data[name] = np.array([get_weights(model) for model in traj])
xmin = min(np.array(d)[:, 0].min() for d in data.values())
ymin = min(np.array(d)[:, 1].min() for d in data.values())
xmax = max(np.array(d)[:, 0].max() for d in data.values())
ymax = max(np.array(d)[:, 1].max() for d in data.values())
X = np.linspace(xmin - (xmax - xmin) * 0.2, xmax + (xmax - xmin) * 0.2)
Y = np.linspace(ymin - (ymax - ymin) * 0.2, ymax + (ymax - ymin) * 0.2)
model = copy.deepcopy(problem["model0"])
Z = np.empty((len(Y), len(X)))
for i in range(len(X)):
for j in range(len(Y)):
set_weights(model, X[i], Y[j])
Z[j, i] = problem["obj_function"](model)
plt.figure(figsize=(10, 6), dpi=500)
plt.contourf(X, Y, Z, 30, cmap="RdGy")
plt.colorbar()
for name, traj in data.items():
plt.plot(traj[:, 0], traj[:, 1], label=name)
plt.title("Convex Quadratic Trajectory Plot")
plt.plot(*get_weights(problem["model0"]), "bo")
plt.legend()
plt.plot()
plt.show()
'''def tune_algos(
dataset,
algo_iters,
tune_iters,
hyperparam_space,
algos=["sgd", "momentum" "adam", "lbfgs"],
):
"""Tune hyperparameters with Bayesian optimization."""
def make_experiment(make_optimizer):
def experiment(hyperparams):
best_obj_vals = []
for problem in dataset:
vals, traj = run_optimizer(
make_optimizer, problem, algo_iters, hyperparams
)
best_obj_vals.append(vals.min())
tune.report(objective_value=np.mean(best_obj_vals))
return experiment
results = {}
for algo in tqdm.tqdm(algos):
if algo == "sgd":
sgd_analysis = tune.run(
make_experiment(torch.optim.SGD),
config={"lr": hyperparam_space["lr"]},
metric="objective_value",
mode="min",
search_alg=ConcurrencyLimiter(HyperOptSearch(), max_concurrent=3),
num_samples=tune_iters,
verbose=0,
)
sgd_hyperparams = sgd_analysis.get_best_config(
metric="objective_value", mode="min"
)
results["sgd"] = {"analysis": sgd_analysis, "hyperparams": sgd_hyperparams}
if algo == "momentum":
momentum_analysis = tune.run(
make_experiment(torch.optim.SGD),
config={
"nesterov": True,
"lr": hyperparam_space["lr"],
"momentum": hyperparam_space["momentum"],
},
metric="objective_value",
mode="min",
search_alg=ConcurrencyLimiter(HyperOptSearch(), max_concurrent=3),
num_samples=tune_iters,
verbose=0,
)
momentum_hyperparams = momentum_analysis.get_best_config(
metric="objective_value", mode="min"
)
results["momentum"] = {
"analysis": momentum_analysis,
"hyperparams": momentum_hyperparams,
}
if algo == "adam":
adam_analysis = tune.run(
make_experiment(torch.optim.Adam),
config={"lr": hyperparam_space["lr"]},
metric="objective_value",
mode="min",
search_alg=ConcurrencyLimiter(HyperOptSearch(), max_concurrent=3),
num_samples=tune_iters,
verbose=0,
)
adam_hyperparams = adam_analysis.get_best_config(
metric="objective_value", mode="min"
)
results["adam"] = {
"analysis": adam_analysis,
"hyperparams": adam_hyperparams,
}
if algo == "lbfgs":
lbfgs_analysis = tune.run(
make_experiment(torch.optim.LBFGS),
config={"lr": hyperparam_space["lr"], "max_iter": 1},
metric="objective_value",
mode="min",
search_alg=ConcurrencyLimiter(HyperOptSearch(), max_concurrent=3),
num_samples=tune_iters,
verbose=0,
)
lbfgs_hyperparams = lbfgs_analysis.get_best_config(
metric="objective_value", mode="min"
)
results["lbfgs"] = {
"analysis": lbfgs_analysis,
"hyperparams": lbfgs_hyperparams,
}
return results'''
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Parameter",
"torch.ones",
"torch.sum",
"torch.randint",
"torch.tensor",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.functional.binary_cross_entropy",
"torch.rand",
"torch.nn.Sigmoid",
"torch.diag",
"torch.randn"
] | 1.7.1 | stewy33/Learning-to-Optimize | b5e6fd008f12d0b702d861f6d6a99b773fd7c024 |
0.4 | from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
from .losses import ContentLoss, StyleLoss, Normalization
from models.SAB import SpatialAttention
import torchvision.transforms as transforms
import torchvision.models as models
import copy
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
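# Illustrative usage sketch (not part of the original module): the factory returns
# a functools.partial; calling it with a channel count builds the concrete layer.
def _example_get_norm_layer():
    norm_layer = get_norm_layer('instance')
    layer = norm_layer(64)   # nn.InstanceNorm2d(64, affine=False, track_running_stats=False)
    return layer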
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
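# Illustrative sketch (not part of the original module): the arithmetic behind the
# 'linear' policy above. With niter=100 and niter_decay=100 the multiplier stays at
# 1.0 for the first 100 epochs, then decays linearly towards 0 over the next 100.
def _example_linear_lr_multiplier(epoch, epoch_count=1, niter=100, niter_decay=100):
    return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay + 1)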
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
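# Illustrative usage sketch (not part of the original module): applying one of the
# supported initialization schemes to a freshly built network.
def _example_init_weights():
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(True))
    init_weights(net, init_type='kaiming', init_gain=0.02)
    return net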
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], use_sab=False):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_sab=use_sab)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_sab=use_sab)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
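# Illustrative usage sketch (not part of the original module): a 9-block ResNet
# generator for 3-channel images on CPU (empty gpu_ids).
def _example_define_G():
    return define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
                    norm='instance', use_dropout=False, init_type='normal',
                    init_gain=0.02, gpu_ids=[])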
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)
return init_net(net, init_type, init_gain, gpu_ids)
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers=content_layers_default,
style_layers=style_layers_default):
cnn = copy.deepcopy(cnn)
# the original relied on a module-level `device`; define it locally so the function runs standalone
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# normalization module
normalization = Normalization(normalization_mean, normalization_std).to(device)
# just in order to have iterable access to, or a list of, content/style
# losses
content_losses = []
style_losses = []
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
# to put in modules that are supposed to be activated sequentially
model = nn.Sequential(normalization)
i = 0 # increment every time we see a conv
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
# now we trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
def get_input_optimizer(input_img):
# this line to show that input is a parameter that requires a gradient
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def get_style_texture_algorithm():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cnn = models.vgg19(pretrained=True).features.to(device).eval()
# Additionally, VGG networks are trained on images with each channel normalized by
# mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]. We will use them to normalize
# the image before sending it into the network.
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
# desired depth layers to compute style/content losses :
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
content_img, style_img, input_img)
return output
'''
we define a function that performs the neural transfer.
For each iteration of the networks, it is fed an updated input and computes new losses.
We will run the backward methods of each loss module to dynamically compute their gradients.
The optimizer requires a “closure” function, which reevaluates the module and returns the loss.
'''
def run_style_transfer(cnn, normalization_mean, normalization_std,
content_img, style_img, input_img, num_steps=300,
style_weight=1000000, content_weight=1):
"""Run the style transfer."""
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, style_img, content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print("run {}:".format(run))
print('Style Loss : {:4f} Content Loss: {:4f}'.format(
style_score.item(), content_score.item()))
print()
return style_score + content_score
optimizer.step(closure)
# a last correction...
input_img.data.clamp_(0, 1)
return input_img
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
elif gan_mode == 'style_texture':
self.loss = get_style_texture_algorithm()
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
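# Illustrative usage sketch (not part of the original module): an LSGAN criterion
# scoring a (hypothetical) PatchGAN prediction map against real/fake labels.
def _example_gan_loss():
    criterion = GANLoss('lsgan')
    prediction = torch.randn(2, 1, 30, 30)
    loss_real = criterion(prediction, target_is_real=True)
    loss_fake = criterion(prediction, target_is_real=False)
    return loss_real, loss_fake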
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float)        -- the constant used in formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
alpha = alpha.to(device)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
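# Illustrative usage sketch (not part of the original module): computing the
# WGAN-GP penalty for a small discriminator on random tensors standing in for images.
def _example_gradient_penalty():
    netD = NLayerDiscriminator(input_nc=3, ndf=8, n_layers=2)
    real = torch.randn(4, 3, 64, 64)
    fake = torch.randn(4, 3, 64, 64)
    penalty, _ = cal_gradient_penalty(netD, real, fake, torch.device('cpu'),
                                      type='mixed', constant=1.0, lambda_gp=10.0)
    return penalty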
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, use_sab=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
if use_sab:
unet_block = SpatialAttention(ngf*8)
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.InstanceNorm2d
else:
use_bias = norm_layer != nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
| [
"torch.optim.lr_scheduler.StepLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.DataParallel",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.ReflectionPad2d",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.LambdaLR"
] | 0.4.1 | akgokce/EndoL2H | da012dbf3a907000fc469c738976e8905d1dd423 |
1.5 | """Tools to ease model training (like torch.ignite)"""
import torch
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from nitorch.core.utils import benchmark, fold, unfold, rubiks_shuffle
from nitorch.core.py import make_tuple
from nitorch.nn.modules import Module
from nitorch.nn import DiceLoss
import string
import math
import os
import random
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
def SummaryWriter():
raise ImportError('Optional dependency TensorBoard not found')
def split_train_val_test(data, split=[0.6, 0.1, 0.3], shuffle=False, seed=0):
"""Split sequence of data into train, validation and test.
Parameters
----------
data : [N,] list
Input data.
split : [3,] list, default=[0.6, 0.1, 0.3]
Train, validation, test fractions.
shuffle : bool, default=False
Randomly shuffle input data (with seed for reproducibility)
seed : int, default=0
Seed for random shuffling.
Returns
----------
train : [split[0]*N,] list
Train split.
val : [split[1]*N,] list
Validation split.
test : [split[2]*N,] list
Test split.
"""
N = len(data)
# Ensure split is normalised
split = [s / sum(split) for s in split]
# Randomly shuffle input data (with seed for reproducibility)
if shuffle:
random.seed(seed)
data = random.sample(data, N)
# Do train/val/test split
train, val, test = [], [], []
for i, d in enumerate(data):
if i < math.floor(split[0] * N):
train.append(d)
elif i < math.floor(sum(split[:2]) * N):
val.append(d)
elif i < math.floor(sum(split) * N):
test.append(d)
return train, val, test
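# Illustrative usage sketch (not part of the original module): splitting twenty
# items 50/25/25 with shuffling; these fractions are exact binary floats, so the
# floor-based boundaries give 10 / 5 / 5 items.
def _example_split():
    data = list(range(20))
    train, val, test = split_train_val_test(data, split=[0.5, 0.25, 0.25],
                                            shuffle=True, seed=0)
    return train, val, test   # lengths 10, 5 and 5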
def update_loss_dict(old, new, weight=1, inplace=True):
"""Update a dictionary of losses/metrics with a new batch
Parameters
----------
old : dict
Previous (accumulated) dictionary of losses/metrics
new : dict
Dictionary of losses/metrics for the current batch
weight : float, default=1
Weight for the batch
inplace : bool, default=True
Modify the dictionary in-place
Returns
-------
new : dict
Updated (accumulated) dictionary of losses/metrics
"""
if not inplace:
old = dict(old)
for key, val in new.items():
if key in old.keys():
old[key] += val * weight
else:
old[key] = val * weight
return old
def normalize_loss_dict(losses, weight=1, inplace=True):
"""Normalize all losses in a dict.
Parameters
----------
losses : dict
Accumulated dictionary of losses/metrics
weight : float, default=1
Sum of weights across all batches
inplace : bool, default=True
Modify the dictionary in-place
Returns
-------
losses : dict
Normalized dictionary of losses/metrics
"""
if not inplace:
losses = dict(losses)
for key, val in losses.items():
losses[key] /= weight
return losses
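# Illustrative sketch (not part of the original module): how the two helpers are
# meant to be used together -- accumulate weighted sums over mini-batches, then
# divide by the total weight to obtain per-epoch averages.
def _example_accumulate_losses():
    epoch_losses = {}
    batches = [({'dice': 0.8}, 4), ({'dice': 0.6}, 2)]   # (losses, batch size)
    total_weight = 0
    for losses, weight in batches:
        update_loss_dict(epoch_losses, losses, weight)
        total_weight += weight
    normalize_loss_dict(epoch_losses, total_weight)
    return epoch_losses   # {'dice': (0.8*4 + 0.6*2) / 6} ~ 0.733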
class ModelTrainer:
"""A class that simplifies training a network."""
_nb_steps = None
_train_set = None
_eval_set = None
_tensorboard = None
_tensorboard_callbacks = None
random_state = []
def __init__(self, model, train_set, eval_set=None,
optimizer=None,
nb_epoch=100,
nb_steps=None,
*, # the remaining parameters *must be* keywords
device=None,
dtype=None,
initial_epoch=0,
log_interval=10,
benchmark=False,
seed=None,
tensorboard=None,
save_model=None,
save_optimizer=None,
load_model=None,
load_optimizer=None,
show_losses=True,
show_metrics=False,
scheduler=ReduceLROnPlateau):
"""
Parameters
----------
model : Module
Model to train.
Its forward pass should accept a `loss` argument, and take as
inputs the elements that pop out of the training set.
train_set : sequence[tensor or tuple[tensor]]
Training set.
It should be a finite sequence of tensors or tuple of tensors.
eval_set : sequence[tensor or tuple[tensor]], optional
Evaluation set.
It should be a finite sequence of tensors or tuple of tensors.
optimizer : callable, default=Adam
A function that takes trainable parameters as inputs and
returns an Optimizer object.
nb_epoch : int, default=100
Number of epochs.
nb_steps : int, default=`len(train_set) or 100`
Number of steps per epoch.
If the training set is a finite sequence (i.e., `len` is
implemented), its length is used. Else, the training set
is assumed to be infinite and the default number of steps
is 100.
scheduler : Scheduler, default=ReduceLROnPlateau
Other Parameters
----------------
device : torch.device, optional
Device to use. By default, use the default cuda device if
any, else use cpu.
dtype : torch.dtype, optional
Data type to use. By default use `torch.get_default_dtype`.
initial_epoch : int, default=0
First epoch
log_interval : int, default=10
Number of steps between screen updates.
benchmark : bool, default=False
Use the cudnn benchmarking utility that uses the first forward
pass to compare different convolution algorithms and select the
best performing one. You should only use this option if the
spatial shape of your input data is constant across mini batches.
seed : int, optional
Manual seed to use for training. The seed is set when
training starts. A context manager is used so that the global
state is kept untouched. If `None`, use the global state.
tensorboard : str, optional
A path to the tensorboard log directory.
If provided, losses and metrics are registered to the board
by default.
save_model : str, optional
A path to save the model at each epoch. Can have a
formatted component ('mymodel_{}.pth') for the epoch number.
save_optimizer : str, optional
A path to save the optimizer at each epoch. Can have a
formatted component ('myoptim_{}.pth') for the epoch number.
load_model : str, optional
Path to saved weights to use to initialize the model.
load_optimizer : str, optional
Path to saved state to use to initialize the optimizer.
show_losses : bool, default=True
Print values of individual losses
show_metrics : bool, default=False
Print values of individual metrics
"""
self.model = model
self.train_set = train_set
self.eval_set = eval_set
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters())
self.optimizer = optimizer
self.log_interval = log_interval
self.benchmark = benchmark
self.seed = seed
self.initial_seed = seed
self.tensorboard = tensorboard
self._tensorboard_callbacks = dict(train=dict(epoch=[], step=[]),
eval=dict(epoch=[], step=[]))
self.save_model = save_model
self.save_optimizer = save_optimizer
self.load_model = load_model
self.load_optimizer = load_optimizer
self.show_losses = show_losses
self.show_metrics = show_metrics
self.nb_epoch = nb_epoch
self.nb_steps = nb_steps
self.initial_epoch = initial_epoch
self.epoch = initial_epoch
self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
self.device = torch.device(self.device)
self.dtype = dtype or torch.get_default_dtype()
self.scheduler = scheduler
if self.scheduler is not None:
self.scheduler = self.scheduler(self.optimizer)
if self.load_model:
self.model.load_state_dict(torch.load(self.load_model))
if self.load_optimizer:
self.optimizer.load_state_dict(torch.load(self.load_optimizer))
def _update_nb_steps(self):
def len_or(x, default):
return len(x) if hasattr(x, '__len__') else default
self._nb_train = self._nb_steps or len_or(self._train_set, 100)
self._nb_eval = self._nb_steps or len_or(self._eval_set, 100)
class _batch_iterator:
def __init__(self, set, length):
self.set = set
self.length = length
def __len__(self):
return self.length
def __iter__(self):
d = 0
while d < self.length:
for batch in self.set:
if d >= self.length:
return
yield batch
d += 1
@property
def tensorboard(self):
return self._tensorboard
@tensorboard.setter
def tensorboard(self, val):
if not val:
self._tensorboard = val
else:
self._tensorboard = SummaryWriter(val)
@property
def nb_steps(self):
return self._nb_steps
@nb_steps.setter
def nb_steps(self, val):
self._nb_steps = val
self._update_nb_steps()
@property
def train_set(self):
if self._train_set:
return self._batch_iterator(self._train_set, self._nb_train)
else:
return None
@train_set.setter
def train_set(self, val):
self._train_set = val
self._update_nb_steps()
@property
def eval_set(self):
if self._eval_set:
return self._batch_iterator(self._eval_set, self._nb_eval)
else:
return None
@eval_set.setter
def eval_set(self, val):
self._eval_set = val
self._update_nb_steps()
def _train(self, epoch=0):
"""Train for one epoch"""
self.model.train()
epoch_loss = 0.
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_steps = len(self.train_set)
for n_batch, batch in enumerate(self.train_set):
losses = {}
metrics = {}
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
self.optimizer.zero_grad()
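            # By convention here, the model's forward pass fills the `_loss` and
            # `_metric` dicts it receives; the scalar training loss is simply the
            # sum of all registered loss components.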
output = self.model(*batch, _loss=losses, _metric=metrics)
loss = sum(losses.values())
# backward pass
loss.backward()
self.optimizer.step()
# update average across batches
with torch.no_grad():
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('train', epoch, n_batch+1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='train',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['step']:
func(self.tensorboard, **tbopt)
del tbopt
# print summary
with torch.no_grad():
epoch_loss /= nb_batches
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('train', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('train', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='train',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['epoch']:
func(self.tensorboard, **tbopt)
return epoch_loss
def _eval(self, epoch=0):
"""Evaluate once"""
if self.eval_set is None:
return
self.model.eval()
with torch.no_grad():
epoch_loss = 0
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_steps = len(self.eval_set)
for n_batch, batch in enumerate(self.eval_set):
losses = {}
metrics = {}
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
self.optimizer.zero_grad()
output = self.model(*batch, _loss=losses, _metric=metrics)
loss = sum(losses.values())
# update average across batches
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('eval', epoch, n_batch + 1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='eval',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['step']:
func(self.tensorboard, **tbopt)
# print summary
epoch_loss /= nb_batches
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('eval', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('eval', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='eval',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['epoch']:
func(self.tensorboard, **tbopt)
return epoch_loss
def _print(self, mode, n_epoch, n_batch, nb_steps, loss,
losses=None, metrics=None, last=False):
"""Pretty printing
Parameters
----------
mode : {'train', 'eval'}
n_epoch : int
Index of current epoch (starts at one)
n_batch : int
Index of current batch (starts at one)
nb_steps : int
Total number of batches
loss : () tensor
Loss for this batch
losses : dict[str: () tensor]
Loss components for this batch
metrics : dict[str: () tensor]
Metrics for this batch
last : bool, default=False
            Is this the last batch of the epoch?
If True, loss/losses/metrics should contain the average loss
across all batches.
"""
name = 'Train' if mode == 'train' else 'Eval '
if last:
pct = 1
bar = '[' + '=' * 10 + ']'
else:
pct = n_batch/nb_steps
len_arrow = min(math.floor(pct*10 + 0.5), 9)
bar = '[' + '=' * len_arrow + '>' + ' ' * (9-len_arrow) + ']'
lepoch = str(len(str(self.nb_epoch)))
evolution = '{:s} | {:' + lepoch + 'd} | {:3.0f}% ' + bar + ' '
evolution = evolution.format(name, n_epoch, pct*100)
values = ''
if mode == 'train':
values += '| loss = {:12.6g} '.format(loss.item())
if losses and self.show_losses:
values += '|'
for key, val in losses.items():
values += ' {}: {:12.6g} '.format(key, val.item())
if metrics and (mode == 'eval' or self.show_metrics):
values += '|'
for key, val in metrics.items():
values += ' {}: {:12.6g} '.format(key, val.item())
print(evolution + values, end='\r', flush=True)
if last:
print('')
def _board(self, mode, epoch, loss, epoch_metrics):
"""Add losses and metrics to tensorboard."""
if not self.tensorboard:
return
tb = self.tensorboard
tb.add_scalars('loss', {mode: loss.item()}, epoch)
for tag, value in epoch_metrics.items():
tb.add_scalars(tag, {mode: value.item()}, epoch)
tb.flush()
def add_tensorboard_callback(self, func, mode='train', trigger='epoch'):
"""Register tensorboard callbacks
Parameters
----------
func : callable
If trigger 'step', with signature
`(tb, input, output, epoch, step, loss, losses, metrics)`
If trigger 'epoch', with signature:
`(tb, epoch, loss, losses, metrics)`
mode : {'train', 'eval'}
Trigger either during a training or evaluation call.
trigger : {'epoch', 'step'}
Trigger either at the end of a step or at the end of an epoch.
"""
if mode not in self._tensorboard_callbacks.keys():
self._tensorboard_callbacks[mode] = dict()
if trigger not in self._tensorboard_callbacks[mode].keys():
self._tensorboard_callbacks[mode][trigger] = list()
self._tensorboard_callbacks[mode][trigger].append(func)
def _hello(self, mode):
"""Tell the use what we are going to do (mode, device, dtype, ...)
Parameters
----------
mode : {'train', 'eval'}
"""
if self.device.type == 'cuda':
device = torch.cuda.get_device_name(self.device)
else:
assert self.device.type == 'cpu'
device = 'CPU'
dtype = str(self.dtype).split('.')[-1]
if mode == 'train':
hello = 'Training model {} for {} epochs (steps per epoch: {}) ' \
'on {} (dtype = {})'
hello = hello.format(type(self.model).__name__, self.nb_epoch,
len(self.train_set), device, dtype)
else:
hello = 'Evaluating model {} (minibatches: {}) on {} (dtype = {})'
hello = hello.format(type(self.model).__name__,
len(self.eval_set), device, dtype)
print(hello, flush=True)
def _save(self, epoch):
"""Save once"""
if self.save_model:
save_model = self._formatfile(self.save_model, epoch)
dir_model = os.path.dirname(save_model)
if dir_model:
os.makedirs(dir_model, exist_ok=True)
torch.save(self.model.state_dict(), save_model)
if self.save_optimizer:
save_optimizer = self._formatfile(self.save_optimizer, epoch)
dir_optimizer = os.path.dirname(save_optimizer)
if dir_optimizer:
os.makedirs(dir_optimizer, exist_ok=True)
torch.save(self.optimizer.state_dict(), save_optimizer)
@staticmethod
def _formatfile(file, epoch):
"""Format filename for an epoch"""
keys = [tup[1] for tup in string.Formatter().parse(file)
if tup[1] is not None]
if len(keys) == 1:
file = file.format(epoch)
elif len(keys) > 1:
raise ValueError('Cannot have more than one format key')
return file
def train(self):
"""Launch training"""
self._hello('train')
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
with benchmark(self.benchmark):
self.model.to(dtype=self.dtype, device=self.device)
self.epoch = self.initial_epoch
self._eval(self.epoch)
self._save(self.epoch)
for self.epoch in range(self.epoch+1, self.nb_epoch+1):
train_loss = self._train(self.epoch)
print('Train loss: {}'.format(train_loss))
val_loss = self._eval(self.epoch)
self._save(self.epoch)
# scheduler
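                    # ReduceLROnPlateau needs the monitored value passed to step();
                    # other schedulers are stepped once per epoch without arguments.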
if isinstance(self.scheduler, ReduceLROnPlateau):
sched_loss = val_loss or train_loss
self.scheduler.step(sched_loss)
elif self.scheduler:
self.scheduler.step()
def eval(self):
"""Launch evaluation"""
self._hello('eval')
self.model.to(dtype=self.dtype, device=self.device)
self._eval()
def init(self):
"""Initialize the random state + run one evaluation."""
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
self.save_random_state()
self.epoch = self.initial_epoch
self.model.to(dtype=self.dtype, device=self.device)
self._eval(self.epoch)
self._save(self.epoch)
def set_random_state(self):
"""Populate the random state using a saved state."""
if self.random_state:
cpu_state, *gpu_states = self.random_state
devices = list(range(torch.cuda.device_count()))
torch.set_rng_state(self.random_state[0])
for device, state in zip(devices, gpu_states):
torch.cuda.set_rng_state(state, device)
def save_random_state(self):
"""Save the current random state."""
devices = list(range(torch.cuda.device_count()))
self.random_state = [torch.get_rng_state()]
self.random_state.extend(torch.cuda.get_rng_state(device)
for device in devices)
def train1(self):
"""Train for one epoch."""
with torch.random.fork_rng():
self.set_random_state()
self.model.to(dtype=self.dtype, device=self.device)
self.epoch += 1
self._train(self.epoch)
self._eval(self.epoch)
self._save(self.epoch)
self.save_random_state()
class PreTrainer:
"""A class that allows self-supervised training via a number of methods.
Useful for pre-training before using ModelTrainer for supervised tasks."""
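    # A minimal usage sketch (names like `net` and `loader` are illustrative,
    # not defined in this file):
    #     trainer = PreTrainer(net, loader, nb_epoch=10)
    #     trainer.train()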
_nb_steps = None
_train_set = None
_eval_set = None
_tensorboard = None
_tensorboard_callbacks = None
random_state = []
def __init__(self, model, train_set, eval_set=None,
optimizer=None,
nb_epoch=100,
nb_steps=None,
*, # the remaining parameters *must be* keywords
loss=torch.nn.L1Loss(),
adv_model=None,
lambda_adv=1,
lambda_gp=10,
device=None,
dtype=None,
initial_epoch=0,
log_interval=10,
benchmark=False,
seed=None,
tensorboard=None,
save_model=None,
save_optimizer=None,
load_model=None,
load_optimizer=None,
show_losses=True,
show_metrics=False,
scheduler=ReduceLROnPlateau):
"""
Parameters
----------
model : Module
Model to train.
Its forward pass should accept a `loss` argument, and take as
inputs the elements that pop out of the training set.
train_set : sequence[tensor or tuple[tensor]]
Training set.
It should be a finite sequence of tensors or tuple of tensors.
eval_set : sequence[tensor or tuple[tensor]], optional
Evaluation set.
It should be a finite sequence of tensors or tuple of tensors.
optimizer : callable, default=Adam
A function that takes trainable parameters as inputs and
returns an Optimizer object.
nb_epoch : int, default=100
Number of epochs.
nb_steps : int, default=`len(train_set) or 100`
Number of steps per epoch.
If the training set is a finite sequence (i.e., `len` is
implemented), its length is used. Else, the training set
is assumed to be infinite and the default number of steps
is 100.
scheduler : Scheduler, default=ReduceLROnPlateau
Other Parameters
----------------
loss : callable
Loss to use for pre-training task.
adv_model : nitorch Module or torch Sequential, default=None
If not None, will use adversarial loss weighted by lambda_adv
        lambda_adv : int or float, default=1
            If an adversarial loss is used, the total loss becomes:
            loss_total = loss + lambda_adv * adv_model(y_hat)
lambda_gp : int or float, default=10
Gradient penalty for discriminator training, as per Wasserstein GAN
device : torch.device, optional
Device to use. By default, use the default cuda device if
any, else use cpu.
dtype : torch.dtype, optional
Data type to use. By default use `torch.get_default_dtype`.
initial_epoch : int, default=0
First epoch
log_interval : int, default=10
Number of steps between screen updates.
benchmark : bool, default=False
Use the cudnn benchmarking utility that uses the first forward
pass to compare different convolution algorithms and select the
best performing one. You should only use this option if the
spatial shape of your input data is constant across mini batches.
seed : int, optional
Manual seed to use for training. The seed is set when
training starts. A context manager is used so that the global
state is kept untouched. If `None`, use the global state.
tensorboard : str, optional
A path to the tensorboard log directory.
If provided, losses and metrics are registered to the board
by default.
save_model : str, optional
A path to save the model at each epoch. Can have a
formatted component ('mymodel_{}.pth') for the epoch number.
save_optimizer : str, optional
A path to save the optimizer at each epoch. Can have a
formatted component ('myoptim_{}.pth') for the epoch number.
load_model : str, optional
Path to saved weights to use to initialize the model.
load_optimizer : str, optional
Path to saved state to use to initialize the optimizer.
show_losses : bool, default=True
Print values of individual losses
show_metrics : bool, default=False
Print values of individual metrics
"""
self.model = model
self.train_set = train_set
self.eval_set = eval_set
self.loss = loss
self.adv_model = adv_model
if adv_model is not None:
self.adv_opt = torch.optim.Adam(adv_model.parameters())
self.lambda_adv = lambda_adv
self.lambda_gp = lambda_gp
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters())
self.optimizer = optimizer
self.log_interval = log_interval
self.benchmark = benchmark
self.seed = seed
self.initial_seed = seed
self.tensorboard = tensorboard
self._tensorboard_callbacks = dict(train=dict(epoch=[], step=[]),
eval=dict(epoch=[], step=[]))
self.save_model = save_model
self.save_optimizer = save_optimizer
self.load_model = load_model
self.load_optimizer = load_optimizer
self.show_losses = show_losses
self.show_metrics = show_metrics
self.nb_epoch = nb_epoch
self.nb_steps = nb_steps
self.initial_epoch = initial_epoch
self.epoch = initial_epoch
        self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
self.device = torch.device(self.device)
self.dtype = dtype or torch.get_default_dtype()
self.scheduler = scheduler
if self.scheduler is not None:
self.scheduler = self.scheduler(self.optimizer)
if self.load_model:
self.model.load_state_dict(torch.load(self.load_model))
if self.load_optimizer:
self.optimizer.load_state_dict(torch.load(self.load_optimizer))
def _update_nb_steps(self):
def len_or(x, default):
return len(x) if hasattr(x, '__len__') else default
self._nb_train = self._nb_steps or len_or(self._train_set, 100)
self._nb_eval = self._nb_steps or len_or(self._eval_set, 100)
def wass_gp(self, disc, real, fake):
# Adapted from example provided by @eriklindernoren on GitHub
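        # WGAN-GP gradient penalty (Gulrajani et al., 2017): sample points on the
        # line between real and fake examples and penalise the squared deviation
        # of the critic's gradient norm from 1:
        #     GP = E_eps[ (||grad_x D(x_mix)||_2 - 1)^2 ],  x_mix = eps*real + (1-eps)*fake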
        # make sure the fake sample is on the same device as the real one
device = real.device
fake = fake.to(device)
# assume [B, C, **] -> dim = length of shape excluding B & C
dim = len(real.shape) - 2
# random number to scale between real & fake
shape = [real.shape[0], 1]
_ = [shape.append(1) for i in range(dim)]
eps = torch.rand(shape)
eps = eps.to(device)
mix = (real * eps + fake * (1 - eps)).requires_grad_(True)
disc_mix = disc(mix)
if isinstance(disc_mix, (list, tuple)):
disc_mix = disc_mix[0]
fake_ = torch.ones(disc_mix.shape, requires_grad=False)
fake_ = fake_.to(device)
grad = torch.autograd.grad(
outputs=disc_mix,
inputs=mix,
grad_outputs=fake_,
create_graph=True,
retain_graph=True,
only_inputs=True
)
grad = grad[0]
grad = grad.view(grad.shape[0], -1)
gp = ((grad.norm(2, dim=1) - 1)**2)
gp = gp.mean()
return gp
def rubiks_gen(self, image, kernel=[10,10,10]):
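        # Rubik's-cube-style pretext task (a sketch of the intent): cut the image
        # into patches of size `kernel`, shuffle the patches, then fold them back
        # together; the model is trained to reconstruct the original image.
        # `unfold`, `rubiks_shuffle` and `fold` are assumed to come from elsewhere
        # in the library.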
image = unfold(image, kernel)
image = rubiks_shuffle(image)
image = fold(image, len(kernel))
return image
class _batch_iterator:
def __init__(self, set, length):
self.set = set
self.length = length
def __len__(self):
return self.length
def __iter__(self):
d = 0
while d < self.length:
for batch in self.set:
if d >= self.length:
return
yield batch
d += 1
@property
def tensorboard(self):
        return self._tensorboard
@tensorboard.setter
def tensorboard(self, val):
if not val:
self._tensorboard = val
else:
self._tensorboard = SummaryWriter(val)
@property
def nb_steps(self):
return self._nb_steps
@nb_steps.setter
def nb_steps(self, val):
self._nb_steps = val
self._update_nb_steps()
@property
def train_set(self):
if self._train_set:
return self._batch_iterator(self._train_set, self._nb_train)
else:
return None
@train_set.setter
def train_set(self, val):
self._train_set = val
self._update_nb_steps()
@property
def eval_set(self):
if self._eval_set:
return self._batch_iterator(self._eval_set, self._nb_eval)
else:
return None
@eval_set.setter
def eval_set(self, val):
self._eval_set = val
self._update_nb_steps()
def _train(self, epoch=0, adv=False, kernel=[10,10,10]):
"""Train for one epoch"""
self.model.train()
epoch_loss = 0.
nb_batches = 0
nb_steps = len(self.train_set)
for n_batch, batch in enumerate(self.train_set):
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
target = batch[0]
if len(batch) > 1:
meta = batch[-1]
else:
meta = None
self.optimizer.zero_grad()
image = self.rubiks_gen(target, kernel)
output = self.model(image, meta=meta, seg=False, gan=True, gan_meta=meta)
            if adv:
self.adv_opt.zero_grad()
real_true = self.adv_model(target)
real_false = self.adv_model(output)
grad_pen = self.wass_gp(self.adv_model, target, output)
loss_adv_d = -torch.mean(real_true) + torch.mean(real_false) + self.lambda_gp * grad_pen
loss_adv_d.backward()
self.adv_opt.step()
                loss = self.loss(output, target) - self.lambda_adv * torch.mean(real_false)
else:
loss = self.loss(output, target)
# backward pass
loss.backward()
self.optimizer.step()
# update average across batches
with torch.no_grad():
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
# # print
# if n_batch % self.log_interval == 0:
# self._print('train', epoch, n_batch+1, nb_steps,
# loss)
# # tb callback
# if self.tensorboard:
# tbopt = dict(inputs=batch, outputs=output,
# epoch=epoch, minibatch=n_batch, mode='train',
# loss=loss)
# self.model.board(self.tensorboard, **tbopt)
# for func in self._tensorboard_callbacks['train']['step']:
# func(self.tensorboard, **tbopt)
# del tbopt
# print summary
with torch.no_grad():
epoch_loss /= nb_batches
# self._print('train', epoch, nb_steps, nb_steps,
# epoch_loss, last=True)
# self._board('train', epoch)
# # tb callback
# if self.tensorboard:
# tbopt = dict(epoch=epoch, loss=epoch_loss, mode='train')
# self.model.board(self.tensorboard, **tbopt)
# for func in self._tensorboard_callbacks['train']['epoch']:
# func(self.tensorboard, **tbopt)
return epoch_loss
def _eval(self, epoch=0):
"""Evaluate once"""
if self.eval_set is None:
return
self.model.eval()
with torch.no_grad():
epoch_loss = 0
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_steps = len(self.eval_set)
for n_batch, batch in enumerate(self.eval_set):
losses = {}
metrics = {}
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
self.optimizer.zero_grad()
output = self.model(*batch, _loss=losses, _metric=metrics)
loss = sum(losses.values())
# update average across batches
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('eval', epoch, n_batch + 1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='eval',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['step']:
func(self.tensorboard, **tbopt)
# print summary
epoch_loss /= nb_batches
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('eval', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('eval', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='eval',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['epoch']:
func(self.tensorboard, **tbopt)
return epoch_loss
def _print(self, mode, n_epoch, n_batch, nb_steps, loss,
losses=None, metrics=None, last=False):
"""Pretty printing
Parameters
----------
mode : {'train', 'eval'}
n_epoch : int
Index of current epoch (starts at one)
n_batch : int
Index of current batch (starts at one)
nb_steps : int
Total number of batches
loss : () tensor
Loss for this batch
losses : dict[str: () tensor]
Loss components for this batch
metrics : dict[str: () tensor]
Metrics for this batch
last : bool, default=False
            Is this the last batch of the epoch?
If True, loss/losses/metrics should contain the average loss
across all batches.
"""
name = 'Train' if mode == 'train' else 'Eval '
if last:
pct = 1
bar = '[' + '=' * 10 + ']'
else:
pct = n_batch/nb_steps
len_arrow = min(math.floor(pct*10 + 0.5), 9)
bar = '[' + '=' * len_arrow + '>' + ' ' * (9-len_arrow) + ']'
lepoch = str(len(str(self.nb_epoch)))
evolution = '{:s} | {:' + lepoch + 'd} | {:3.0f}% ' + bar + ' '
evolution = evolution.format(name, n_epoch, pct*100)
values = ''
if mode == 'train':
values += '| loss = {:12.6g} '.format(loss.item())
if losses and self.show_losses:
values += '|'
for key, val in losses.items():
values += ' {}: {:12.6g} '.format(key, val.item())
if metrics and (mode == 'eval' or self.show_metrics):
values += '|'
for key, val in metrics.items():
values += ' {}: {:12.6g} '.format(key, val.item())
print(evolution + values, end='\r', flush=True)
if last:
print('')
def _board(self, mode, epoch, loss, epoch_metrics):
"""Add losses and metrics to tensorboard."""
if not self.tensorboard:
return
tb = self.tensorboard
tb.add_scalars('loss', {mode: loss.item()}, epoch)
for tag, value in epoch_metrics.items():
tb.add_scalars(tag, {mode: value.item()}, epoch)
tb.flush()
def add_tensorboard_callback(self, func, mode='train', trigger='epoch'):
"""Register tensorboard callbacks
Parameters
----------
func : callable
If trigger 'step', with signature
`(tb, input, output, epoch, step, loss, losses, metrics)`
If trigger 'epoch', with signature:
`(tb, epoch, loss, losses, metrics)`
mode : {'train', 'eval'}
Trigger either during a training or evaluation call.
trigger : {'epoch', 'step'}
Trigger either at the end of a step or at the end of an epoch.
"""
if mode not in self._tensorboard_callbacks.keys():
self._tensorboard_callbacks[mode] = dict()
if trigger not in self._tensorboard_callbacks[mode].keys():
self._tensorboard_callbacks[mode][trigger] = list()
self._tensorboard_callbacks[mode][trigger].append(func)
def _hello(self, mode):
"""Tell the use what we are going to do (mode, device, dtype, ...)
Parameters
----------
mode : {'train', 'eval'}
"""
if self.device.type == 'cuda':
device = torch.cuda.get_device_name(self.device)
else:
assert self.device.type == 'cpu'
device = 'CPU'
dtype = str(self.dtype).split('.')[-1]
if mode == 'train':
hello = 'Training model {} for {} epochs (steps per epoch: {}) ' \
'on {} (dtype = {})'
hello = hello.format(type(self.model).__name__, self.nb_epoch,
len(self.train_set), device, dtype)
else:
hello = 'Evaluating model {} (minibatches: {}) on {} (dtype = {})'
hello = hello.format(type(self.model).__name__,
len(self.eval_set), device, dtype)
print(hello, flush=True)
def _save(self, epoch):
"""Save once"""
if self.save_model:
save_model = self._formatfile(self.save_model, epoch)
dir_model = os.path.dirname(save_model)
if dir_model:
os.makedirs(dir_model, exist_ok=True)
torch.save(self.model.state_dict(), save_model)
if self.save_optimizer:
save_optimizer = self._formatfile(self.save_optimizer, epoch)
dir_optimizer = os.path.dirname(save_optimizer)
if dir_optimizer:
os.makedirs(dir_optimizer, exist_ok=True)
torch.save(self.optimizer.state_dict(), save_optimizer)
@staticmethod
def _formatfile(file, epoch):
"""Format filename for an epoch"""
keys = [tup[1] for tup in string.Formatter().parse(file)
if tup[1] is not None]
if len(keys) == 1:
file = file.format(epoch)
elif len(keys) > 1:
raise ValueError('Cannot have more than one format key')
return file
def train(self):
"""Launch training"""
self._hello('train')
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
with benchmark(self.benchmark):
self.model.to(dtype=self.dtype, device=self.device)
self.epoch = self.initial_epoch
self._eval(self.epoch)
self._save(self.epoch)
for self.epoch in range(self.epoch+1, self.nb_epoch+1):
train_loss = self._train(self.epoch)
print('Train loss: {}'.format(train_loss))
val_loss = self._eval(self.epoch)
self._save(self.epoch)
# scheduler
if isinstance(self.scheduler, ReduceLROnPlateau):
sched_loss = val_loss or train_loss
self.scheduler.step(sched_loss)
elif self.scheduler:
self.scheduler.step()
def eval(self):
"""Launch evaluation"""
self._hello('eval')
self.model.to(dtype=self.dtype, device=self.device)
self._eval()
def init(self):
"""Initialize the random state + run one evaluation."""
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
self.save_random_state()
self.epoch = self.initial_epoch
self.model.to(dtype=self.dtype, device=self.device)
self._eval(self.epoch)
self._save(self.epoch)
def set_random_state(self):
"""Populate the random state using a saved state."""
if self.random_state:
cpu_state, *gpu_states = self.random_state
devices = list(range(torch.cuda.device_count()))
torch.set_rng_state(self.random_state[0])
for device, state in zip(devices, gpu_states):
torch.cuda.set_rng_state(state, device)
def save_random_state(self):
"""Save the current random state."""
devices = list(range(torch.cuda.device_count()))
self.random_state = [torch.get_rng_state()]
self.random_state.extend(torch.cuda.get_rng_state(device)
for device in devices)
def train1(self):
"""Train for one epoch."""
with torch.random.fork_rng():
self.set_random_state()
self.model.to(dtype=self.dtype, device=self.device)
self.epoch += 1
self._train(self.epoch)
self._eval(self.epoch)
self._save(self.epoch)
self.save_random_state()
class SegGANTrainer:
"""Training class for Seg+CycleGAN model, may need tweaking for general use."""
_nb_steps = None
_train_set = None
_eval_set = None
_tensorboard = None
_tensorboard_callbacks = None
random_state = []
def __init__(self, model, disc, train_set, eval_set=None,
optimizer=None,
lambda_gp=10,
lambda_domain=1,
lambda_cycle=10,
lambda_id=0.1,
lambda_seg_domain=0.1,
lambda_seg_synth=0.3,
lambda_seg_adv=0.1,
seg_loss=DiceLoss(log=False, implicit=False),
domain_loss=torch.nn.CrossEntropyLoss(),
cycle_loss=torch.nn.L1Loss(),
gen_interval=1,
seg_interval=20,
adv_seg_start=5,
softplus=True,
r1=False,
nb_epoch=100,
nb_steps=None,
*, # the remaining parameters *must be* keywords
device=None,
dtype=None,
initial_epoch=0,
log_interval=10,
benchmark=False,
seed=None,
tensorboard=None,
save_model=None,
save_optimizer=None,
load_model=None,
load_optimizer=None,
show_losses=True,
show_metrics=False,
scheduler=ReduceLROnPlateau):
"""
Parameters
----------
model : Module
(Generative) Model to train.
Its forward pass should accept a `loss` argument, and take as
inputs the elements that pop out of the training set.
disc : Module or sequence[Module]
Discriminator model(s) for GAN training.
For cycleSeg model this should contain one for GAN and one for seg.
train_set : sequence[tensor or tuple[tensor]]
Training set.
It should be a finite sequence of tensors or tuple of tensors.
Should contain tuple of (Source, Target) domains.
eval_set : sequence[tensor or tuple[tensor]], optional
Evaluation set.
It should be a finite sequence of tensors or tuple of tensors.
Should contain tuple of (Source, Target) domains.
optimizer : callable, default=Adam
A function that takes trainable parameters as inputs and
returns an Optimizer object.
nb_epoch : int, default=100
Number of epochs.
nb_steps : int, default=`len(train_set) or 100`
Number of steps per epoch.
If the training set is a finite sequence (i.e., `len` is
implemented), its length is used. Else, the training set
is assumed to be infinite and the default number of steps
is 100.
scheduler : Scheduler, default=ReduceLROnPlateau
Other Parameters
----------------
device : torch.device, optional
Device to use. By default, use the default cuda device if
any, else use cpu.
dtype : torch.dtype, optional
Data type to use. By default use `torch.get_default_dtype`.
initial_epoch : int, default=0
First epoch
log_interval : int, default=10
Number of steps between screen updates.
benchmark : bool, default=False
Use the cudnn benchmarking utility that uses the first forward
pass to compare different convolution algorithms and select the
best performing one. You should only use this option if the
spatial shape of your input data is constant across mini batches.
seed : int, optional
Manual seed to use for training. The seed is set when
training starts. A context manager is used so that the global
state is kept untouched. If `None`, use the global state.
tensorboard : str, optional
A path to the tensorboard log directory.
If provided, losses and metrics are registered to the board
by default.
save_model : str, optional
A path to save the model at each epoch. Can have a
formatted component ('mymodel_{}.pth') for the epoch number.
save_optimizer : str, optional
A path to save the optimizer at each epoch. Can have a
formatted component ('myoptim_{}.pth') for the epoch number.
load_model : str, optional
Path to saved weights to use to initialize the model.
load_optimizer : str, optional
Path to saved state to use to initialize the optimizer.
show_losses : bool, default=True
Print values of individual losses
show_metrics : bool, default=False
Print values of individual metrics
"""
self.model = model
if isinstance(disc, (list, tuple)):
if len(disc) == 2:
self.disc_gan, self.disc_seg = disc
else:
self.disc_gan = disc[0]
self.disc_seg = None
else:
self.disc_gan = disc
self.disc_seg = None
self.train_set = train_set
self.eval_set = eval_set
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.5,0.999))
self.optim_d_gan = None
self.optim_d_seg = None
if self.disc_gan:
self.optim_d_gan = torch.optim.Adam(self.disc_gan.parameters(), lr=0.00001, betas=(0.5,0.999))
if self.disc_seg:
self.optim_d_seg = torch.optim.Adam(self.disc_seg.parameters(), lr=0.00001, betas=(0.5,0.999))
self.optimizer = optimizer
self.lambda_gp = lambda_gp
self.lambda_domain = lambda_domain
self.lambda_cycle = lambda_cycle
self.lambda_id = lambda_id
self.lambda_seg_domain = lambda_seg_domain
self.lambda_seg_synth = lambda_seg_synth
self.lambda_seg_adv = lambda_seg_adv
self.seg_loss = seg_loss
self.domain_loss = domain_loss
self.cycle_loss = cycle_loss
self.gen_interval = gen_interval
self.seg_interval = seg_interval
self.adv_seg_start = adv_seg_start
self.softplus = softplus
self.r1 = r1
self.log_interval = log_interval
self.benchmark = benchmark
self.seed = seed
self.initial_seed = seed
self.tensorboard = tensorboard
self._tensorboard_callbacks = dict(train=dict(epoch=[], step=[]),
eval=dict(epoch=[], step=[]))
self.save_model = save_model
self.save_optimizer = save_optimizer
self.load_model = load_model
self.load_optimizer = load_optimizer
self.show_losses = show_losses
self.show_metrics = show_metrics
self.nb_epoch = nb_epoch
self.nb_steps = nb_steps
self.initial_epoch = initial_epoch
self.epoch = initial_epoch
        self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
self.device = torch.device(self.device)
self.dtype = dtype or torch.get_default_dtype()
self.scheduler = scheduler
if self.scheduler is not None:
self.scheduler = self.scheduler(self.optimizer)
if self.load_model:
self.model.load_state_dict(torch.load(self.load_model))
if self.load_optimizer:
self.optimizer.load_state_dict(torch.load(self.load_optimizer))
def _update_nb_steps(self):
def len_or(x, default):
return len(x) if hasattr(x, '__len__') else default
self._nb_train = self._nb_steps or len_or(self._train_set, 100)
self._nb_eval = self._nb_steps or len_or(self._eval_set, 100)
class _batch_iterator:
def __init__(self, set, length):
self.set = set
self.length = length
def __len__(self):
return self.length
def __iter__(self):
d = 0
while d < self.length:
for batch in self.set:
if d >= self.length:
return
yield batch
d += 1
@property
def tensorboard(self):
        return self._tensorboard
@tensorboard.setter
def tensorboard(self, val):
if not val:
self._tensorboard = val
else:
self._tensorboard = SummaryWriter(val)
@property
def nb_steps(self):
return self._nb_steps
@nb_steps.setter
def nb_steps(self, val):
self._nb_steps = val
self._update_nb_steps()
@property
def train_set(self):
if self._train_set:
return self._batch_iterator(self._train_set, self._nb_train)
else:
return None
@train_set.setter
def train_set(self, val):
self._train_set = val
self._update_nb_steps()
@property
def eval_set(self):
if self._eval_set:
return self._batch_iterator(self._eval_set, self._nb_eval)
else:
return None
@eval_set.setter
def eval_set(self, val):
self._eval_set = val
self._update_nb_steps()
def wass_gp(self, disc, real, fake):
# Adapted from example provided by @eriklindernoren on GitHub
        # make sure the fake sample is on the same device as the real one
device = real.device
fake = fake.to(device)
# assume [B, C, **] -> dim = length of shape excluding B & C
dim = len(real.shape) - 2
# random number to scale between real & fake
shape = [real.shape[0], 1]
_ = [shape.append(1) for i in range(dim)]
eps = torch.rand(shape)
eps = eps.to(device)
mix = (real * eps + fake * (1 - eps)).requires_grad_(True)
disc_mix = disc(mix)
if isinstance(disc_mix, (list, tuple)):
disc_mix = disc_mix[0]
fake_ = torch.ones(disc_mix.shape, requires_grad=False)
fake_ = fake_.to(device)
grad = torch.autograd.grad(
outputs=disc_mix,
inputs=mix,
grad_outputs=fake_,
create_graph=True,
retain_graph=True,
only_inputs=True
)
grad = grad[0]
grad = grad.view(grad.shape[0], -1)
gp = ((grad.norm(2, dim=1) - 1)**2)
gp = gp.mean()
return gp
def r1_reg(self, disc, x_in):
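        # R1 regularisation (Mescheder et al., 2018): a zero-centred gradient
        # penalty computed on real samples only,
        #     R1 = 0.5 * E[ ||grad_x D(x_real)||_2^2 ],
        # which keeps the discriminator's gradients small around the data manifold.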
x_in.requires_grad = True
d_out,_ = disc(x_in)
# zero-centered gradient penalty for real images
batch_size = x_in.size(0)
grad_dout = torch.autograd.grad(
outputs=d_out.sum(), inputs=x_in,
create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_dout2 = grad_dout.pow(2)
assert (grad_dout2.size() == x_in.size())
reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)
return reg
def _train_gan(self, epoch=0):
"""Train GAN for one epoch"""
# TODO: Look at implementing FID metric for val
self.model.train()
# check for translation data - need to extend to work for standard generation
if len(self.train_set) == 2:
train_s, train_t = self.train_set
train_set = zip(train_s, train_t)
nb_steps = len(train_s)
else:
train_set = self.train_set
nb_steps = len(train_set)
epoch_loss_d_gan = 0.
epoch_loss_d_seg = 0.
epoch_loss_g = 0.
epoch_loss_seg = 0.
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_d_gan = 0.
nb_d_seg = 0.
nb_gan = 0.
nb_seg = 0.
### TODO: add proper data-logging
for n_batch, batch in enumerate(train_set):
losses = {}
metrics = {}
loss_d_gan = 0.
loss_d_seg = 0.
loss_g = 0.
loss_seg = 0.
batch_s, batch_t = batch
# create batch for source domain
batch_s = make_tuple(batch_s)
batch_s = tuple(torch.as_tensor(b, device=self.device) for b in batch_s)
batch_s = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch_s)
batch_s_img, batch_s_ref, batch_s_met = batch_s
nb_batches += batch_s[0].shape[0]
# create batch for target domain
batch_t = make_tuple(batch_t)
batch_t = tuple(torch.as_tensor(b, device=self.device) for b in batch_t)
batch_t = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch_t)
batch_t_img, batch_t_met = batch_t
self.optimizer.zero_grad()
if self.optim_d_gan:
self.optim_d_gan.zero_grad()
if self.optim_d_seg:
self.optim_d_seg.zero_grad()
## training translation discriminator
# first perform source -> target
trans_t_img = self.model(image=batch_s_img, meta=batch_s_met,
seg=False, gan=True,
gan_meta=batch_t_met)
# test discriminator on translation
real_valid, real_class = self.disc_gan(batch_s_img)
fake_valid, fake_class = self.disc_gan(trans_t_img)
# calculate wasserstein gradient penalty (or R1)
if self.r1:
grad_pen = self.r1_reg(self.disc_gan, batch_s_img)
else:
grad_pen = self.wass_gp(self.disc_gan, batch_s_img, trans_t_img)
# adversarial
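                # softplus(-real) + softplus(fake) is the non-saturating logistic GAN
                # loss; the alternative branch is the Wasserstein critic loss. Both
                # are regularised by the gradient penalty weighted by lambda_gp.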
if self.softplus:
loss_adv_d = torch.mean(F.softplus(-real_valid)) + torch.mean(F.softplus(fake_valid)) + self.lambda_gp * grad_pen
else:
loss_adv_d = -torch.mean(real_valid) + torch.mean(fake_valid) + self.lambda_gp * grad_pen
# domain
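                # the domain metadata is treated as a one-hot (or probability) vector;
                # torch.max(..., -1)[1] turns it into class indices for CrossEntropyLoss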
loss_dom_d = self.domain_loss(real_class.view(-1, real_class.shape[-1]), torch.max(batch_s_met, -1)[1].view(-1))
# repeat for target -> source
trans_s_img = self.model(image=batch_t_img, meta=batch_t_met,
seg=False, gan=True,
gan_meta=batch_s_met)
real_valid, real_class = self.disc_gan(batch_t_img)
fake_valid, fake_class = self.disc_gan(trans_s_img)
if self.r1:
grad_pen = self.r1_reg(self.disc_gan, batch_t_img)
else:
grad_pen = self.wass_gp(self.disc_gan, batch_t_img, trans_s_img)
if self.softplus:
loss_adv_d += torch.mean(F.softplus(-real_valid)) + torch.mean(F.softplus(fake_valid)) + self.lambda_gp * grad_pen
else:
loss_adv_d += -torch.mean(real_valid) + torch.mean(fake_valid) + self.lambda_gp * grad_pen
loss_dom_d += self.domain_loss(real_class.view(-1, real_class.shape[-1]), torch.max(batch_t_met, -1)[1].view(-1))
# calculate overall loss
loss_d_gan = loss_adv_d + self.lambda_domain * loss_dom_d
losses['loss_adv_d_gan'] = loss_adv_d
losses['loss_dom_d_gan'] = loss_dom_d
losses['loss_d_gan'] = loss_d_gan
self.optim_d_gan.zero_grad()
loss_d_gan.backward()
self.optim_d_gan.step()
nb_d_gan += 1.
if self.disc_seg and epoch > self.adv_seg_start:
## training segmentation discriminator
# segment images
s_seg = self.model(image=batch_s_img, meta=batch_s_met,
seg=True, gan=False)
t_seg = self.model(image=batch_t_img, meta=batch_t_met,
seg=True, gan=False)
# test discriminator on segmentation
gt_valid, _ = self.disc_seg(batch_s_ref)
s_valid, s_class = self.disc_seg(s_seg)
t_valid, t_class = self.disc_seg(t_seg)
# calculate wasserstein gradient penalty
if self.r1:
grad_pen = self.r1_reg(self.disc_seg, batch_s_ref)
else:
grad_pen = 0.5 * (self.wass_gp(self.disc_seg, batch_s_ref, s_seg) + \
self.wass_gp(self.disc_seg, batch_s_ref, t_seg))
# adversarial
if self.softplus:
loss_adv_d = torch.mean(F.softplus(-gt_valid)) + \
0.5 * (torch.mean(F.softplus(s_valid)) + torch.mean(F.softplus(t_valid))) + \
self.lambda_gp * grad_pen
else:
loss_adv_d = -torch.mean(gt_valid) + \
0.5 * (torch.mean(s_valid) + torch.mean(t_valid)) + \
self.lambda_gp * grad_pen
# domain
loss_dom_d = 0.5 * (self.domain_loss(s_class.view(-1, s_class.shape[-1]), torch.max(batch_s_met, -1)[1].view(-1)) + \
self.domain_loss(t_class.view(-1, t_class.shape[-1]), torch.max(batch_t_met, -1)[1].view(-1)))
# calculate overall loss
loss_d_seg = loss_adv_d + self.lambda_domain * loss_dom_d
losses['loss_adv_d_seg'] = loss_adv_d
losses['loss_dom_d_seg'] = loss_dom_d
losses['loss_d_seg'] = loss_d_seg
self.optim_d_seg.zero_grad()
loss_d_seg.backward()
self.optim_d_seg.step()
nb_d_seg += 1.
if n_batch > 0 and n_batch % self.gen_interval == 0:
## training translation generator
# source -> target
s_t_img = self.model(image=batch_s_img, meta=batch_s_met,
seg=False, gan=True,
gan_meta=batch_t_met)
fake_valid, fake_class = self.disc_gan(s_t_img)
if self.softplus:
loss_g_adv = torch.mean(F.softplus(-fake_valid))
else:
loss_g_adv = - torch.mean(fake_valid)
loss_g_dom = self.domain_loss(fake_class.view(-1, fake_class.shape[-1]), torch.max(batch_t_met, -1)[1].view(-1))
# target -> source
                t_s_img = self.model(image=batch_t_img, meta=batch_t_met,
                                     seg=False, gan=True,
                                     gan_meta=batch_s_met)
fake_valid, fake_class = self.disc_gan(t_s_img)
if self.softplus:
loss_g_adv += torch.mean(F.softplus(-fake_valid))
else:
loss_g_adv += - torch.mean(fake_valid)
loss_g_dom += self.domain_loss(fake_class.view(-1, fake_class.shape[-1]), torch.max(batch_s_met, -1)[1].view(-1))
# source -> target -> source
s_t_s_img = self.model(image=s_t_img, meta=batch_t_met,
seg=False, gan=True,
gan_meta=batch_s_met)
loss_g_cyc = self.cycle_loss(s_t_s_img, batch_s_img)
# target -> source -> target
t_s_t_img = self.model(image=t_s_img, meta=batch_s_met,
seg=False, gan=True,
gan_meta=batch_t_met)
loss_g_cyc += self.cycle_loss(t_s_t_img, batch_t_img)
# source -> source
s_s_img = self.model(image=batch_s_img, meta=batch_s_met,
seg=False, gan=True,
gan_meta=batch_s_met)
loss_g_id = self.cycle_loss(s_s_img, batch_s_img)
# target -> target
t_t_img = self.model(image=batch_t_img, meta=batch_t_met,
seg=False, gan=True,
gan_meta=batch_t_met)
loss_g_id += self.cycle_loss(t_t_img, batch_t_img)
# overall loss
loss_g = loss_g_adv + self.lambda_domain * loss_g_dom + \
self.lambda_cycle * loss_g_cyc + self.lambda_id * loss_g_id
losses['loss_g_adv'] = loss_g_adv
losses['loss_g_dom'] = loss_g_dom
losses['loss_g_cyc'] = loss_g_cyc
losses['loss_g_id'] = loss_g_id
losses['loss_g'] = loss_g
self.optimizer.zero_grad()
loss_g.backward()
self.optimizer.step()
nb_gan += 1.
if n_batch % self.seg_interval == 0:
## training segmentation 'generator' via Dice
# segment images
s_seg = self.model(image=batch_s_img, meta=batch_s_met,
seg=True, gan=False)
# supervised learning of source -> label
loss_seg_sup = self.seg_loss(s_seg, batch_s_ref)
s_t_img = self.model(image=batch_s_img, meta=batch_s_met,
seg=False, gan=True,
gan_meta=batch_t_met)
s_t_seg = self.model(image=s_t_img, meta=batch_t_met,
seg=True, gan=False)
# supervised learning of source -> target -> label
loss_seg_synth = self.seg_loss(s_t_seg, batch_s_ref)
if self.disc_seg and epoch > self.adv_seg_start:
t_seg = self.model(image=batch_t_img, meta=batch_t_met,
seg=True, gan=False)
t_s_img = self.model(image=batch_t_img, meta=batch_t_met,
seg=False, gan=True,
gan_meta=batch_s_met)
t_s_seg = self.model(image=t_s_img, meta=batch_t_met,
seg=True, gan=False)
# test discriminator on segmentation
s_valid, s_class = self.disc_seg(s_seg)
t_valid, t_class = self.disc_seg(t_seg)
s_t_valid, s_t_class = self.disc_seg(s_t_seg)
t_s_valid, t_s_class = self.disc_seg(t_s_seg)
# adversarial
if self.softplus:
loss_seg_adv = torch.mean(F.softplus(-s_valid))
loss_seg_adv += torch.mean(F.softplus(-t_valid))
loss_seg_adv += torch.mean(F.softplus(-s_t_valid))
loss_seg_adv += torch.mean(F.softplus(-t_s_valid))
else:
loss_seg_adv = -torch.mean(s_valid)
loss_seg_adv += -torch.mean(t_valid)
loss_seg_adv += -torch.mean(s_t_valid)
loss_seg_adv += -torch.mean(t_s_valid)
# domain
loss_seg_dom = self.domain_loss(s_class.view(-1, s_class.shape[-1]), torch.max(batch_s_met, -1)[1].view(-1))
loss_seg_dom += self.domain_loss(t_class.view(-1, t_class.shape[-1]), torch.max(batch_t_met, -1)[1].view(-1))
loss_seg_dom += self.domain_loss(s_t_class.view(-1, s_t_class.shape[-1]), torch.max(batch_t_met, -1)[1].view(-1))
loss_seg_dom += self.domain_loss(t_s_class.view(-1, t_s_class.shape[-1]), torch.max(batch_s_met, -1)[1].view(-1))
# calculate overall loss
loss_seg = loss_seg_sup + self.lambda_seg_synth * loss_seg_synth + \
self.lambda_seg_adv * loss_seg_adv + self.lambda_seg_domain * loss_seg_dom
losses['loss_seg_sup'] = loss_seg_sup
losses['loss_seg_synth'] = loss_seg_synth
losses['loss_seg_adv'] = loss_seg_adv
losses['loss_seg_domain'] = loss_seg_dom
else:
# only use T1 segmentation loss
loss_seg = loss_seg_sup + self.lambda_seg_synth * loss_seg_synth
losses['loss_seg_sup'] = loss_seg_sup
losses['loss_seg_synth'] = loss_seg_synth
losses['loss_seg'] = loss_seg
self.optimizer.zero_grad()
loss_seg.backward()
self.optimizer.step()
nb_seg += 1.
# update average across batches
with torch.no_grad():
weight = float(batch_s[0].shape[0])
epoch_loss_d_gan += loss_d_gan * weight
epoch_loss_d_seg += loss_d_seg * weight
epoch_loss_g += loss_g * weight
epoch_loss_seg += loss_seg * weight
loss = loss_d_gan + loss_d_seg + loss_g + loss_seg
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('train', epoch, n_batch+1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='train',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['step']:
func(self.tensorboard, **tbopt)
del tbopt
# print summary
with torch.no_grad():
if nb_d_gan > 0:
epoch_loss_d_gan /= nb_d_gan
if nb_d_seg > 0:
epoch_loss_d_seg /= nb_d_seg
if nb_gan > 0:
epoch_loss_g /= nb_gan
if nb_seg > 0:
epoch_loss_seg /= nb_seg
epoch_loss = epoch_loss_d_gan + epoch_loss_d_seg + epoch_loss_g + epoch_loss_seg
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('train', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('train', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='train',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['epoch']:
func(self.tensorboard, **tbopt)
print('D_G loss: {}\nD_S loss: {}\nG loss: {}\nS loss: {}'.format(epoch_loss_d_gan, epoch_loss_d_seg, epoch_loss_g, epoch_loss_seg))
return epoch_loss, epoch_losses
def _train_seg(self, epoch=0):
"""Train segmentation for one epoch"""
self.model.train()
epoch_loss = 0.
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_steps = len(self.train_set)
for n_batch, batch in enumerate(self.train_set):
losses = {}
metrics = {}
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
self.optimizer.zero_grad()
output = self.model(*batch, _loss=losses, _metric=metrics)
loss = sum(losses.values())
# backward pass
loss.backward()
self.optimizer.step()
# update average across batches
with torch.no_grad():
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('train', epoch, n_batch+1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='train',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['step']:
func(self.tensorboard, **tbopt)
del tbopt
# print summary
with torch.no_grad():
epoch_loss /= nb_batches
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('train', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('train', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='train',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['train']['epoch']:
func(self.tensorboard, **tbopt)
return epoch_loss
def _eval(self, epoch=0):
"""Evaluate once"""
if self.eval_set is None:
return
self.model.eval()
with torch.no_grad():
epoch_loss = 0
epoch_losses = {}
epoch_metrics = {}
nb_batches = 0
nb_steps = len(self.eval_set)
for n_batch, batch in enumerate(self.eval_set):
losses = {}
metrics = {}
# forward pass
batch = make_tuple(batch)
batch = tuple(torch.as_tensor(b, device=self.device) for b in batch)
batch = tuple(b.to(dtype=self.dtype)
if b.dtype in (torch.half, torch.float, torch.double)
else b for b in batch)
nb_batches += batch[0].shape[0]
self.optimizer.zero_grad()
output = self.model(*batch, _loss=losses, _metric=metrics)
loss = sum(losses.values())
# update average across batches
weight = float(batch[0].shape[0])
epoch_loss += loss * weight
update_loss_dict(epoch_losses, losses, weight)
update_loss_dict(epoch_metrics, metrics, weight)
# print
if n_batch % self.log_interval == 0:
self._print('eval', epoch, n_batch + 1, nb_steps,
loss, losses, metrics)
# tb callback
if self.tensorboard:
tbopt = dict(inputs=batch, outputs=output,
epoch=epoch, minibatch=n_batch, mode='eval',
loss=loss, losses=losses, metrics=metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['step']:
func(self.tensorboard, **tbopt)
# print summary
epoch_loss /= nb_batches
normalize_loss_dict(epoch_losses, nb_batches)
normalize_loss_dict(epoch_metrics, nb_batches)
self._print('eval', epoch, nb_steps, nb_steps,
epoch_loss, epoch_losses, epoch_metrics, last=True)
self._board('eval', epoch, epoch_loss, epoch_metrics)
# tb callback
if self.tensorboard:
tbopt = dict(epoch=epoch, loss=epoch_loss, mode='eval',
losses=epoch_losses, metrics=epoch_metrics)
self.model.board(self.tensorboard, **tbopt)
for func in self._tensorboard_callbacks['eval']['epoch']:
func(self.tensorboard, **tbopt)
return epoch_loss
def _print(self, mode, n_epoch, n_batch, nb_steps, loss,
losses=None, metrics=None, last=False):
"""Pretty printing
Parameters
----------
mode : {'train', 'eval'}
n_epoch : int
Index of current epoch (starts at one)
n_batch : int
Index of current batch (starts at one)
nb_steps : int
Total number of batches
loss : () tensor
Loss for this batch
losses : dict[str: () tensor]
Loss components for this batch
metrics : dict[str: () tensor]
Metrics for this batch
last : bool, default=False
            Is this the last batch of the epoch?
If True, loss/losses/metrics should contain the average loss
across all batches.
"""
name = 'Train' if mode == 'train' else 'Eval '
if last:
pct = 1
bar = '[' + '=' * 10 + ']'
else:
pct = n_batch/nb_steps
len_arrow = min(math.floor(pct*10 + 0.5), 9)
bar = '[' + '=' * len_arrow + '>' + ' ' * (9-len_arrow) + ']'
lepoch = str(len(str(self.nb_epoch)))
evolution = '{:s} | {:' + lepoch + 'd} | {:3.0f}% ' + bar + ' '
evolution = evolution.format(name, n_epoch, pct*100)
values = ''
if mode == 'train':
values += '| loss = {:12.6g} '.format(loss.item())
if losses and self.show_losses:
values += '|'
for key, val in losses.items():
values += ' {}: {:12.6g} '.format(key, val.item())
if metrics and (mode == 'eval' or self.show_metrics):
values += '|'
for key, val in metrics.items():
values += ' {}: {:12.6g} '.format(key, val.item())
print(evolution + values, end='\r', flush=True)
if last:
print('')
def _board(self, mode, epoch, loss, epoch_metrics):
"""Add losses and metrics to tensorboard."""
if not self.tensorboard:
return
tb = self.tensorboard
tb.add_scalars('loss', {mode: loss.item()}, epoch)
for tag, value in epoch_metrics.items():
tb.add_scalars(tag, {mode: value.item()}, epoch)
tb.flush()
def add_tensorboard_callback(self, func, mode='train', trigger='epoch'):
"""Register tensorboard callbacks
Parameters
----------
func : callable
If trigger 'step', with signature
`(tb, input, output, epoch, step, loss, losses, metrics)`
If trigger 'epoch', with signature:
`(tb, epoch, loss, losses, metrics)`
mode : {'train', 'eval'}
Trigger either during a training or evaluation call.
trigger : {'epoch', 'step'}
Trigger either at the end of a step or at the end of an epoch.
"""
if mode not in self._tensorboard_callbacks.keys():
self._tensorboard_callbacks[mode] = dict()
if trigger not in self._tensorboard_callbacks[mode].keys():
self._tensorboard_callbacks[mode][trigger] = list()
self._tensorboard_callbacks[mode][trigger].append(func)
def _hello(self, mode):
"""Tell the use what we are going to do (mode, device, dtype, ...)
Parameters
----------
mode : {'train', 'eval'}
"""
if self.device.type == 'cuda':
device = torch.cuda.get_device_name(self.device)
else:
assert self.device.type == 'cpu'
device = 'CPU'
dtype = str(self.dtype).split('.')[-1]
if mode == 'train':
hello = 'Training model {} for {} epochs (steps per epoch: {}) ' \
'on {} (dtype = {})'
hello = hello.format(type(self.model).__name__, self.nb_epoch,
len(self.train_set), device, dtype)
else:
hello = 'Evaluating model {} (minibatches: {}) on {} (dtype = {})'
hello = hello.format(type(self.model).__name__,
len(self.eval_set), device, dtype)
print(hello, flush=True)
def _save(self, epoch):
"""Save once"""
if self.save_model:
save_model = self._formatfile(self.save_model, epoch)
dir_model = os.path.dirname(save_model)
if dir_model:
os.makedirs(dir_model, exist_ok=True)
torch.save(self.model.state_dict(), save_model)
if self.save_optimizer:
save_optimizer = self._formatfile(self.save_optimizer, epoch)
dir_optimizer = os.path.dirname(save_optimizer)
if dir_optimizer:
os.makedirs(dir_optimizer, exist_ok=True)
torch.save(self.optimizer.state_dict(), save_optimizer)
@staticmethod
def _formatfile(file, epoch):
"""Format filename for an epoch"""
keys = [tup[1] for tup in string.Formatter().parse(file)
if tup[1] is not None]
if len(keys) == 1:
file = file.format(epoch)
elif len(keys) > 1:
raise ValueError('Cannot have more than one format key')
return file
def train(self):
"""Launch training"""
self._hello('train')
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
with benchmark(self.benchmark):
self.model.to(dtype=self.dtype, device=self.device)
self.epoch = self.initial_epoch
self._eval(self.epoch)
self._save(self.epoch)
for self.epoch in range(self.epoch+1, self.nb_epoch+1):
                    # this class defines _train_gan / _train_seg rather than _train
                    train_loss, _ = self._train_gan(self.epoch)
print('Train loss: {}'.format(train_loss))
val_loss = self._eval(self.epoch)
self._save(self.epoch)
# scheduler
if isinstance(self.scheduler, ReduceLROnPlateau):
sched_loss = val_loss or train_loss
self.scheduler.step(sched_loss)
elif self.scheduler:
self.scheduler.step()
def eval(self):
"""Launch evaluation"""
self._hello('eval')
self.model.to(dtype=self.dtype, device=self.device)
self._eval()
def init(self):
"""Initialize the random state + run one evaluation."""
with torch.random.fork_rng(enabled=self.seed is not None):
if self.seed is not None:
torch.random.manual_seed(self.seed)
self.initial_seed = torch.random.initial_seed()
self.save_random_state()
self.epoch = self.initial_epoch
self.model.to(dtype=self.dtype, device=self.device)
self._eval(self.epoch)
self._save(self.epoch)
def set_random_state(self):
"""Populate the random state using a saved state."""
if self.random_state:
cpu_state, *gpu_states = self.random_state
devices = list(range(torch.cuda.device_count()))
torch.set_rng_state(self.random_state[0])
for device, state in zip(devices, gpu_states):
torch.cuda.set_rng_state(state, device)
def save_random_state(self):
"""Save the current random state."""
devices = list(range(torch.cuda.device_count()))
self.random_state = [torch.get_rng_state()]
self.random_state.extend(torch.cuda.get_rng_state(device)
for device in devices)
def train1(self):
"""Train for one epoch."""
with torch.random.fork_rng():
self.set_random_state()
self.model.to(dtype=self.dtype, device=self.device)
self.epoch += 1
            self._train_gan(self.epoch)
self._eval(self.epoch)
self._save(self.epoch)
self.save_random_state()
| [
"torch.ones",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.get_rng_state",
"torch.cuda.set_rng_state",
"torch.random.manual_seed",
"torch.autograd.grad",
"torch.as_tensor",
"torch.utils.tensorboard.SummaryWriter",
"torch.random.initial_seed",
"torch.device",
"torch.nn.functional.softplus",
"torch.max",
"torch.cuda.get_device_name",
"torch.cuda.device_count",
"torch.cuda.get_rng_state",
"torch.rand",
"torch.get_default_dtype",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.set_rng_state",
"torch.random.fork_rng",
"torch.mean"
] | 1.5 | liamchalcroft/nitorch | 0de179aff97244a82213c528f0d6393725c868c9 |
1.7 | import os
import json
import torch
import numpy as np
from pointnetae.config import *
class SceneDataset(torch.utils.data.Dataset):
def __init__(
self,
data_source,
max_num_points,
load_ram=False,
is_testing=False,
):
self.data_source = data_source
self.load_ram = load_ram
self.max_num_points = max_num_points
if is_testing:
split_path = os.path.join(split_dir, room_name, split_test)
else:
split_path = os.path.join(split_dir, room_name, split_train)
with open(split_path, "r") as f:
self.npyfiles = json.load(f)
if load_ram:
self.loaded_data = []
for f in self.npyfiles:
filepath = os.path.join(self.data_source, f)
self.loaded_data.append(self.get_item_from_filepath(filepath))
def get_item_from_filepath(self, filepath):
furniture_arr = np.load(filepath)
num_points = furniture_arr.shape[0]
assert num_points <= self.max_num_points
target_tensor = furniture_arr
furniture_tensor = np.zeros((num_points, point_size + 1))
furniture_tensor[0:num_points, 0:geometry_size + orientation_size] = furniture_arr[:, 0:geometry_size + orientation_size] # geometry, orientation
furniture_tensor[np.arange(num_points), geometry_size + orientation_size + furniture_arr[:, geometry_size + orientation_size].astype(int)] = 1 # category
furniture_tensor[0:num_points, geometry_size + orientation_size + num_categories] = 1 # existence (TODO: remove this from everywhere)
furniture_tensor[0:num_points, geometry_size + orientation_size + num_categories + 1:] = furniture_arr[:, geometry_size + orientation_size + 1:] # shape
return torch.Tensor(furniture_tensor), torch.Tensor(target_tensor)
def __len__(self):
return len(self.npyfiles)
def __getitem__(self, idx):
if self.load_ram:
return self.loaded_data[idx]
else:
filepath = os.path.join(self.data_source, self.npyfiles[idx])
return self.get_item_from_filepath(filepath)
def get_room_id(self, idx):
return os.path.splitext(self.npyfiles[idx])[0] | [
"torch.Tensor"
] | 1.7.1 | AdamWang00/pointnet.pytorch | b92c3916cf9f84c35861790e8d9cdc6170a9afd5 |
1.3 | import random
import warnings
from collections import OrderedDict
from functools import wraps
from typing import Any, Callable, Generator, Iterator, List, Optional
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
from ignite.engine.engine import Engine
from ignite.engine.events import Events
from ignite.utils import manual_seed
__all__ = ["update_dataloader", "keep_random_state", "ReproducibleBatchSampler", "DeterministicEngine"]
def update_dataloader(dataloader: DataLoader, new_batch_sampler: BatchSampler) -> DataLoader:
"""Helper function to replace current batch sampler of the dataloader by a new batch sampler. Function returns new
dataloader with new batch sampler.
Args:
dataloader: input dataloader
new_batch_sampler: new batch sampler to use
Returns:
DataLoader
"""
params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith("_")]
for k in ["batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"]:
if k in params_keys:
params_keys.remove(k)
params = {k: getattr(dataloader, k) for k in params_keys}
params["batch_sampler"] = new_batch_sampler
return type(dataloader)(**params)
class ReproducibleBatchSampler(BatchSampler):
"""Reproducible batch sampler. This class internally iterates and stores indices of the input batch sampler.
This helps to start providing data batches from an iteration in a deterministic way.
Example:
Setup dataloader with `ReproducibleBatchSampler` and start providing data batches from an iteration
.. code-block:: python
from ignite.engine.deterministic import update_dataloader
dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
# rewind dataloader to a specific iteration:
dataloader.batch_sampler.start_iteration = start_iteration
Args:
batch_sampler: batch sampler same as used with `torch.utils.data.DataLoader`.
start_iteration: optional start iteration.
"""
def __init__(self, batch_sampler: BatchSampler, start_iteration: Optional[int] = None):
if not isinstance(batch_sampler, BatchSampler):
raise TypeError("Argument batch_sampler should be torch.utils.data.sampler.BatchSampler")
self.batch_indices = [] # type: List
self.batch_sampler = batch_sampler
self.start_iteration = start_iteration
self.sampler = self.batch_sampler.sampler
def setup_batch_indices(self) -> None:
"""Setup batch indices."""
self.batch_indices = []
for batch in self.batch_sampler:
self.batch_indices.append(batch)
if self.start_iteration is not None:
self.batch_indices = self.batch_indices[self.start_iteration :]
self.start_iteration = None
def __iter__(self) -> Generator:
self.setup_batch_indices()
for batch in self.batch_indices:
yield batch
def __len__(self) -> int:
return len(self.batch_sampler)
def _get_rng_states() -> List[Any]:
output = [random.getstate(), torch.get_rng_state()]
try:
import numpy as np
output.append(np.random.get_state())
except ImportError:
pass
return output
def _set_rng_states(rng_states: List[Any]) -> None:
random.setstate(rng_states[0])
if "cpu" not in rng_states[1].device.type:
rng_states[1] = rng_states[1].cpu()
torch.set_rng_state(rng_states[1])
try:
import numpy as np
np.random.set_state(rng_states[2])
except ImportError:
pass
def _repr_rng_state(rng_states: List[Any]) -> str:
from hashlib import md5
out = " ".join([md5(str(list(s)).encode("utf-8")).hexdigest() for s in rng_states])
return out
def keep_random_state(func: Callable) -> Callable:
"""Helper decorator to keep random state of torch, numpy and random intact
while executing a function. For more details on usage, please see :ref:`Dataflow synchronization`.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> None:
rng_states = _get_rng_states()
func(*args, **kwargs)
_set_rng_states(rng_states)
return wrapper
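# Illustrative use of keep_random_state (the handler name below is hypothetical):
# the decorated callable may draw random numbers without advancing the RNG state
# of the surrounding training loop.
#
#     @keep_random_state
#     def log_random_samples(engine):
#         idx = torch.randint(0, 100, (8,))  # does not perturb the outer dataflow
#         ...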
class DeterministicEngine(Engine):
"""Deterministic engine derived from :class:`~ignite.engine.engine.Engine`.
"Deterministic" run is done by adding additional handlers to synchronize the dataflow and overriding some methods of
:class:`~ignite.engine.engine.Engine`:
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
If input data provider is `DataLoader`, its batch sampler is replaced by
:class:`~ignite.engine.deterministic.ReproducibleBatchSampler`.
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
setup_sampling(dataloader)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
Internally, `torch.backends.cudnn.deterministic = True` and `torch.backends.cudnn.benchmark = False` are also
applied.
For more details about dataflow synchronization, please see :ref:`Dataflow synchronization`.
.. Note ::
This class can produce exactly the same dataflow when resuming the run from an epoch (or more precisely from
dataflow restart) and using torch `DataLoader` with `num_workers > 1` as data provider.
Args:
process_function: A function receiving a handle to the engine and the current batch
in each iteration, and returns data to be stored in the engine's state.
"""
def __init__(self, process_function: Callable):
super(DeterministicEngine, self).__init__(process_function)
self.state_dict_user_keys.append("rng_states")
self.add_event_handler(Events.STARTED, self._init_run)
self.add_event_handler(Events.DATALOADER_STOP_ITERATION | Events.TERMINATE_SINGLE_EPOCH, self._setup_seed)
def state_dict(self) -> OrderedDict:
state_dict = super(DeterministicEngine, self).state_dict()
state_dict["rng_states"] = _get_rng_states()
return state_dict
def _init_run(self) -> None:
self.state.seed = int(torch.randint(0, int(1e9), (1,)).item())
if not hasattr(self.state, "rng_states"):
setattr(self.state, "rng_states", None)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def _setup_engine(self) -> None:
if self.state.dataloader is None:
raise RuntimeError(
"Internal error, self.state.dataloader is None. Please, file an issue if you encounter this error."
)
self._dataloader_len = self._get_data_length(self.state.dataloader)
# if input data is torch dataloader we replace batch sampler by a batch sampler
# such that its random sampling indices are reproducible by prefetching them before data iteration
if isinstance(self.state.dataloader, DataLoader):
# attribute _dataset_kind is introduced since 1.3.0 => before 1.3.0 all datasets are map-like
can_patch_dataloader = True
if hasattr(self.state.dataloader, "_dataset_kind"):
from torch.utils.data.dataloader import _DatasetKind
_dataloader_kind = self.state.dataloader._dataset_kind
can_patch_dataloader = _dataloader_kind == _DatasetKind.Map
if can_patch_dataloader:
if self._dataloader_len is not None and hasattr(self.state.dataloader.sampler, "epoch"):
if self._dataloader_len != self.state.epoch_length:
warnings.warn(
"When defined engine's epoch length is different of input dataloader length, "
"distributed sampler indices can not be setup in a reproducible manner"
)
batch_sampler = self.state.dataloader.batch_sampler
if not (batch_sampler is None or isinstance(batch_sampler, ReproducibleBatchSampler)):
self.state.dataloader = update_dataloader(
self.state.dataloader, ReproducibleBatchSampler(batch_sampler) # type: ignore[arg-type]
)
iteration = self.state.iteration
self._dataloader_iter = self._from_iteration(iteration)
# Below we define initial counter value for _run_once_on_dataset to measure a single epoch
if self.state.epoch_length is not None:
iteration %= self.state.epoch_length
self._init_iter.append(iteration)
# restore rng state if in the middle
in_the_middle = self.state.iteration % self._dataloader_len > 0 if self._dataloader_len is not None else False
rng_states = getattr(self.state, "rng_states", None)
if rng_states is not None and in_the_middle:
_set_rng_states(rng_states)
setattr(self.state, "rng_states", None)
def _from_iteration(self, iteration: int) -> Iterator:
if self.state.dataloader is None:
raise RuntimeError(
"Internal error, self.state.dataloader is None. Please, file an issue if you encounter this error."
)
data = self.state.dataloader
if isinstance(data, DataLoader):
try:
# following is unsafe for IterableDatasets
iteration %= len(data.batch_sampler) # type: ignore[attr-defined, arg-type]
# Synchronize dataflow according to state.iteration
self._setup_seed()
if iteration > 0:
# batch sampler is ReproducibleBatchSampler
data.batch_sampler.start_iteration = iteration # type: ignore[attr-defined, union-attr]
return iter(data)
except TypeError as e:
# Probably we can do nothing with DataLoader built upon IterableDatasets
pass
self.logger.info("Resuming from iteration for provided data will fetch data until required iteration ...")
if hasattr(data, "__len__"):
iteration %= len(data) # type: ignore[arg-type]
        # Synchronize dataflow from the beginning
self._setup_seed(iteration=0)
data_iter = iter(data)
counter = 0
while counter < iteration:
try:
next(data_iter)
counter += 1
except StopIteration:
data_iter = iter(data)
return data_iter
def _setup_seed(self, _: Any = None, iter_counter: Optional[int] = None, iteration: Optional[int] = None) -> None:
if iter_counter is None:
le = self._dataloader_len if self._dataloader_len is not None else 1
elif not iter_counter > 0:
raise ValueError("iter_counter should be positive value")
else:
le = iter_counter
if iteration is None:
iteration = self.state.iteration
manual_seed(self.state.seed + iteration // le) # type: ignore[operator]
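# Minimal usage sketch (assumed names; the API mirrors ignite.engine.Engine):
#
#     def train_step(engine, batch):
#         model.train()
#         loss = criterion(model(batch[0]), batch[1])
#         optimizer.zero_grad(); loss.backward(); optimizer.step()
#         return loss.item()
#
#     trainer = DeterministicEngine(train_step)
#     trainer.run(train_loader, max_epochs=5)  # resumable with a reproducible dataflow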
| [
"torch.get_rng_state",
"torch.cuda.is_available",
"torch.set_rng_state"
] | 1.3 | 01-vyom/ignite | 6954817abaa03b9be0f3a18b262058e1e8dd8fbe |
1.7 | import os
import time
import logging
import warnings
import psutil
from signal import signal, SIGINT
from py3nvml.py3nvml import *
from typing import Dict, Optional
from kge import Config, Dataset
from kge.distributed.parameter_server import init_torch_server, init_lapse_scheduler
from kge.distributed.worker_process import WorkerProcessPool
from kge.distributed.work_scheduler import WorkScheduler
from kge.distributed.misc import get_num_keys
import torch
from torch import multiprocessing as mp
def monitor_hardware(folder, interval=1):
def bytes_to_mb(bytes_amount):
return round(bytes_amount / 1024 / 1024, 2)
logger = logging.getLogger("hardware_monitor")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(os.path.join(folder, "hardware_monitor.log"))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# let's monitor the default connection between OUR two servers
# todo: just monitor all interfaces later on
interface = "enp130s0f0"
while True:
time.sleep(interval)
cpu_percentage = psutil.cpu_percent()
memory_percentage = psutil.virtual_memory().percent
network_info = psutil.net_io_counters()
bytes_sent = network_info.bytes_sent
bytes_recv = network_info.bytes_recv
        # timestamp;cpu%;mem%;net_sent;net_recv
msg = f"{time.time()};{cpu_percentage};{memory_percentage};{bytes_to_mb(bytes_sent)};{bytes_to_mb(bytes_recv)}"
network_info = psutil.net_io_counters(pernic=True)
if interface in network_info.keys():
bytes_sent = network_info[interface].bytes_sent
bytes_recv = network_info[interface].bytes_recv
msg += f";{bytes_to_mb(bytes_sent)};{bytes_to_mb(bytes_recv)}"
logger.info(
msg=msg
)
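# Illustrative hardware_monitor.log line (semicolon-separated; the numbers are made up):
#   1650000000.123;12.5;43.2;1024.0;2048.0;3.5;7.8
# i.e. timestamp;cpu%;mem%;net_sent_mb;net_recv_mb and, when the monitored interface is
# present, the per-interface sent/received megabytes appended at the end.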
def monitor_gpus(folder, interval=1):
try:
nvmlInit()
except Exception:
print("could not initialize GPU monitor")
return
device_count = nvmlDeviceGetCount()
if device_count == 0:
return
logger = logging.getLogger("gpu_monitor")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(os.path.join(folder, "gpu_monitor.log"))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
while True:
time.sleep(interval)
for i in range(device_count):
handle = nvmlDeviceGetHandleByIndex(i)
proc_res = nvmlDeviceGetComputeRunningProcesses(handle)
mem_per_process = list(
map(lambda obj: (obj.pid, obj.usedGpuMemory), proc_res)
)
res = nvmlDeviceGetUtilizationRates(handle)
mem_res = nvmlDeviceGetMemoryInfo(handle)
# timestamp;device_id;gpu_util;gpu_mem_util;gpu_temp;mem_per_process
logger.info(
f"{time.time()};{i};{res.gpu};{round((mem_res.used/mem_res.total)*100)};{mem_per_process}"
)
def create_and_run_distributed(
config: Config, dataset: Optional[Dataset] = None, checkpoint: Optional[Dict] = None
):
# setting num eval workers to 1 if < 1
if config.get("job.distributed.num_eval_workers") < 1:
warnings.warn("Need to have at least one worker for evaluation."
"Setting job.distributed.num_eval_workers to 1")
config.set("job.distributed.num_eval_workers", 1)
# setting num workers to 1 if < 1
if config.get("job.distributed.num_workers") < 1:
warnings.warn("Need to have at least one worker for training."
"Setting job.distribtued.num_workers to 1")
config.set("job.distributed.num_workers", 1)
# setting num workers per machine to num workers if < 0
if config.get("job.distributed.num_workers_machine") <= 0:
config.set("job.distributed.num_workers_machine", config.get("job.distributed.num_workers"))
# setting already initialized workers if < 0
if config.get("job.distributed.already_init_workers") < 0:
config.set("job.distributed.already_init_workers",
config.get("job.distributed.machine_id") * config.get("job.distributed.num_workers_machine"))
# specific settings for valid only jobs
if config.get("job.type") in ["valid", "test", "eval"]:
config.set("job.distributed.parameter_server", "shared")
num_eval_workers = config.get("job.distributed.num_eval_workers")
config.set("job.distributed.num_workers", num_eval_workers)
config.set("job.distributed.num_workers_machine", num_eval_workers)
config.set("job.distributed.num_machines", 1)
config.set("job.distributed.gloo_socket_ifname", "lo")
config.set("job.distributed.master_ip", "127.0.0.1")
config.set(f"{config.get('model')}.create_eval", True)
os.environ["OMP_NUM_THREADS"] = str(
config.get("job.distributed.num_threads_per_process")
)
os.environ["GLOO_SOCKET_IFNAME"] = config.get("job.distributed.gloo_socket_ifname")
if (
config.get("job.distributed.repartition_epoch")
and config.get("job.distributed.partition_type") == "stratification"
):
        # with stratification we have a lot of open files that need to be shared
# between processes. Some servers don't allow that. Therefore set sharing
# strategy to file_system to avoid too many open files error
torch.multiprocessing.set_sharing_strategy("file_system")
# catch interrupt (to shut down lapse and other processes)
processes = []
monitoring_processes = []
worker_process_pool = None
def kill_processes(signal_received, frame):
print("\nSIGINT or CTRL-C detected. Shutting down all processes and exiting...")
for process in processes:
if process is not None:
try:
process.kill()
except AttributeError:
print("process already killed")
for process in monitoring_processes:
if process is not None:
process.kill()
if worker_process_pool is not None:
worker_process_pool.kill()
exit(0)
signal(SIGINT, kill_processes)
if config.get("job.type") == "train":
# start hardware monitoring
monitor_process = mp.Process(
target=monitor_hardware, args=(config.folder, 0.5), daemon=True
)
monitoring_processes.append(monitor_process)
monitor_process.start()
gpu_monitor_process = mp.Process(
target=monitor_gpus, args=(config.folder, 1), daemon=True
)
monitoring_processes.append(gpu_monitor_process)
gpu_monitor_process.start()
if config.get("job.distributed.machine_id") == 0:
num_keys = get_num_keys(config, dataset)
if config.get("job.distributed.parameter_server") == "lapse":
p = mp.Process(
target=init_lapse_scheduler,
args=(
config,
num_keys,
),
daemon=True,
)
processes.append(p)
p.start()
elif config.get("job.distributed.parameter_server") == "torch":
p = mp.Process(
target=init_torch_server,
args=(
config,
num_keys,
),
daemon=True,
)
processes.append(p)
p.start()
# create a work scheduler
print("init scheduler")
scheduler_init_time = time.time()
scheduler = WorkScheduler.create(config=config, dataset=dataset)
config.log(f"scheduler initialized after: {time.time()-scheduler_init_time}")
print("start scheduler")
scheduler_start_time = time.time()
processes.append(scheduler)
scheduler.start()
config.log(f"scheduler start took: {time.time()-scheduler_start_time}")
# create all train-workers in a worker pool
worker_process_pool = WorkerProcessPool(
config,
dataset,
checkpoint,
)
valid_trace = worker_process_pool.join()
for p in processes:
p.join()
if config.get("job.type") == "train":
monitor_process.terminate()
gpu_monitor_process.terminate()
return valid_trace
| [
"torch.multiprocessing.Process",
"torch.multiprocessing.set_sharing_strategy"
] | 1.7.1 | uma-pi1/dist-kge | ccc93e7981c09a0499f2267f37886660896d8f72 |
1.1 | import bisect
import gc
import glob
import random
import torch
from others.logging import logger
class Batch(object):
def _pad(self, data, pad_id, width=-1):
if (width == -1):
width = max(len(d) for d in data)
rtn_data = [d + [pad_id] * (width - len(d)) for d in data]
return rtn_data
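    # Illustrative: _pad([[1, 2], [3]], pad_id=0) -> [[1, 2], [3, 0]]
    # (every sequence is right-padded with pad_id up to the longest length, or up to `width`).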
def __init__(self, data=None, device=None, is_test=False):
"""Create a Batch from a list of examples."""
if data is not None:
self.batch_size = len(data)
pre_src = [x[0] for x in data]
pre_tgt = [x[1] for x in data]
pre_segs = [x[2] for x in data]
pre_clss = [x[3] for x in data]
pre_src_sent_labels = [x[4] for x in data]
src = torch.tensor(self._pad(pre_src, 0))
tgt = torch.tensor(self._pad(pre_tgt, 0))
segs = torch.tensor(self._pad(pre_segs, 0))
mask_src = 1 - (src == 0)
mask_tgt = 1 - (tgt == 0)
clss = torch.tensor(self._pad(pre_clss, -1))
src_sent_labels = torch.tensor(self._pad(pre_src_sent_labels, 0))
mask_cls = 1 - (clss == -1)
clss[clss == -1] = 0
setattr(self, 'clss', clss.to(device))
setattr(self, 'mask_cls', mask_cls.to(device))
setattr(self, 'src_sent_labels', src_sent_labels.to(device))
setattr(self, 'src', src.to(device))
setattr(self, 'tgt', tgt.to(device))
setattr(self, 'segs', segs.to(device))
setattr(self, 'mask_src', mask_src.to(device))
setattr(self, 'mask_tgt', mask_tgt.to(device))
if (is_test):
src_str = [x[-2] for x in data]
setattr(self, 'src_str', src_str)
tgt_str = [x[-1] for x in data]
setattr(self, 'tgt_str', tgt_str)
def __len__(self):
return self.batch_size
def load_dataset(args, corpus_type, shuffle):
"""
    Dataset generator. Don't do extra stuff here, like printing,
    because it will be postponed until the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of dataset, the dataset(s) are lazily loaded.
"""
assert corpus_type in ["train", "valid", "test"]
def _lazy_dataset_loader(pt_file, corpus_type):
dataset = torch.load(pt_file)
logger.info('Loading %s dataset from %s, number of examples: %d' %
(corpus_type, pt_file, len(dataset)))
return dataset
# Sort the glob output by file name (by increasing indexes).
pts = sorted(glob.glob(args.bert_data_path + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
if (shuffle):
random.shuffle(pts)
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type)
else:
# Only one inputters.*Dataset, simple!
pt = args.bert_data_path + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type)
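# Illustrative: with args.bert_data_path = 'data/cnndm' (hypothetical path), the generator
# lazily yields shards matching 'data/cnndm.train.[0-9]*.pt', or the single file
# 'data/cnndm.train.pt' when no numbered shards exist.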
def abs_batch_size_fn(new, count):
src, tgt = new[0], new[1]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents=0
max_n_tokens=0
max_n_sents = max(max_n_sents, len(tgt))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
if (count > 6):
return src_elements + 1e3
return src_elements
def ext_batch_size_fn(new, count):
if (len(new) == 4):
pass
src, labels = new[0], new[4]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents = 0
max_n_tokens = 0
max_n_sents = max(max_n_sents, len(src))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
return src_elements
class Dataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
self.args = args
self.datasets = datasets
self.batch_size = batch_size
self.device = device
self.shuffle = shuffle
self.is_test = is_test
self.cur_iter = self._next_dataset_iterator(datasets)
assert self.cur_iter is not None
def __iter__(self):
dataset_iter = (d for d in self.datasets)
while self.cur_iter is not None:
for i, batch in enumerate(self.cur_iter):
print('batch: ', i)
yield batch
self.cur_iter = self._next_dataset_iterator(dataset_iter)
def _next_dataset_iterator(self, dataset_iter):
try:
# Drop the current dataset for decreasing memory
if hasattr(self, "cur_dataset"):
self.cur_dataset = None
gc.collect()
del self.cur_dataset
gc.collect()
self.cur_dataset = next(dataset_iter)
except StopIteration:
return None
return DataIterator(args = self.args,
dataset=self.cur_dataset, batch_size=self.batch_size,
device=self.device, shuffle=self.shuffle, is_test=self.is_test)
class DataIterator(object):
def __init__(self, args, dataset, batch_size, device=None, is_test=False,
shuffle=True):
self.args = args
self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset
self.iterations = 0
self.device = device
self.shuffle = shuffle
self.sort_key = lambda x: len(x[1])
self._iterations_this_epoch = 0
if (self.args.task == 'abs'):
self.batch_size_fn = abs_batch_size_fn
else:
self.batch_size_fn = ext_batch_size_fn
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1]+[2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if(not self.args.use_interval):
segs=[0]*len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
# src_txt = src_txt[:max_sent_id]
if(is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt
else:
return src, tgt, segs, clss, src_sent_labels
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if(len(ex['src'])==0):
continue
ex = self.preprocess(ex, self.is_test)
if(ex is None):
continue
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def batch(self, data, batch_size):
"""Yield elements from data in chunks of batch_size."""
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = self.batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = self.batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if(len(b)==0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
class TextDataloader(object):
def __init__(self, args, datasets, batch_size,
device, shuffle, is_test):
        self.args = args
        self.datasets = datasets
        self.batch_size = batch_size
        self.device = device
        self.shuffle = shuffle
        self.is_test = is_test
def data(self):
if self.shuffle:
random.shuffle(self.dataset)
xs = self.dataset
return xs
def preprocess(self, ex, is_test):
src = ex['src']
tgt = ex['tgt'][:self.args.max_tgt_len][:-1] + [2]
src_sent_labels = ex['src_sent_labels']
segs = ex['segs']
if (not self.args.use_interval):
segs = [0] * len(segs)
clss = ex['clss']
src_txt = ex['src_txt']
tgt_txt = ex['tgt_txt']
end_id = [src[-1]]
src = src[:-1][:self.args.max_pos - 1] + end_id
segs = segs[:self.args.max_pos]
max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
src_sent_labels = src_sent_labels[:max_sent_id]
clss = clss[:max_sent_id]
# src_txt = src_txt[:max_sent_id]
if (is_test):
return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt
else:
return src, tgt, segs, clss, src_sent_labels
def batch_buffer(self, data, batch_size):
minibatch, size_so_far = [], 0
for ex in data:
if (len(ex['src']) == 0):
continue
ex = self.preprocess(ex, self.is_test)
if (ex is None):
continue
minibatch.append(ex)
size_so_far = simple_batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def create_batches(self):
""" Create batches """
data = self.data()
for buffer in self.batch_buffer(data, self.batch_size * 300):
if (self.args.task == 'abs'):
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = sorted(p_batch, key=lambda x: len(x[1]))
else:
p_batch = sorted(buffer, key=lambda x: len(x[2]))
p_batch = batch(p_batch, self.batch_size)
p_batch = batch(p_batch, self.batch_size)
p_batch = list(p_batch)
if (self.shuffle):
random.shuffle(p_batch)
for b in p_batch:
if (len(b) == 0):
continue
yield b
def __iter__(self):
while True:
self.batches = self.create_batches()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
batch = Batch(minibatch, self.device, self.is_test)
yield batch
return
| [
"torch.load"
] | 1.1.0 | thenghiapham/PreSumm | 7bc537b656dc43898474c99b906e02f65b98c975 |
1.6 | import os
import torch
from detecto.core import Model, Dataset
from detecto.utils import xml_to_csv, read_image
def get_dataset(**kwargs):
path = os.path.dirname(__file__)
input_folder = os.path.join(path, 'static')
labels_path = os.path.join(path, 'static/labels.csv')
xml_to_csv(input_folder, labels_path)
dataset = Dataset(labels_path, input_folder, **kwargs)
os.remove(labels_path)
return dataset
def get_image():
path = os.path.dirname(__file__)
file = 'static/image.jpg'
return read_image(os.path.join(path, file))
def get_model():
return Model(['test1', 'test2', 'test3'])
def empty_predictor(x):
return [{'labels': torch.empty(0), 'boxes': torch.empty(0, 4), 'scores': torch.empty(0)}]
| [
"torch.empty"
] | 1.6.0 | erjiang/detecto | 673bee8c0522e8cd18edce1602c2fd061bb27e14 |
1.11 | # -*- coding: utf-8 -*-
import torch
class Config:
    '''
    Chatbot model parameters
    '''
    corpus_data_path = 'corpus.pth' # preprocessed dialogue data
    use_QA_first = True # whether to load the QA knowledge base first
    max_input_length = 100 # maximum input sentence length
    max_generate_length = 300 # maximum generated sentence length
    prefix = 'checkpoints/chatbot' # prefix for model checkpoint paths
    model_ckpt = 'checkpoints/chatbot_0425_2112' # path of the checkpoint to load
    # model_ckpt = None # when training from scratch, do not resume from any checkpoint
    '''
    Training hyperparameters
    '''
    batch_size = 2048 # size of each training batch
    shuffle = True # whether the dataloader shuffles the data
    num_workers = 0 # number of dataloader worker processes
    bidirectional = True # whether the encoder RNN is bidirectional
    hidden_size = 512
    embedding_dim = 512
    method = 'dot' # attention method
    dropout = 0 # dropout rate (0 disables dropout)
    clip = 50.0 # gradient clipping threshold
    num_layers = 2 # number of encoder RNN layers
    learning_rate = 1e-4 # learning rate
    teacher_forcing_ratio = 1.0 # teacher forcing ratio
    decoder_learning_ratio = 5.0
    '''
    Training schedule
    '''
    epoch = 2000
    save_every = 50 # save a checkpoint every save_every epochs
    '''
    GPU
    '''
    use_gpu = torch.cuda.is_available() # whether to use the GPU
device = torch.device("cuda" if use_gpu else "cpu") #device
if __name__ == "__main__":
print(torch.version.__version__)
print(torch.cuda.is_available()) | [
"torch.device",
"torch.cuda.is_available"
] | 1.11.0 | scyq/CapstoneNetwork | e4c888f2a6b1951794687657f86cd84cb006c2a3 |
1.10 |
import torch
from ...dataset import SequenceBatch
from ._abstract import ImportanceMeasureModule
class InputTimesGradientImportanceMeasure(ImportanceMeasureModule):
def forward(self, batch: SequenceBatch) -> torch.Tensor:
# Prepear a compact embedding matrix for doing sum(x * dy/dz @ W.T) efficently.
embedding_matrix_compact = torch.index_select(
self.model.embedding_matrix, 0, batch.sentence.view(-1)
).unsqueeze(-1) # (B * T, Z, 1)
y, _, embedding = self.model(batch)
yc = y[torch.arange(batch.label.numel(), device=self.device), batch.label]
yc_batch = yc.sum(dim=0)
with torch.no_grad():
yc_wrt_embedding, = torch.autograd.grad([yc_batch], (embedding, )) # (B, T, Z)
if yc_wrt_embedding is None:
raise ValueError('Could not compute gradient')
yc_wrt_embedding = yc_wrt_embedding[:, :batch.sentence.size(1), :]
# This is a fast and memory-efficient version of sum(one_hot(x) * dy/dz @ W.T)
# We can do this because x is one_hot, hence there is no need to
# compute all the dy/dx = dy/dz @ W.T elements, where x = 0,
# because they will anyway go away after sum.
# In this context, the sum comes from the 2-norm. The mean
# does not affect anything, as x remains the same for all
        # Riemann steps.
yc_wrt_x_compact = torch.bmm(
yc_wrt_embedding.reshape(
embedding_matrix_compact.shape[0], 1, embedding_matrix_compact.shape[1]
), # (B * T, 1, Z)
embedding_matrix_compact, # (B * T, Z, 1)
).reshape_as(batch.sentence) # (B*T, 1, 1) -> (B, T)
# Abs is equivalent to 2-norm, because the naive sum is essentially
# sqrt(0^2 + ... + 0^2 + y_wrt_x^2 + 0^2 + ... + 0^2) = abs(y_wrt_x)
return torch.abs(yc_wrt_x_compact)
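# Equivalence sketch (informal): for a one-hot token x_bt whose embedding row is W[x_bt],
# the dense input-times-gradient sum over the vocabulary reduces to
#   | (dy/dE)[b, t, :] . W[x_bt, :] |,
# which is exactly what the bmm over the gathered embedding rows computes above, so the
# full (B, T, vocab) gradient never has to be materialised.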
| [
"torch.abs",
"torch.autograd.grad",
"torch.no_grad"
] | 1.10.0 | AndreasMadsen/nlp-roar-interpretability | ad30f756cd744dfb05d1b57de744c5ff60d9f20c |
1.8 | from typing import Any, List
import numpy as np
import torch.nn
import torch
from cptr_model.config.config import Config
from cptr_model.config.specifics.cptr.architecture_config_file_manager import ArchitectureConfigFileManager
from cptr_model.embeddings.position.base_position_embedding import BasePositionEmbedding
class PositionSinCosEmbedding(BasePositionEmbedding):
KEY_DIM = 'dim'
KEY_NUM_POSITIONS = 'num_positions'
def __init__(self, config: Config, **kwargs):
self.config = config
self.dim = kwargs.get(PositionSinCosEmbedding.KEY_DIM, None)
self.num_positions = kwargs.get(PositionSinCosEmbedding.KEY_NUM_POSITIONS, None)
print(kwargs)
super().__init__(config, **kwargs)
self.register_buffer('pos_table', self.get_position_embedding_table())
def _verify_required_args(self) -> None:
if not self.dim:
raise ValueError(f'{PositionSinCosEmbedding.KEY_DIM} value is None')
if not self.num_positions:
raise ValueError(f'{PositionSinCosEmbedding.KEY_NUM_POSITIONS} value is None')
def __get_position_angle_vec(self, position: int) -> List[float]:
return [float(position / np.power(10000, 2 * (hid_j // 2) / self.dim)) for hid_j in range(self.dim)]
def get_position_embedding_table(self) -> torch.Tensor:
sinusoid_table = torch.tensor([self.__get_position_angle_vec(i) for i in range(self.num_positions)])
sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2])
sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2])
return torch.FloatTensor(sinusoid_table).unsqueeze(0).to(device=torch.device('cuda' if self.config.default_use_gpu else 'cpu'))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.pos_table[:, :x.shape[1], :].clone().detach() + x
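# The table above follows the standard sinusoidal encoding (Vaswani et al., 2017):
#   PE[pos, 2i]   = sin(pos / 10000**(2i / dim))
#   PE[pos, 2i+1] = cos(pos / 10000**(2i / dim))
# so forward() simply adds the first x.shape[1] positions of this fixed table to x.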
| [
"torch.cos",
"torch.device",
"torch.sin",
"torch.FloatTensor"
] | 1.8.0 | jsoft88/cptr-vision-transformer | c1728792e3a1b14805ad2489efcd869677c380d7 |
1.8 | # Copyright (c) Meta Platforms, Inc
import math
from typing import Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from flowtorch.bijectors.fixed import Fixed
class LeakyReLU(Fixed):
# TODO: Setting the slope of Leaky ReLU as __init__ argument
def _forward(
self, x: torch.Tensor, params: Optional[Sequence[torch.Tensor]]
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
y = F.leaky_relu(x)
ladj = self._log_abs_det_jacobian(x, y, params)
return y, ladj
def _inverse(
self, y: torch.Tensor, params: Optional[Sequence[torch.Tensor]]
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
x = F.leaky_relu(y, negative_slope=100.0)
ladj = self._log_abs_det_jacobian(x, y, params)
return x, ladj
def _log_abs_det_jacobian(
self, x: torch.Tensor, y: torch.Tensor, params: Optional[Sequence[torch.Tensor]]
) -> torch.Tensor:
return torch.where(
x >= 0.0, torch.zeros_like(x), torch.ones_like(x) * math.log(0.01)
)
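# Note (informal): F.leaky_relu uses slope 1 for x >= 0 and 0.01 for x < 0, so the
# elementwise |dy/dx| is 1 or 0.01 and log|det J| is 0 or log(0.01), as returned above;
# the inverse applies leaky_relu with negative_slope=100.0 because 1 / 0.01 == 100.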
| [
"torch.zeros_like",
"torch.ones_like",
"torch.nn.functional.leaky_relu"
] | 1.8.1 | vmoens/flowtorch | 499273172dc64b68dd41d06ace935bd6ee970fe4 |
1.2 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def init_weights(self, init_type='normal', gain=0.02):
'''
initialize network's weights
init_type: normal | xavier | kaiming | orthogonal
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
'''
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, gain)
nn.init.constant_(m.bias.data, 0.0)
self.apply(init_func)
class InpaintGenerator(BaseNetwork):
def __init__(self, residual_blocks=8, init_weights=True):
super(InpaintGenerator, self).__init__()
self.encoder = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(in_channels=6, out_channels=64, kernel_size=7, padding=0),
nn.InstanceNorm2d(64, track_running_stats=False),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128, track_running_stats=False),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256, track_running_stats=False),
nn.ReLU(True)
)
blocks = []
for _ in range(residual_blocks):
block = ResnetBlock(256, 2)
blocks.append(block)
self.middle = nn.Sequential(*blocks)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128, track_running_stats=False),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(64, track_running_stats=False),
nn.ReLU(True),
nn.ReflectionPad2d(3),
nn.Conv2d(in_channels=64, out_channels=3, kernel_size=7, padding=0),
)
if init_weights:
self.init_weights()
def forward(self, x):
x = self.encoder(x)
x = self.middle(x)
x = self.decoder(x)
x = (torch.tanh(x) + 1) / 2
return x
class Discriminator(BaseNetwork):
def __init__(self, in_channels, use_sigmoid=True, use_spectral_norm=True, init_weights=True):
super(Discriminator, self).__init__()
self.use_sigmoid = use_sigmoid
self.conv1 = self.features = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv2 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv3 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv4 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv5 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1,
bias=not use_spectral_norm), use_spectral_norm),
)
if init_weights:
self.init_weights()
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
outputs = conv5
if self.use_sigmoid:
outputs = torch.sigmoid(conv5)
return outputs, [conv1, conv2, conv3, conv4, conv5]
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1, use_spectral_norm=False):
super(ResnetBlock, self).__init__()
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(dilation),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation,
bias=not use_spectral_norm), use_spectral_norm),
nn.InstanceNorm2d(dim, track_running_stats=False),
nn.ReLU(True),
nn.ReflectionPad2d(1),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.InstanceNorm2d(dim, track_running_stats=False),
)
def forward(self, x):
out = x + self.conv_block(x)
# Remove ReLU at the end of the residual block
# http://torch.ch/blog/2016/02/04/resnets.html
return out
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
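# Minimal shape sketch (illustrative; the 6 input channels are assumed to be an image
# concatenated with a 3-channel guide/mask tensor):
#
#     g = InpaintGenerator()
#     fake = g(torch.randn(1, 6, 256, 256))       # -> (1, 3, 256, 256), values in [0, 1]
#     d = Discriminator(in_channels=3)
#     score, feats = d(fake)                       # patch scores + the five conv feature maps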
| [
"torch.sigmoid",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.init.orthogonal_",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.ReflectionPad2d",
"torch.nn.utils.spectral_norm",
"torch.nn.InstanceNorm2d",
"torch.tanh",
"torch.nn.init.xavier_normal_"
] | 1.2.0 | Larry-u/JAFPro | 10e5ee3b77bcdb103709c08c3e7d033396bab5ba |
1.7 | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.lazy import RootLazyTensor
from gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestRootLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
should_call_lanczos = False
should_call_lanczos_diagonalization = True
def create_lazy_tensor(self):
root = torch.randn(3, 5, requires_grad=True)
return RootLazyTensor(root)
def evaluate_lazy_tensor(self, lazy_tensor):
root = lazy_tensor.root.tensor
res = root.matmul(root.transpose(-1, -2))
return res
class TestRootLazyTensorBatch(TestRootLazyTensor):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(3, 5, 5)
root.add_(torch.eye(5).unsqueeze(0))
root.requires_grad_(True)
return RootLazyTensor(root)
class TestRootLazyTensorMultiBatch(TestRootLazyTensor):
seed = 1
    # Because these LTs are large, we'll skip the big tests
should_test_sample = False
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(4, 3, 5, 5)
root.requires_grad_(True)
return RootLazyTensor(root)
if __name__ == "__main__":
unittest.main()
| [
"torch.eye",
"torch.randn"
] | 1.7 | qingfeng10/gpytorch | 4d33fbf64594aab2dd6e0cfcb3242510231b3e0e |
1.1 | import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import kaiming_init, normal_init
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
from mmdet.core import force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule
import mmcv
@HEADS.register_module
class MaskIoUHead_MH(nn.Module):
"""Mask IoU Head.
This head predicts the IoU of predicted masks and corresponding gt masks.
"""
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=81,
loss_mask = dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
super(MaskIoUHead_MH, self).__init__()
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.num_classes = num_classes
self.fp16_enabled = False
self.convs = nn.ModuleList()
self.final_conv = nn.Conv2d(self.conv_out_channels, 1, 1, stride=1)
for i in range(num_convs):
if i == 0:
# concatenation of mask feature and mask prediction
in_channels = self.in_channels + self.num_classes - 1
else:
in_channels = self.conv_out_channels
stride = 2 if i == num_convs - 1 else 1
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
3,
stride=stride,
padding=1))
# roi_feat_size = _pair(roi_feat_size)
# pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
# self.fcs = nn.ModuleList()
# for i in range(num_fcs):
# in_channels = (
# self.conv_out_channels *
# pooled_area if i == 0 else self.fc_out_channels)
# self.fcs.append(nn.Linear(in_channels, self.fc_out_channels))
#
# self.fc_mask_iou = nn.Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(2, 2)
self.loss_mask = build_loss(loss_mask)
def init_weights(self):
kaiming_init(self.final_conv)
# for fc in self.fcs:
# kaiming_init(
# fc,
# a=1,
# mode='fan_in',
# nonlinearity='leaky_relu',
# distribution='uniform')
# normal_init(self.fc_mask_iou, std=0.01)
def forward(self, mask_feat, mask_pred):
mask_pred = mask_pred[:,1:,:,:].sigmoid()
mask_pred_pooled = self.max_pool(mask_pred)
x = torch.cat((mask_feat, mask_pred_pooled), 1)
for conv in self.convs:
x = self.relu(conv(x))
mask_out = self.final_conv(x)
# x = x.view(x.size(0), -1)
# for fc in self.fcs:
# x = self.relu(fc(x))
# mask_iou = self.fc_mask_iou(x)
return mask_out
@force_fp32(apply_to=('mask_iou_pred', ))
def loss(self, mask_pred, mask_targets, rcnn_cfg):
loss = dict()
# pos_inds = mask_iou_targets > 0
# if pos_inds.sum() > 0:
# loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
# mask_iou_targets[pos_inds])
# else:
# loss_mask_iou = mask_iou_pred * 0
H, W = mask_pred.size()[-2:]
mask_targets = mask_targets[:,None,:,:]
mask_targets = F.interpolate(mask_targets, (H,W)).squeeze(1)
num_pred = mask_pred.size(0)
loss_mask = self.loss_mask(mask_pred, mask_targets, torch.zeros(num_pred, dtype=torch.long))
loss['loss_refine_mask'] = loss_mask
loss['refine_acc'] = ((mask_pred >= rcnn_cfg.mask_thr_binary).float() == mask_targets).sum().float() / mask_targets.numel() * 100
# loss['loss_mask_iou'] = loss_mask_iou
return loss
# @force_fp32(apply_to=('mask_pred', ))
# def get_target(self, sampling_results, gt_masks, mask_pred, mask_targets,
# rcnn_train_cfg):
# """Compute target of mask IoU.
#
# Mask IoU target is the IoU of the predicted mask (inside a bbox) and
# the gt mask of corresponding gt mask (the whole instance).
# The intersection area is computed inside the bbox, and the gt mask area
# is computed with two steps, firstly we compute the gt area inside the
# bbox, then divide it by the area ratio of gt area inside the bbox and
# the gt area of the whole instance.
#
# Args:
# sampling_results (list[:obj:`SamplingResult`]): sampling results.
# gt_masks (list[ndarray]): Gt masks (the whole instance) of each
# image, binary maps with the same shape of the input image.
# mask_pred (Tensor): Predicted masks of each positive proposal,
# shape (num_pos, h, w).
# mask_targets (Tensor): Gt mask of each positive proposal,
# binary map of the shape (num_pos, h, w).
# rcnn_train_cfg (dict): Training config for R-CNN part.
#
# Returns:
# Tensor: mask iou target (length == num positive).
# """
# pos_proposals = [res.pos_bboxes for res in sampling_results]
# pos_assigned_gt_inds = [
# res.pos_assigned_gt_inds for res in sampling_results
# ]
#
# # compute the area ratio of gt areas inside the proposals and
# # the whole instance
# area_ratios = map(self._get_area_ratio, pos_proposals,
# pos_assigned_gt_inds, gt_masks)
# area_ratios = torch.cat(list(area_ratios))
# assert mask_targets.size(0) == area_ratios.size(0)
#
# mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
# mask_pred_areas = mask_pred.sum((-1, -2))
#
# # mask_pred and mask_targets are binary maps
# overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
#
# # compute the mask area of the whole instance
# gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
#
# mask_iou_targets = overlap_areas / (
# mask_pred_areas + gt_full_areas - overlap_areas)
# return mask_iou_targets
#
# def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
# """Compute area ratio of the gt mask inside the proposal and the gt
# mask of the corresponding instance"""
# num_pos = pos_proposals.size(0)
# if num_pos > 0:
# area_ratios = []
# proposals_np = pos_proposals.cpu().numpy()
# pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
# # compute mask areas of gt instances (batch processing for speedup)
# gt_instance_mask_area = gt_masks.sum((-1, -2))
# for i in range(num_pos):
# gt_mask = gt_masks[pos_assigned_gt_inds[i]]
#
# # crop the gt mask inside the proposal
# x1, y1, x2, y2 = proposals_np[i, :].astype(np.int32)
# gt_mask_in_proposal = gt_mask[y1:y2 + 1, x1:x2 + 1]
#
# ratio = gt_mask_in_proposal.sum() / (
# gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
# area_ratios.append(ratio)
# area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
# pos_proposals.device)
# else:
# area_ratios = pos_proposals.new_zeros((0, ))
# return area_ratios
# @force_fp32(apply_to=('mask_iou_pred', ))
# def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
# """Get the mask scores.
#
# mask_score = bbox_score * mask_iou
# """
# inds = range(det_labels.size(0))
# mask_scores = mask_iou_pred[inds, det_labels +
# 1] * det_bboxes[inds, -1]
# mask_scores = mask_scores.cpu().numpy()
# det_labels = det_labels.cpu().numpy()
# return [
# mask_scores[det_labels == i] for i in range(self.num_classes - 1)
# ]
| [
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.1 | Gitgigabyte/mmd | 02cf37884d3ac9a6018656d1871695669966dfb3 |
1.8 | from torch import nn, Tensor
class Classifier(nn.Module):
"""
Simple model with linear layers for mnist
"""
def __init__(self):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(784, 512)
self.relu1 = nn.LeakyReLU(negative_slope=0.1)
self.drop1 = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(512, 256)
self.relu2 = nn.LeakyReLU(negative_slope=0.1)
self.drop2 = nn.Dropout(p=0.5)
self.out = nn.Linear(256, 10)
# self.out_act = nn.Softmax(dim=1)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.relu1(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.drop2(x)
x = self.out(x)
# x = self.out_act(x)
return x
| [
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.Dropout"
] | 1.8.1 | stefansturlu/FederatedMedical | d753acda850e0d8cf64fc1d5c19e7018494bc16a |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nn.Module with additional great features."""
import collections
import copy
import inspect
import logging
import os
import tempfile
import types
import uuid
from abc import ABC
from argparse import Namespace
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, save_hyperparameters
from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT
log = logging.getLogger(__name__)
class LightningModule(
ABC,
DeviceDtypeModuleMixin,
GradInformation,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT in PyTorch 1.7
# since none of these are important when using JIT, we are going to ignore them.
__jit_unused_properties__ = [
"datamodule",
"example_input_array",
"hparams",
"hparams_initial",
"on_gpu",
"current_epoch",
"global_step",
"global_rank",
"local_rank",
"logger",
"model_size",
"automatic_optimization",
"truncated_bptt_steps",
] + DeviceDtypeModuleMixin.__jit_unused_properties__
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
self.loaded_optimizer_states_dict = {}
#: Pointer to the trainer object
self.trainer = None
self._distrib_type = None
self._device_type = None
#: True if using amp
self.use_amp: bool = False
#: The precision used
self.precision: int = 32
# optionally can be set by user
self._example_input_array = None
self._datamodule = None
self._results: Optional[Result] = None
self._current_fx_name: str = ''
self._running_manual_backward: bool = False
self._current_hook_fx_name: Optional[str] = None
self._current_dataloader_idx: Optional[int] = None
self._automatic_optimization: bool = True
self._truncated_bptt_steps: int = 0
self._param_requires_grad_state = dict()
def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:
if use_pl_optimizer:
opts = list(self.trainer.lightning_optimizers.values())
else:
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):
return opts[0]
# multiple opts
return opts
def lr_schedulers(self) -> Optional[Union[Any, List[Any]]]:
if not self.trainer.lr_schedulers:
return None
# ignore other keys "interval", "frequency", etc.
lr_schedulers = [s["scheduler"] for s in self.trainer.lr_schedulers]
# single scheduler
if len(lr_schedulers) == 1:
return lr_schedulers[0]
# multiple schedulers
return lr_schedulers
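    # Illustrative (manual optimization): inside training_step one can write
    #     opt = self.optimizers()
    #     opt.zero_grad()
    #     self.manual_backward(loss)
    #     opt.step()
    # and fetch schedulers via self.lr_schedulers() in the same way.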
@property
def example_input_array(self) -> Any:
return self._example_input_array
@property
def current_epoch(self) -> int:
"""The current epoch"""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs"""
return self.trainer.global_step if self.trainer else 0
@property
def global_rank(self) -> int:
""" The index of the current process across all nodes and devices. """
return self.trainer.global_rank if self.trainer else 0
@property
def local_rank(self) -> int:
""" The index of the current process within a single node. """
return self.trainer.local_rank if self.trainer else 0
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def datamodule(self) -> Any:
rank_zero_deprecation(
"The `LightningModule.datamodule` property is deprecated in v1.3 and will be removed in v1.5."
" Access the datamodule through using `self.trainer.datamodule` instead."
)
return self._datamodule
@datamodule.setter
def datamodule(self, datamodule: Any) -> None:
self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
@property
def automatic_optimization(self) -> bool:
"""
If False you are responsible for calling .backward, .step, zero_grad.
"""
return self._automatic_optimization
@automatic_optimization.setter
def automatic_optimization(self, automatic_optimization: bool) -> None:
self._automatic_optimization = automatic_optimization
@property
def truncated_bptt_steps(self) -> int:
"""
        truncated_bptt_steps: Truncated backpropagation through time performs backprop every k steps of a much longer sequence.
If this is > 0, the training step is passed ``hiddens``.
"""
return self._truncated_bptt_steps
@truncated_bptt_steps.setter
def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None:
self._truncated_bptt_steps = truncated_bptt_steps
@property
def logger(self):
""" Reference to the logger object in the Trainer. """
return self.trainer.logger if self.trainer else None
def _apply_batch_transfer_handler(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0):
batch = self.on_before_batch_transfer(batch, dataloader_idx)
batch = self.transfer_batch_to_device(batch, device)
batch = self.on_after_batch_transfer(batch, dataloader_idx)
return batch
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. The same as for Python's built-in print function.
**kwargs: The same as for Python's built-in print function.
Example::
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
progress_bar = self.trainer.progress_bar_callback
if progress_bar is not None and progress_bar.is_enabled:
progress_bar.print(*args, **kwargs)
else:
print(*args, **kwargs)
def log(
self,
name: str,
value: Any,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
):
"""
Log a key, value
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows
.. csv-table:: ``*`` also applies to the test loop
:header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key name
value: value name
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group to sync across
add_dataloader_idx: if True, appends the index of the current dataloader to
the name (when using multiple). If False, user needs to give unique names for
each dataloader to not mix values
"""
if self._results is not None:
# in any epoch_end hook we can't log step metrics (only epoch metrics)
if 'epoch_end' in self._current_fx_name and on_step:
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)
# add log_dict
# TODO: if logged twice fail with crash
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
if self._current_hook_fx_name is not None:
self.trainer.logger_connector.check_logging_in_callbacks(
self._current_hook_fx_name, on_step=on_step, on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"Logged key: {name} should not contain information about dataloader_idx."
)
training_type_plugin = self.trainer.training_type_plugin
# Determine if dataloader index should be added
dataloader_idx = self._current_dataloader_idx if add_dataloader_idx else None
self._results.log(
name,
value,
prog_bar,
logger,
on_step,
on_epoch,
reduce_fx,
tbptt_reduce_fx,
tbptt_pad_token,
enable_graph,
sync_dist,
sync_dist_op,
sync_dist_group,
training_type_plugin.reduce,
dataloader_idx,
self.device,
)
def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
):
"""
Log a dictionary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group to sync across
add_dataloader_idx: if True, appends the index of the current dataloader to
the name (when using multiple). If False, user needs to give unique names for
each dataloader to not mix values
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
add_dataloader_idx=add_dataloader_idx
)
def write_prediction(
self, name: str, value: Union[torch.Tensor, List[torch.Tensor]], filename: str = 'predictions.pt'
):
"""
Write predictions to disk using ``torch.save``
Example::
self.write_prediction('pred', torch.tensor(...), filename='my_predictions.pt')
Args:
name: a string indicating the name to save the predictions under
value: the predictions, either a single :class:`~torch.Tensor` or a list of them
filename: name of the file to save the predictions to
Note:
when running in distributed mode, calling ``write_prediction`` will create a file for
each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ...
.. deprecated:: v1.3
Will be removed in v1.5.0.
"""
rank_zero_deprecation(
'LightningModule method `write_prediction` was deprecated in v1.3'
' and will be removed in v1.5.'
)
self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)
def write_prediction_dict(self, predictions_dict: Dict[str, Any], filename: str = 'predictions.pt'):
"""
Write a dictionary of predictions to disk at once using ``torch.save``
Example::
pred_dict = {'pred1': torch.tensor(...), 'pred2': torch.tensor(...)}
self.write_prediction_dict(pred_dict)
Args:
predictions_dict: dict containing predictions, where each prediction should
either be single :class:`~torch.Tensor` or a list of them
Note:
when running in distributed mode, calling ``write_prediction_dict`` will create a file for
each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ...
.. deprecated:: v1.3
Will be removed in v1.5.0.
"""
rank_zero_deprecation(
'LightningModule method `write_prediction_dict` was deprecated in v1.3 and'
' will be removed in v1.5.'
)
for k, v in predictions_dict.items():
self.write_prediction(k, v, filename)
def __auto_choose_log_on_step(self, on_step):
if on_step is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_step = True
elif self._current_fx_name in {
'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end'
}:
on_step = False
else:
on_step = False
return on_step
def __auto_choose_log_on_epoch(self, on_epoch):
if on_epoch is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_epoch = False
elif self._current_fx_name in {
'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end'
}:
on_epoch = True
else:
on_epoch = True
return on_epoch
def all_gather(
self,
data: Union[torch.Tensor, Dict, List, Tuple],
group: Optional[Any] = None,
sync_grads: bool = False,
):
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making
the ``all_gather`` operation accelerator agnostic.
``all_gather`` is a function provided by accelerators to gather a tensor from several
distributed processes.
Args:
data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof.
group: the process group to gather results from. Defaults to all processes (world)
sync_grads: flag that allows users to synchronize gradients for all_gather op
Return:
A tensor of shape (world_size, batch, ...), or if the input was a collection
the output will also be a collection with tensors of this shape.
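Example (illustrative sketch, assuming a multi-process run)::
def validation_step(self, batch, batch_idx):
x, y = batch
loss = self.loss(self(x), y)
# gather the per-process scalar into a tensor of shape (world_size,)
gathered_loss = self.all_gather(loss)
self.log("val_loss_mean", gathered_loss.mean())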
"""
group = group if group is not None else torch.distributed.group.WORLD
all_gather = self.trainer.accelerator.all_gather
data = convert_to_tensors(data, device=self.device)
all_gather = partial(all_gather, group=group, sync_grads=sync_grads)
return apply_to_collection(data, torch.Tensor, all_gather)
def forward(self, *args, **kwargs) -> Any:
r"""
Same as :meth:`torch.nn.Module.forward()`.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Your model's output
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs) -> STEP_OUTPUT:
r"""
Here you compute and return the training loss and some additional metrics, e.g. for
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0.
Return:
Any of the following:
- :class:`~torch.Tensor` - The loss tensor
- ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'``
- ``None`` - Training will skip to the next batch
Note:
Returning ``None`` is currently not supported for multi-GPU or TPU, or with 16-bit precision enabled.
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
return {'loss': loss, 'hiddens': hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer")
def training_step_end(self, *args, **kwargs) -> STEP_OUTPUT:
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {'pred': out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]['pred']
gpu_1_pred = training_step_outputs[1]['pred']
gpu_n_pred = training_step_outputs[n]['pred']
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details.
"""
def training_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
for out in training_step_outputs:
# do something here
"""
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val dataloaders used)
Return:
Any of the following:
- Any object or value
- ``None`` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
if defined('validation_step_end'):
out = validation_step_end(out)
val_outs.append(out)
val_outs = validation_epoch_end(val_outs)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples::
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log('val_loss', loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_step_end(self, val_step_outputs):
for out in val_step_outputs:
# do something with these
See Also:
See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details.
"""
def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log('final_metric', final_value)
"""
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test dataloaders used).
Return:
Any of the following:
- Any object or value
- ``None`` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples::
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple test dataloaders, :meth:`test_step` will have an additional argument.
.. code-block:: python
# CASE 2: multiple test dataloaders
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to test you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log('test_loss', loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_step_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log('test_loss', loss)
See Also:
See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details.
"""
def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
all_test_preds = outputs.predictions
some_result = calc_all_results(all_test_preds)
self.log('some_result', some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log('final_metric', final_value)
"""
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
"""
Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.
By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`.
Override to add any processing logic.
Args:
batch: Current batch
batch_idx: Index of current batch
dataloader_idx: Index of the current dataloader
Return:
Predicted output
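Example (illustrative sketch)::
def predict_step(self, batch, batch_idx, dataloader_idx=None):
x, _ = batch
return self(x)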
"""
return self(batch)
def configure_callbacks(self):
"""
Configure model-specific callbacks.
When the model gets attached, e.g., when ``.fit()`` or ``.test()`` gets called,
the list returned here will be merged with the list of callbacks passed to the Trainer's ``callbacks`` argument.
If a callback returned here has the same type as one or several callbacks already present in
the Trainer's callbacks list, it will take priority and replace them.
In addition, Lightning will make sure :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
callbacks run last.
Return:
A list of callbacks which will extend the list of callbacks in the Trainer.
Example::
def configure_callbacks(self):
early_stop = EarlyStopping(monitor="val_acc", mode="max")
checkpoint = ModelCheckpoint(monitor="val_loss")
return [early_stop, checkpoint]
Note:
Certain callback methods like :meth:`~pytorch_lightning.callbacks.base.Callback.on_init_start`
will never be invoked on the new callbacks returned here.
"""
return []
def configure_optimizers(self):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- **Single optimizer**.
- **List or Tuple** of optimizers.
- **Two lists** - The first list has multiple optimizers, and the second has multiple LR schedulers (or
multiple lr_dict).
- **Dictionary**, with an ``"optimizer"`` key, and (optionally) a ``"lr_scheduler"``
key whose value is a single LR scheduler or lr_dict.
- **Tuple of dictionaries** as described above, with an optional ``"frequency"`` key.
- **None** - Fit will run without any optimizer.
Note:
The ``frequency`` value specified in a dict along with the ``optimizer`` key is an int corresponding
to the number of sequential batches optimized with the specific optimizer.
It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
This is different from the ``frequency`` value specified in the lr_dict mentioned below.
.. code-block:: python
def configure_optimizers(self):
optimizer_one = torch.optim.SGD(self.model.parameters(), lr=0.01)
optimizer_two = torch.optim.SGD(self.model.parameters(), lr=0.01)
return [
{'optimizer': optimizer_one, 'frequency': 5},
{'optimizer': optimizer_two, 'frequency': 10},
]
In this example, the first optimizer will be used for the first 5 steps,
the second optimizer for the next 10 steps and that cycle will continue.
If an LR scheduler is specified for an optimizer using the ``lr_scheduler`` key in the above dict,
the scheduler will only be updated when its optimizer is being used.
Note:
The lr_dict is a dictionary which contains the scheduler and its associated configuration.
The default configuration is shown below.
.. code-block:: python
lr_dict = {
'scheduler': lr_scheduler, # The LR scheduler instance (required)
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
'strict': True, # Whether to crash the training if `monitor` is not found
'name': None, # Custom name for LearningRateMonitor to use
}
Only the ``"scheduler"`` key is required, the rest will be set to the defaults above.
Note:
The ``"frequency"`` value is an ``int`` corresponding to the number of sequential batches optimized with the
specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list and passing multiple optimizers in
dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
Examples::
# most cases
def configure_optimizers(self):
return Adam(self.parameters(), lr=1e-3)
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
return gen_opt, dis_opt
# example with learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
dis_sch = CosineAnnealing(dis_opt, T_max=10)
return [gen_opt, dis_opt], [dis_sch]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
gen_sch = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
dis_sch = CosineAnnealing(dis_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sch, dis_sch]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically handle the optimizers.
- If you use multiple optimizers, :meth:`training_step` will have an additional ``optimizer_idx`` parameter.
- If you use :class:`torch.optim.LBFGS`, Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only for the parameters of current optimizer
at each training step.
- If you need to control how often those optimizers step or override the default ``.step()`` schedule,
override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch, or want to monitor a custom
metric, you can specify these in a lr_dict:
.. code-block:: python
lr_dict = {
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer")
def manual_backward(self, loss: Tensor, optimizer: Optional[Optimizer] = None, *args, **kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this, Lightning ensures that all the proper scaling (e.g. when using 16-bit precision) has been done for you.
This function forwards all args to the .backward() call as well.
See :ref:`manual optimization<common/optimizers:Manual optimization>` for more examples.
Example::
def training_step(...):
opt = self.optimizers()
loss = ...
opt.zero_grad()
# automatically applies scaling, etc...
self.manual_backward(loss)
opt.step()
"""
if optimizer is not None:
rank_zero_deprecation(
"`optimizer` argument to `manual_backward` is deprecated in v1.2 and will be removed in v1.4"
)
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_backward')
# backward
self._running_manual_backward = True
self.trainer.train_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs)
self._running_manual_backward = False
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
"""
Override backward with your own implementation if you need to.
Args:
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
if self.automatic_optimization or self._running_manual_backward:
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
"""
Makes sure only the gradients of the current optimizer's parameters are calculated
in the training step to prevent dangling gradients in multiple-optimizer setup.
.. note:: Only called when using multiple optimizers
Override for your own behavior
It works with ``untoggle_optimizer`` to make sure param_requires_grad_state is properly reset.
Args:
optimizer: Current optimizer used in training_loop
optimizer_idx: Current optimizer idx in training_loop
"""
# Iterate over all optimizer parameters to preserve their `requires_grad` information
# in case these are pre-defined during `configure_optimizers`
param_requires_grad_state = {}
for opt in self.optimizers(use_pl_optimizer=False):
for group in opt.param_groups:
for param in group['params']:
# If a param already appears in param_requires_grad_state, continue
if param in param_requires_grad_state:
continue
param_requires_grad_state[param] = param.requires_grad
param.requires_grad = False
# Then iterate over the current optimizer's parameters and set its `requires_grad`
# properties accordingly
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = param_requires_grad_state[param]
self._param_requires_grad_state = param_requires_grad_state
def untoggle_optimizer(self, optimizer_idx: int):
"""
.. note:: Only called when using multiple optimizers
Override for your own behavior
Args:
optimizer_idx: Current optimizer idx in training_loop
"""
for opt_idx, opt in enumerate(self.optimizers(use_pl_optimizer=False)):
if optimizer_idx != opt_idx:
for group in opt.param_groups:
for param in group['params']:
if param in self._param_requires_grad_state:
param.requires_grad = self._param_requires_grad_state[param]
# save memory
self._param_requires_grad_state = dict()
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
Warning:
If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
to ``optimizer.step()`` function as shown in the examples. This ensures that
``training_step()``, ``optimizer.zero_grad()``, ``backward()`` are called within
:meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers, this indexes into that list.
optimizer_closure: Closure for all optimizers
on_tpu: ``True`` if TPU backward is required
using_native_amp: ``True`` if using native amp
using_lbfgs: True if the matching optimizer is :class:`torch.optim.LBFGS`
Examples::
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every step
if optimizer_idx == 0:
optimizer.step(closure=optimizer_closure)
# update discriminator opt every 2 steps
if optimizer_idx == 1:
if (batch_idx + 1) % 2 == 0 :
optimizer.step(closure=optimizer_closure)
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step(closure=optimizer_closure)
"""
optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
"""Override this method to change the default behaviour of ``optimizer.zero_grad()``.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
Examples::
# DEFAULT
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad()
# Set gradients to `None` instead of zero to improve performance.
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
See :meth:`torch.optim.Optimizer.zero_grad` for the explanation of the above example.
"""
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples::
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(x == time_dims[0] for x in time_dims), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:
model_summary = None
if mode in ModelSummary.MODES:
model_summary = ModelSummary(self, mode=mode)
log.info("\n" + str(model_summary))
elif mode is not None:
raise MisconfigurationException(f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}")
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example::
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.train_loop.running_loss.mean()
avg_training_loss = None
if running_train_loss is not None:
avg_training_loss = running_train_loss.cpu().item()
elif self.automatic_optimization:
avg_training_loss = float('NaN')
tqdm_dict = {}
if avg_training_loss is not None:
tqdm_dict["loss"] = f"{avg_training_loss:.3g}"
module_tbptt_enabled = self.truncated_bptt_steps > 0
trainer_tbptt_enabled = self.trainer.truncated_bptt_steps is not None and self.trainer.truncated_bptt_steps > 0
if module_tbptt_enabled or trainer_tbptt_enabled:
tqdm_dict["split_idx"] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict["v_num"] = version
return tqdm_dict
def _verify_is_manual_optimization(self, fn_name):
if self.automatic_optimization:
raise MisconfigurationException(
f'to use {fn_name}, please disable automatic optimization:'
' set model property `automatic_optimization` as False'
)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
def save_hyperparameters(
self,
*args,
ignore: Optional[Union[Sequence[str], str]] = None,
frame: Optional[types.FrameType] = None
) -> None:
"""Save model arguments to ``hparams`` attribute.
Args:
args: single object of `dict`, `Namespace` or `OmegaConf`
or string names or arguments from class ``__init__``
ignore: an argument name or a list of argument names from
class ``__init__`` to be ignored
frame: a frame object. Default is None
Example::
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # manually assign arguments
... self.save_hyperparameters('arg1', 'arg3')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
>>> class AutomaticArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # equivalent automatic
... self.save_hyperparameters()
... def forward(self, *args, **kwargs):
... ...
>>> model = AutomaticArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg2": abc
"arg3": 3.14
>>> class SingleArgModel(LightningModule):
... def __init__(self, params):
... super().__init__()
... # manually assign single argument
... self.save_hyperparameters(params)
... def forward(self, *args, **kwargs):
... ...
>>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
>>> model.hparams
"p1": 1
"p2": abc
"p3": 3.14
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # pass argument(s) to ignore as a string or in a list
... self.save_hyperparameters(ignore='arg2')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
"""
# the frame needs to be created in this file.
if not frame:
frame = inspect.currentframe().f_back
save_hyperparameters(self, *args, ignore=ignore, frame=frame)
def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
if isinstance(hp, Namespace):
hp = vars(hp)
if isinstance(hp, dict):
hp = AttributeDict(hp)
elif isinstance(hp, PRIMITIVE_TYPES):
raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
raise ValueError(f"Unsupported config type of {type(hp)}.")
if isinstance(hp, dict) and isinstance(self.hparams, dict):
self.hparams.update(hp)
else:
self._hparams = hp
@torch.no_grad()
def to_onnx(
self,
file_path: Union[str, Path],
input_sample: Optional[Any] = None,
**kwargs,
):
"""
Saves the model in ONNX format
Args:
file_path: The path of the file the onnx model should be saved to.
input_sample: An input for tracing. Default: None (Use self.example_input_array)
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
mode = self.training
if input_sample is None:
if self.example_input_array is None:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_sample = self.example_input_array
input_sample = self._apply_batch_transfer_handler(input_sample)
if "example_outputs" not in kwargs:
self.eval()
kwargs["example_outputs"] = self(input_sample)
torch.onnx.export(self, input_sample, file_path, **kwargs)
self.train(mode)
@torch.no_grad()
def to_torchscript(
self,
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = 'script',
example_inputs: Optional[Any] = None,
**kwargs,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""
By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
If you want to use tracing, please provide the argument ``method='trace'`` and make sure that either the
example_inputs argument is provided, or the model has self.example_input_array set.
If you would like to customize the modules that are scripted you should override this method.
In case you want to return multiple modules, we recommend using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: An input to be used to do tracing when method is set to 'trace'.
Default: None (Use self.example_input_array)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether file_path is
defined or not.
"""
mode = self.training
if method == 'script':
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == 'trace':
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
if self.example_input_array is None:
raise ValueError(
'Choosing method=`trace` requires either `example_inputs`'
' or `model.example_input_array` to be defined.'
)
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self._apply_batch_transfer_handler(example_inputs)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was: {method}")
self.train(mode)
if file_path is not None:
torch.jit.save(torchscript_module, file_path)
return torchscript_module
@property
def hparams(self) -> Union[AttributeDict, dict, Namespace]:
if not hasattr(self, "_hparams"):
self._hparams = AttributeDict()
return self._hparams
@property
def hparams_initial(self) -> AttributeDict:
if not hasattr(self, "_hparams_initial"):
return AttributeDict()
# prevent any change
return copy.deepcopy(self._hparams_initial)
@property
def model_size(self) -> float:
# todo: think about better way without need to dump model to drive
tmp_name = f"{uuid.uuid4().hex}.pt"
torch.save(self.state_dict(), tmp_name)
size_mb = os.path.getsize(tmp_name) / 1e6
os.remove(tmp_name)
return size_mb
| [
"torch._C._log_api_usage_once",
"torch.no_grad",
"torch.jit.save",
"torch.onnx.export"
] | 1.4 | lillekemiker/pytorch-lightning | 6104a6316afb8cdd9825d77844db456ffa766ca1 |
1.4 | import time
from typing import Optional
import torch
from falkon.options import ConjugateGradientOptions, FalkonOptions
from falkon.mmv_ops.fmmv_incore import incore_fdmmv, incore_fmmv
from falkon.utils.tensor_helpers import copy_same_stride, create_same_stride
from falkon.utils import TicToc
# More readable 'pseudocode' for conjugate gradient.
# function [x] = conjgrad(A, b, x)
# r = b - A * x;
# p = r;
# rsold = r' * r;
#
# for i = 1:length(b)
# Ap = A * p;
# alpha = rsold / (p' * Ap);
# x = x + alpha * p;
# r = r - alpha * Ap;
# rsnew = r' * r;
# if sqrt(rsnew) < 1e-10
# break;
# end
# p = r + (rsnew / rsold) * p;
# rsold = rsnew;
# end
# end
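# Note (added for clarity): the torch implementation below follows the same recursion, but solves
# for all columns of B (multiple right-hand sides) at once; Rsold and alpha are per-column
# quantities, and torch.diag(alpha) scales each search-direction column independently.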
class Optimizer(object):
def __init__(self):
pass
class ConjugateGradient(Optimizer):
def __init__(self, opt: Optional[ConjugateGradientOptions] = None):
super().__init__()
self.params = opt or ConjugateGradientOptions()
def solve(self, X0, B, mmv, max_iter, callback=None):
t_start = time.time()
if X0 is None:
R = copy_same_stride(B)
X = create_same_stride(B.size(), B, B.dtype, B.device)
X.fill_(0.0)
else:
R = B - mmv(X0)
X = X0
m_eps = self.params.cg_epsilon(X.dtype)
P = R
# noinspection PyArgumentList
Rsold = torch.sum(R.pow(2), dim=0)
e_train = time.time() - t_start
for i in range(max_iter):
with TicToc("Chol Iter", debug=False): # TODO: FIXME
t_start = time.time()
AP = mmv(P)
# noinspection PyArgumentList
alpha = Rsold / (torch.sum(P * AP, dim=0) + m_eps)
X.addmm_(P, torch.diag(alpha))
if (i + 1) % self.params.cg_full_gradient_every == 0:
R = B - mmv(X)
else:
R = R - torch.mm(AP, torch.diag(alpha))
# R.addmm_(mat1=AP, mat2=torch.diag(alpha), alpha=-1.0)
# noinspection PyArgumentList
Rsnew = torch.sum(R.pow(2), dim=0)
if Rsnew.abs().max().sqrt() < self.params.cg_tolerance:
print("Stopping conjugate gradient descent at "
"iteration %d. Solution has converged." % (i + 1))
break
P = R + torch.mm(P, torch.diag(Rsnew / (Rsold + m_eps)))
if P.is_cuda:
# P must be synced so that it's correct for mmv in next iter.
torch.cuda.synchronize()
Rsold = Rsnew
e_iter = time.time() - t_start
e_train += e_iter
with TicToc("Chol callback", debug=False):
if callback is not None:
callback(i + 1, X, e_train)
return X
class FalkonConjugateGradient(Optimizer):
def __init__(self, kernel, preconditioner, opt: FalkonOptions):
super().__init__()
self.kernel = kernel
self.preconditioner = preconditioner
self.params = opt
self.optimizer = ConjugateGradient(opt.get_conjgrad_options())
def solve(self, X, M, Y, _lambda, initial_solution, max_iter, callback=None):
n = X.size(0)
prec = self.preconditioner
with TicToc("ConjGrad preparation", False):
if M is None:
Knm = X
else:
Knm = None
# Compute the right hand side
if Knm is not None:
B = incore_fmmv(Knm, Y / n, None, transpose=True, opt=self.params)
else:
B = self.kernel.dmmv(X, M, None, Y / n, opt=self.params)
B = prec.apply_t(B)
# Define the Matrix-vector product iteration
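# Interpretation (comment added for clarity): mmv(sol) applies the preconditioned Falkon operator
# A^{-T} (T^{-T} (Knm^T Knm (T^{-1} A^{-1} sol)) / n + lambda * A^{-1} sol),
# so conjugate gradient iterates on a symmetric, better-conditioned system.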
if X.is_cuda:
s1 = torch.cuda.Stream(X.device)
def mmv(sol):
with TicToc("MMV", False):
v = prec.invA(sol)
v_t = prec.invT(v)
if Knm is not None:
cc = incore_fdmmv(Knm, v_t, None, opt=self.params)
else:
cc = self.kernel.dmmv(X, M, v_t, None, opt=self.params)
if X.is_cuda:
with torch.cuda.stream(s1), torch.cuda.device(X.device):
# We must sync before calls to prec.inv* which use a different stream
cc_ = cc.div_(n)
v_ = v.mul_(_lambda)
s1.synchronize()
cc_ = prec.invTt(cc_).add_(v_)
s1.synchronize()
return prec.invAt(cc_)
else:
return prec.invAt(prec.invTt(cc / n) + _lambda * v)
# Run the conjugate gradient solver
beta = self.optimizer.solve(initial_solution, B, mmv, max_iter, callback)
return beta
| [
"torch.cuda.synchronize",
"torch.cuda.device",
"torch.cuda.stream",
"torch.diag",
"torch.cuda.Stream",
"torch.sum"
] | 1.4 | gpleiss/falkon | 36aa6713aff8ee6b9ad922d48b07c994fce30559 |
1.4 | import torch
import torch.nn as nn
import pdb
class Cumulative_Probability_Layer(nn.Module):
def __init__(self, num_features, args, max_followup):
super(Cumulative_Probability_Layer, self).__init__()
self.args = args
self.hazard_fc = nn.Linear(num_features, max_followup)
self.base_hazard_fc = nn.Linear(num_features, 1)
self.relu = nn.ReLU(inplace=True)
mask = torch.ones([max_followup, max_followup])
mask = torch.tril(mask, diagonal=0)
mask = torch.nn.Parameter(torch.t(mask), requires_grad=False)
self.register_parameter('upper_triagular_mask', mask)
def hazards(self, x):
raw_hazard = self.hazard_fc(x)
pos_hazard = self.relu(raw_hazard)
return pos_hazard
def forward(self, x):
if self.args.make_probs_indep:
return self.hazards(x)
# hazards = self.hazard_fc(x)
hazards = self.hazards(x)
B, T = hazards.size() #hazards is (B, T)
expanded_hazards = hazards.unsqueeze(-1).expand(B, T, T) #expanded_hazards is (B,T, T)
masked_hazards = expanded_hazards * self.upper_triagular_mask # masked_hazards now (B,T, T)
cum_prob = torch.sum(masked_hazards, dim=1) + self.base_hazard_fc(x)
return cum_prob
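# Illustrative usage sketch (hypothetical sizes; assumes args.make_probs_indep is False):
# layer = Cumulative_Probability_Layer(num_features=512, args=args, max_followup=5)
# cum_prob = layer(torch.randn(8, 512))  # -> (8, 5): cumulative hazard per follow-up step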
| [
"torch.nn.Linear",
"torch.tril",
"torch.ones",
"torch.nn.ReLU",
"torch.t",
"torch.sum"
] | 1.4.0 | harrivle/Mirai | 70413de690da36c5878e2e6006711476e166bb1d |
# This app is for educational purposes only. Insights gained are not financial advice. Use at your own risk!
import streamlit as st
from PIL import Image
import pandas as pd
import base64
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import requests
import json
import time
import tweepy
import datetime
from datetime import datetime, date, time
import plotly.express as px
import numpy as np
from wordcloud import WordCloud, STOPWORDS
import config
import torch
from transformers import pipeline
from hate_speech_model import HateSpeechClassifier
#---------------------------------#
# New feature (make sure to upgrade your streamlit library)
# pip install --upgrade streamlit
#---------------------------------#
# Page layout
## Page expands to full width
st.set_page_config(layout="wide")
#---------------------------------#
# Title
image = Image.open('4.PNG')
st.image(image, width = None)
df = pd.read_csv('data/final_hatespeech_data - final_hatespeech_data.csv')
df['label'] = np.where(df['label']==1,'Hate speech','Normal speech')
# Page layout (continued)
## Divide page to 3 columns (col1 = sidebar, col2 and col3 = page contents)
col1 = st.sidebar
col2, col3 = st.beta_columns((2,1))
#---------------------------------#
# Sidebar - Main panel
col1.header('Select options')
## Sidebar - Currency price unit
locations = ['ELDORET', 'EMBU', 'GARISSA', 'GITHUNGURI', 'HOMA BAY', 'ISINYA', 'ISIOLO', 'JUJA', 'KABARAK', 'KABETE', 'KAJIADO', 'KAKAMEGA', 'KAPSABET', 'NAIROBI', 'KERICHO', 'KIAMBU']
region = pd.DataFrame(locations)
selected_region = col1.selectbox('Select region', region)
## Sidebar - Start and End date
start_date = col1.date_input('Start date', min_value=datetime(2021, 4, 1),max_value=datetime(2021, 4, 29))
start_date = pd.to_datetime(start_date)
end_date = col1.date_input('End date', min_value=datetime(2021, 4, 1),max_value=datetime(2021, 4, 29))
end_date = pd.to_datetime(end_date)
#date_range = col1.date_input('Date Range',value=(datetime(2020, 1, 1), datetime(2030, 1, 1)), help="choose a range or click same day twice")
#st.title('Twitter hatespeech detection tool')
st.markdown("""
This tool classifies tweets as **hate speech or non-hate speech**!
""")
#---------------------------------#
# About
expander_bar_1 = st.beta_expander("About this tool")
expander_bar_1.markdown("""
In an increasingly digital era where online social interactions are considered part of the social context, it is proving inevitable that machine learning should be used to protect people from harmful content. This has been evidenced by the multitude of instances where hate speech propagated online has led to physical injury and loss of lives across the world. Government institutions should now consider online interactions as spaces where potential crimes may occur just like in the physical world.
This tool identifies hate speech as tweets that fall into the following three formal classes:
* **HATE:** This class contains tweets which highlight negative attributes or deficiencies of certain groups of individuals. This class includes hateful comments towards individuals based on race, political opinion, sexual orientation, gender, social status, health condition, etc.
* **OFFN:** This class contains tweets which are degrading, dehumanizing or insulting towards an individual. It encompasses cases of threatening with violent acts.
* **PRFN:** This class contains tweets with explicit content, profane words or unacceptable language in the absence of insults and abuse. This typically concerns the usage of swearwords and cursing.
Political hate speech is the greatest area of concern in Kenya, and it is therefore the focus of this tool.
""")
#---------------------------------#
# Scraping of tweets
expander_bar_2 = st.beta_expander("Search/load tweets")
#@st.cache
# Load classification model
with st.spinner('Loading...'):
model = HateSpeechClassifier()
model.load_state_dict(torch.load(config.MODEL_PATH, map_location=torch.device('cpu')))
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
@st.cache(allow_output_mutation=True)
def sentence_prediction(tw, model):
tokenizer = config.TOKENIZER
max_len = 140
review = str(tw)
inputs = tokenizer.encode_plus(
review,
None,
add_special_tokens=True,
max_length=max_len,
return_token_type_ids=False,
truncation=True,
padding="max_length"
)
class_names = ['Normal Speech','Hate Speech']
input_ids = inputs['input_ids']
mask = inputs['attention_mask']
padding_length = max_len - len(input_ids)
input_ids = input_ids + ([0] * padding_length)
mask = mask + ([0] * padding_length)
input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)
attention_mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0)
input_ids = input_ids.to(device, dtype=torch.long)
attention_mask = attention_mask.to(device, dtype=torch.long)
outputs = model(input_ids=input_ids,
attention_mask=attention_mask
)
outputs = torch.sigmoid(outputs).cpu().detach().numpy()
out = outputs[0][0]
hate_prediction = float(out)
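    # single-logit binary classifier: a sigmoid score >= 0.5 is labelled as hate speech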
if hate_prediction >= 0.5:
return f"{class_names[1]}"
else:
return f"{class_names[0]}"
### SINGLE TWEET CLASSIFICATION ###
expander_bar_2.subheader('Single tweet classification')
# Get tweet text input from the user and classify it
tw = expander_bar_2.text_input('Tweet:')
if tw != '':
# Predict tweet
sentence = sentence_prediction(tw, model)
# Show prediction
#with st.spinner('Predicting...'):
#sentence
if sentence == "Hate Speech":
zero_model = 'typeform/mobilebert-uncased-mnli'
classifier = pipeline("zero-shot-classification", model=zero_model,tokenizer=config.TOKENIZER)
text = tw
candidate_labels = ['Violent', 'Offensive', 'Profane']
result = classifier(text, candidate_labels)
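        # keep the candidate label with the highest zero-shot confidence as the hate sub-cluster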
data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
clus_p = clus['Hate Sub-clusters'].values
clus_pp = clus_p[0]
clus_c = clus['Confidence Level'].values
clus_cc = round(clus_c[0], 2)
#print('hate sub-cluster: ', clus_pp ,' with a Confidence Level of ', clus_cc)
#f"{'hate sub-cluster': clus_pp,'Confidence Level': clus_cc}"
with st.spinner('Predicting...'):
speech = f"**{sentence}**"
subclust = f"**Hate sub-cluster: {clus_pp} with a Confidence Level of {clus_cc}**"
#st.markdown(speech)
expander_bar_2.write(speech)
#st.markdown(subclust)
expander_bar_2.write(subclust)
else:
with st.spinner('Predicting...'):
speech = f"**{sentence}**"
#st.markdown(speech)
expander_bar_2.write(speech)
#st.write(alt.Chart(data).mark_bar().encode(
# x='Confidence Level',
# y=alt.X('Hate Sub-clusters', sort=None),
# color='Hate Sub-clusters'
#).configure_axis(
# grid=False
#).properties(
# width=500,
# height=150
#)
# )
# st.write(out)
### TWEET SEARCH AND CLASSIFY ###
expander_bar_2.subheader('Offline Batch tweet classification')
# Initialize empty dataframe
tweet_data = pd.DataFrame({
'tweet': [],
'predicted-sentiment': [],
'location': [],
'tweet_date': []
})
uploaded_file = expander_bar_2.file_uploader("Choose a file")
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
expander_bar_2.write(df)
# classify tweet
#for tweet in df['tweet']:
for index, row in df.iterrows():
# Skip iteration if tweet is empty
#if tweet in ('', ' '):
if row['tweet'] in ('', ' '):
continue
# Make predictions
class_names = ['Hate Speech', 'Normal Speech']
sentence = sentence_prediction(row['tweet'], model)
# classifier.predict(sentence)
sentiment = sentence
max_len = 140
if sentiment == "Hate Speech":
#tokenizer = AutoTokenizer.from_pretrained('typeform/mobilebert-uncased-mnli')
zero_model = 'typeform/mobilebert-uncased-mnli'
classifier = pipeline("zero-shot-classification", model=zero_model,tokenizer=config.TOKENIZER)
text = row['tweet']
candidate_labels = ['Violent', 'Offensive', 'Profane']
result = classifier(text, candidate_labels)
data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
clus_p = clus['Hate Sub-clusters'].values
clus_pp = clus_p[0]
clus_c = clus['Confidence Level'].values
clus_cc = round(clus_c[0], 2)
#tweet_data = tweet_data.append({'tweet': tweet, 'predicted-sentiment': sentiment, 'hate sub-cluster': clus_pp,
# 'confidence level': clus_cc}, ignore_index=True)
tweet_data = tweet_data.append(
{'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': clus_pp,
'confidence level': clus_cc, 'location': row['location'], 'tweet_date': row['tweet_date']},
ignore_index=True)
#tweet_data = tweet_data.reindex(
# columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])
tweet_data = tweet_data.reindex(
columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level', 'location',
'tweet_date'])
else:
non = ''
#tweet_data = tweet_data.append(
# {'tweet': tweet, 'predicted-sentiment': sentiment, 'hate sub-cluster': non, 'confidence level': non},
# ignore_index=True)
tweet_data = tweet_data.append(
{'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': non,
'confidence level': non, 'location': row['location'], 'tweet_date': row['tweet_date']},
ignore_index=True)
tweet_data = tweet_data.reindex(
columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level', 'location',
'tweet_date'])
#columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])
# As long as the query is valid (not empty or equal to '#')...
#if query != '' and query != '#':
# with st.spinner(f'Searching for and analyzing {query}...'):
# Show query data and sentiment if available
try:
#expander_bar_2.write(tweet_data)
tweet_data.to_csv("predicted_tweet_data")
tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
tweet_data_filtered = tweet_data[
(tweet_data['location'] == selected_region) & (tweet_data['tweet_date'] >= start_date) & (
tweet_data['tweet_date'] <= end_date)]
expander_bar_2.write(tweet_data_filtered)
except NameError: # if no queries have been made yet
pass
#---------------------------------#
# Overview of extracted tweets
tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
tweet_data_filtered = tweet_data[(tweet_data['location']==selected_region) & (tweet_data['tweet_date']>=start_date) & (tweet_data['tweet_date']<=end_date)]
expander_bar_3 = st.beta_expander("Visual overview of loaded tweets")
sentiment_count = tweet_data_filtered['predicted-sentiment'].value_counts()
sentiment_count = pd.DataFrame({'Sentiments':sentiment_count.index,'Tweets':sentiment_count.values})
# region_count = df['location'].value_counts()
# region_count = pd.DataFrame({'Region':region_count.index,'Tweets':region_count.values})
if len(sentiment_count) == 0:
    expander_bar_3.markdown('There are no visuals at the moment. Please load data to show some visuals.')
else:
fig_1 = px.bar(sentiment_count,x='Sentiments',y='Tweets',color='Tweets',height=500)
expander_bar_3.plotly_chart(fig_1)
fig_2 = px.pie(sentiment_count,values='Tweets',names='Sentiments')
expander_bar_3.plotly_chart(fig_2)
# fig_3 = px.bar(region_count,x='Region',y='Tweets',color='Tweets',height=500)
# expander_bar_3.plotly_chart(fig_3)
#expander_bar_3.table()
#---------------------------------#
# Hate speech tweets
expander_bar_3 = st.beta_expander("View hatespeech tweets")
df_hatespeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment']=='Hate Speech']
if len(df_hatespeech) == 0:
expander_bar_3.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
expander_bar_3.dataframe(df_hatespeech[['tweet','predicted-sentiment']])
#---------------------------------#
# Non-hatespeech tweets
expander_bar_4 = st.beta_expander("View normal text tweets")
df_normalspeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment']=='Normal Speech']
if len(df_normalspeech) == 0:
expander_bar_4.markdown('Nothing to show here since normal speech has not been detected in the set of uploaded tweets')
else:
expander_bar_4.dataframe(df_normalspeech[['tweet','predicted-sentiment']])
#---------------------------------#
#---------------------------------#
#---------------------------------#
# Hate speech words
st.set_option('deprecation.showPyplotGlobalUse', False)
expander_bar_5 = st.beta_expander("Hate speech key words")
if len(df_hatespeech) == 0:
expander_bar_5.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
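    # build a word cloud from the hate-speech tweets, dropping URLs, @mentions and "RT" markers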
words = " ".join(df_hatespeech["tweet"])
processed_words = " ".join([word for word in words.split() if "http" not in word and not word.startswith("@") and word != "RT"])
wordcloud = WordCloud(stopwords=STOPWORDS, background_color="white", width=800, height=640).generate(processed_words)
plt.imshow(wordcloud)
plt.xticks([])
plt.yticks([])
expander_bar_5.pyplot()
#---------------------------------# | [
"torch.device",
"torch.sigmoid",
"torch.tensor"
] | 1.8.1 | sgich/HateSpeechDetection | e6e0dd2ca6956cdb496232196ef292b97e96eb04 |
1.7 | import argparse
import os
from tqdm import tqdm
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import deepab
from deepab.models.AbResNet import AbResNet, load_model
from deepab.models.PairedSeqLSTM import load_model as load_lstm_rnn
from deepab.util.masking import MASK_VALUE
from deepab.util.training import check_for_h5_file
# from deepab.util.util import RawTextArgumentDefaultsHelpFormatter
from deepab.datasets.H5PairwiseGeometryDataset import H5PairwiseGeometryDataset
from deepab.preprocess.generate_h5_pairwise_geom_file import antibody_to_h5
_output_names = ['ca_dist', 'cb_dist', 'no_dist', 'omega', 'theta', 'phi']
class FocalLoss(nn.modules.loss._WeightedLoss):
def __init__(self,
weight=None,
gamma=2,
reduction='mean',
ignore_index=MASK_VALUE):
super(FocalLoss, self).__init__(weight, reduction=reduction)
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
def forward(self, input, target):
ce_loss = F.cross_entropy(input,
target,
reduction=self.reduction,
weight=self.weight,
ignore_index=self.ignore_index)
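        # note: with the default reduction='mean', ce_loss is already a scalar here, so
        # the focal term below scales the batch-averaged loss rather than each element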
pt = torch.exp(-ce_loss)
focal_loss = ((1 - pt)**self.gamma * ce_loss).mean()
return focal_loss
def train_epoch(model, train_loader, optimizer, device, criterion, loss_size):
"""Trains a model for one epoch"""
model.train()
running_losses = torch.zeros(loss_size)
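    # one accumulator per output head plus one for the total (loss_size = len(_output_names) + 1 in train())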
for inputs, labels in tqdm(train_loader, total=len(train_loader)):
inputs = inputs.to(device)
labels = [label.to(device) for label in labels]
optimizer.zero_grad()
def handle_batch():
outputs = model(inputs)
losses = [
criterion(output, label)
for output, label in zip(outputs, labels)
]
total_loss = sum(losses)
losses.append(total_loss)
total_loss.backward()
optimizer.step()
return outputs, torch.Tensor([float(l.item()) for l in losses])
outputs, batch_loss = handle_batch()
running_losses += batch_loss
return running_losses
def validate(model, validation_loader, device, criterion, loss_size):
""""""
with torch.no_grad():
model.eval()
running_losses = torch.zeros(loss_size)
for inputs, labels in tqdm(validation_loader,
total=len(validation_loader)):
inputs = inputs.to(device)
labels = [label.to(device) for label in labels]
def handle_batch():
outputs = model(inputs)
losses = [
criterion(output, label)
for output, label in zip(outputs, labels)
]
total_loss = sum(losses)
losses.append(total_loss)
return outputs, torch.Tensor([float(l.item()) for l in losses])
outputs, batch_loss = handle_batch()
running_losses += batch_loss
return running_losses
def train(model,
train_loader,
validation_loader,
optimizer,
epochs,
current_epoch,
device,
criterion,
lr_modifier,
writer,
save_file,
save_every,
properties=None):
""""""
properties = {} if properties is None else properties
print('Using {} as device'.format(str(device).upper()))
model = model.to(device)
loss_size = len(_output_names) + 1
for epoch in range(current_epoch, epochs):
train_losses = train_epoch(model, train_loader, optimizer, device,
criterion, loss_size)
avg_train_losses = train_losses / len(train_loader)
train_loss_dict = dict(
zip(_output_names + ['total'], avg_train_losses.tolist()))
writer.add_scalars('train_loss', train_loss_dict, global_step=epoch)
print('\nAverage training loss (epoch {}): {}'.format(
epoch, train_loss_dict))
val_losses = validate(model, validation_loader, device, criterion,
loss_size)
avg_val_losses = val_losses / len(validation_loader)
val_loss_dict = dict(
zip(_output_names + ['total'], avg_val_losses.tolist()))
writer.add_scalars('validation_loss', val_loss_dict, global_step=epoch)
print('\nAverage validation loss (epoch {}): {}'.format(
epoch, val_loss_dict))
total_val_loss = val_losses[-1]
lr_modifier.step(total_val_loss)
if (epoch + 1) % save_every == 0:
properties.update({'model_state_dict': model.state_dict()})
properties.update({
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss_dict,
'val_loss': val_loss_dict,
'epoch': epoch
})
torch.save(properties, save_file + ".e{}".format(epoch + 1))
properties.update({'model_state_dict': model.state_dict()})
properties.update({
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss_dict,
'val_loss': val_loss_dict,
'epoch': epoch
})
torch.save(properties, save_file)
def _get_args():
"""Gets command line arguments"""
project_path = os.path.abspath(os.path.join(deepab.__file__, "../.."))
desc = ('''
Script for training a model using a non-redundant set of bound and
unbound antibodies from SabDab with at most 99% sequence similarity,
a resolution cutoff of 3, and with a paired VH/VL.
\n
If there is no H5 file named antibody.h5 in the deepab/data directory,
    then the script automatically uses the PDB files in the
    deepab/data/antibody_database directory to generate antibody.h5. If no
    such directory exists, then the script downloads the set of PDBs from
    SabDab outlined above.
''')
    parser = argparse.ArgumentParser(description=desc)
# Model architecture arguments
parser.add_argument('--num_blocks1D',
type=int,
default=3,
help='Number of one-dimensional ResNet blocks to use.')
parser.add_argument('--num_blocks2D',
type=int,
default=25,
help='Number of two-dimensional ResNet blocks to use.')
parser.add_argument('--dilation_cycle', type=int, default=5)
parser.add_argument('--num_bins', type=int, default=37)
parser.add_argument('--dropout', type=float, default=0.2)
# Training arguments
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--save_every', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--use_gpu', default=False, action="store_true")
parser.add_argument('--train_split', type=float, default=0.9)
default_h5_file = os.path.join(project_path, 'data/abPwGeometry.h5')
parser.add_argument('--h5_file', type=str, default=default_h5_file)
default_antibody_database = os.path.join(project_path,
'data/antibody_database')
parser.add_argument('--antibody_database',
type=str,
default=default_antibody_database)
now = str(datetime.now().strftime('%y-%m-%d %H:%M:%S'))
default_model_path = os.path.join(project_path,
'trained_models/model_{}/'.format(now))
parser.add_argument('--output_dir', type=str, default=default_model_path)
parser.add_argument('--pretrain_model_file', type=str, default=None)
parser.add_argument('--random_seed', type=int, default=0)
return parser.parse_args()
def _cli():
args = _get_args()
device_type = 'cuda' if torch.cuda.is_available(
) and args.use_gpu else 'cpu'
device = torch.device(device_type)
out_dir = args.output_dir
current_epoch = 0
if os.path.isdir(out_dir) and os.path.exists(
os.path.join(out_dir, "model.p")):
model_file = os.path.join(out_dir, "model.p")
properties = torch.load(model_file, map_location='cpu')
model = load_model(model_file, eval_mode=False,
device=device).to(device)
current_epoch = properties['epoch'] + 1
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
optimizer.load_state_dict(properties['optimizer_state_dict'])
elif args.pretrain_model_file != None and os.path.exists(
args.pretrain_model_file):
pretrain_model_file = args.pretrain_model_file
properties = torch.load(pretrain_model_file, map_location='cpu')
model = load_model(pretrain_model_file,
eval_mode=False,
device=device,
strict=False).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
properties.update({'lr': args.lr})
print('Making {} ...'.format(out_dir))
os.mkdir(out_dir)
else:
lstm_model_file = "trained_models/pairedseqlstm_scaler.p.e5"
lstm_model = load_lstm_rnn(lstm_model_file, eval_mode=True).to(device)
lstm_checkpoint_dict = torch.load(lstm_model_file, map_location='cpu')
lstm_mean = torch.tensor(
lstm_checkpoint_dict['scaler_mean']).float().to(device)
lstm_scale = torch.tensor(
lstm_checkpoint_dict['scaler_scale']).float().to(device)
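        # scaler statistics stored with the LSTM checkpoint, passed to AbResNet
        # (presumably to normalize the LSTM-derived sequence features)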
properties = dict(num_out_bins=args.num_bins,
num_blocks1D=args.num_blocks1D,
num_blocks2D=args.num_blocks2D,
dropout_proportion=args.dropout,
dilation_cycle=args.dilation_cycle)
model = AbResNet(21,
lstm_model=lstm_model,
lstm_mean=lstm_mean,
lstm_scale=lstm_scale,
**properties)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
properties.update({'lr': args.lr})
properties['lstm_checkpoint_dict'] = lstm_checkpoint_dict
print('Making {} ...'.format(out_dir))
os.mkdir(out_dir)
properties.update({'lr': args.lr})
# Load dataset loaders from h5 file
h5_file = args.h5_file
check_for_h5_file(h5_file, antibody_to_h5, args.antibody_database)
dataset = H5PairwiseGeometryDataset(h5_file,
num_bins=args.num_bins,
mask_distant_orientations=True)
train_split_length = int(len(dataset) * args.train_split)
torch.manual_seed(args.random_seed)
train_dataset, validation_dataset = data.random_split(
dataset, [train_split_length,
len(dataset) - train_split_length])
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
collate_fn=H5PairwiseGeometryDataset.merge_samples_to_minibatch)
validation_loader = data.DataLoader(
validation_dataset,
batch_size=args.batch_size,
collate_fn=H5PairwiseGeometryDataset.merge_samples_to_minibatch)
criterion = FocalLoss(ignore_index=dataset.mask_fill_value)
lr_modifier = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
verbose=True)
writer = SummaryWriter(os.path.join(out_dir, 'tensorboard'))
print('Arguments:\n', args)
print('Model:\n', model)
train(model=model,
train_loader=train_loader,
validation_loader=validation_loader,
optimizer=optimizer,
device=device,
epochs=args.epochs,
current_epoch=current_epoch,
criterion=criterion,
lr_modifier=lr_modifier,
writer=writer,
save_file=os.path.join(out_dir, 'model.p'),
save_every=args.save_every,
properties=properties)
if __name__ == '__main__':
_cli()
| [
"torch.zeros",
"torch.device",
"torch.save",
"torch.no_grad",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.exp"
] | 1.7.1 | nsridhar1/DeepAb | 659bad092e1b56ec9f056d9c031900990436200c |
1.8 | from data.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
from data.edf_loader import EdfDataset
from data.sample_loader import SampleDataset
from exp.exp_basic import Exp_Basic
from models.model import Informer, InformerStack
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
class Exp_Informer(Exp_Basic):
def __init__(self, args):
super(Exp_Informer, self).__init__(args)
def _build_model(self):
model_dict = {
'informer':Informer,
'informerstack':InformerStack,
}
if self.args.model=='informer' or self.args.model=='informerstack':
e_layers = self.args.e_layers if self.args.model=='informer' else self.args.s_layers
model = model_dict[self.args.model](
self.args.enc_in,
self.args.dec_in,
self.args.c_out,
self.args.seq_len,
self.args.label_len,
self.args.pred_len,
self.args.factor,
self.args.d_model,
self.args.n_heads,
e_layers, # self.args.e_layers,
self.args.d_layers,
self.args.d_ff,
self.args.dropout,
self.args.attn,
self.args.embed,
self.args.freq,
self.args.activation,
self.args.output_attention,
self.args.distil,
self.args.mix,
self.device
).float()
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag):
args = self.args
data_dict = {
'ETTh1':Dataset_ETT_hour,
'ETTh2':Dataset_ETT_hour,
'ETTm1':Dataset_ETT_minute,
'ETTm2':Dataset_ETT_minute,
'WTH':Dataset_Custom,
'ECL':Dataset_Custom,
'Solar':Dataset_Custom,
'custom':Dataset_Custom,
'SingleEdf': EdfDataset,
'SampleEdf': SampleDataset
}
Data = data_dict[self.args.data]
timeenc = 0 if args.embed!='timeF' else 1
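        # timeenc=1 enables time-feature encoding, which is only used with the 'timeF' embedding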
if flag == 'test':
shuffle_flag = False; drop_last = True; batch_size = args.batch_size; freq=args.freq
elif flag=='pred':
shuffle_flag = False; drop_last = False; batch_size = 1; freq=args.detail_freq
Data = Dataset_Pred
else:
shuffle_flag = True; drop_last = True; batch_size = args.batch_size; freq=args.freq
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target,
inverse=args.inverse,
timeenc=timeenc,
freq=freq,
cols=args.cols
)
if self.args.data == 'SampleEdf':
data_set = data_set.sample_dataset
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(vali_loader):
pred, true = self._process_one_batch(
vali_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = criterion(pred.detach().cpu(), true.detach().cpu())
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag = 'train')
vali_data, vali_loader = self._get_data(flag = 'val')
test_data, test_loader = self._get_data(flag = 'test')
path = os.path.join(self.args.checkpoints, setting)
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
if self.args.use_amp:
scaler = torch.cuda.amp.GradScaler()
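            # gradient scaler for automatic mixed-precision training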
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
pred, true = self._process_one_batch(
train_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = criterion(pred, true)
train_loss.append(loss.item())
if (i+1) % 500==0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time()-time_now)/iter_count
left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
if self.args.use_amp:
scaler.scale(loss).backward()
scaler.step(model_optim)
scaler.update()
else:
loss.backward()
model_optim.step()
print("Epoch: {} cost time: {}".format(epoch+1, time.time()-epoch_time))
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
adjust_learning_rate(model_optim, epoch+1, self.args)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self, setting):
test_data, test_loader = self._get_data(flag='test')
self.model.eval()
preds = []
trues = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(test_loader):
pred, true = self._process_one_batch(
test_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
preds.append(pred.detach().cpu().numpy())
trues.append(true.detach().cpu().numpy())
preds = np.array(preds)
trues = np.array(trues)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
print('test shape:', preds.shape, trues.shape)
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
return
def predict(self, setting, load=False):
pred_data, pred_loader = self._get_data(flag='pred')
if load:
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
self.model.eval()
preds = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(pred_loader):
pred, true = self._process_one_batch(
pred_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
preds.append(pred.detach().cpu().numpy())
preds = np.array(preds)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
np.save(folder_path+'real_prediction.npy', preds)
return
def _process_one_batch(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
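        # (last label_len ground-truth steps followed by pred_len placeholder steps of zeros or ones, depending on args.padding)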
if self.args.padding==0:
dec_inp = torch.zeros([batch_y.shape[0], self.args.pred_len, batch_y.shape[-1]]).float()
elif self.args.padding==1:
dec_inp = torch.ones([batch_y.shape[0], self.args.pred_len, batch_y.shape[-1]]).float()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)
# encoder - decoder
if self.args.use_amp:
with torch.cuda.amp.autocast():
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
if self.args.inverse:
outputs = dataset_object.inverse_transform(outputs)
f_dim = -1 if self.args.features=='MS' else 0
batch_y = batch_y[:,-self.args.pred_len:,f_dim:].to(self.device)
return outputs, batch_y
| [
"torch.zeros",
"torch.cat",
"torch.cuda.amp.autocast",
"torch.nn.MSELoss",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.load",
"torch.cuda.amp.GradScaler",
"torch.nn.DataParallel"
] | 1.8.0 | kimjimong/Informer2020 | 1466bea980552ca65468f10d53bef8506935f315 |
1.1 | import torch
from torch import nn
from torch import autograd
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class SeqToLangModel(nn.Module):
def __init__(self, num_of_ngrams, output_size, hidden_size, embedding_dim):
super().__init__()
self.chars_embedding = nn.Embedding(num_embeddings=num_of_ngrams, padding_idx=0, embedding_dim=embedding_dim)
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, bidirectional=True, batch_first=True)
self.linear = nn.Linear(hidden_size*2, output_size)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def forward(self, tokenized_texts):
batch_size = tokenized_texts.shape[0]
num_of_words = tokenized_texts.shape[1]
num_of_ngrams = tokenized_texts.shape[2]
lens = torch.sum(tokenized_texts.sum(2) > 0, dim=1)
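        # number of non-padded words per sentence (words with at least one non-zero n-gram id)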
x = tokenized_texts.view(batch_size * num_of_words, num_of_ngrams)
embedded = self.chars_embedding(x)
embedded = embedded.view(batch_size, num_of_words, num_of_ngrams, -1)
embedded_sum = embedded.sum(2) / lens.view(batch_size, 1, 1).float()
pack = pack_padded_sequence(embedded_sum, lens, batch_first=True, enforce_sorted=False)
rnn_outputs, last_hidden = self.rnn(pack)
unpacked, unpacked_len = pad_packed_sequence(rnn_outputs, batch_first=True)
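        # pad_packed_sequence only pads to the longest sequence in this batch, so
        # zero-pad back to num_of_words to keep a consistent output shape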
if unpacked.size(1) < num_of_words:
dummy_tensor = autograd.Variable(torch.zeros(batch_size, num_of_words - unpacked.size(1), unpacked.shape[-1])).to(self.device)
unpacked = torch.cat([unpacked, dummy_tensor], 1)
output = self.linear(unpacked)
return output
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.cuda.is_available",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Embedding"
] | 1.1.0 | abdelrahman0110/task | 2956dc96f51b3fb3d691d0f8bbad9d9bfdeb6078 |
1.4 | import tqdm
import pytest
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tianshou.policy import BasePolicy
from tianshou.env import DummyVectorEnv, SubprocVectorEnv
from tianshou.data import Batch, Collector, AsyncCollector
from tianshou.data import (
ReplayBuffer,
PrioritizedReplayBuffer,
VectorReplayBuffer,
CachedReplayBuffer,
)
if __name__ == '__main__':
from env import MyTestEnv, NXEnv
else: # pytest
from test.base.env import MyTestEnv, NXEnv
class MyPolicy(BasePolicy):
def __init__(self, dict_state=False, need_state=True):
"""
:param bool dict_state: if the observation of the environment is a dict
:param bool need_state: if the policy needs the hidden state (for RNN)
"""
super().__init__()
self.dict_state = dict_state
self.need_state = need_state
def forward(self, batch, state=None):
if self.need_state:
if state is None:
state = np.zeros((len(batch.obs), 2))
else:
state += 1
if self.dict_state:
return Batch(act=np.ones(len(batch.obs['index'])), state=state)
return Batch(act=np.ones(len(batch.obs)), state=state)
def learn(self):
pass
class Logger:
def __init__(self, writer):
self.cnt = 0
self.writer = writer
def preprocess_fn(self, **kwargs):
# modify info before adding into the buffer, and recorded into tfb
# if obs && env_id exist -> reset
# if obs_next/rew/done/info/env_id exist -> normal step
if 'rew' in kwargs:
info = kwargs['info']
info.rew = kwargs['rew']
if 'key' in info.keys():
self.writer.add_scalar(
'key', np.mean(info.key), global_step=self.cnt)
self.cnt += 1
return Batch(info=info)
else:
return Batch()
@staticmethod
def single_preprocess_fn(**kwargs):
# same as above, without tfb
if 'rew' in kwargs:
info = kwargs['info']
info.rew = kwargs['rew']
return Batch(info=info)
else:
return Batch()
def test_collector():
writer = SummaryWriter('log/collector')
logger = Logger(writer)
env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0) for i in [2, 3, 4, 5]]
venv = SubprocVectorEnv(env_fns)
dum = DummyVectorEnv(env_fns)
policy = MyPolicy()
env = env_fns[0]()
c0 = Collector(policy, env, ReplayBuffer(size=100), logger.preprocess_fn)
c0.collect(n_step=3)
assert len(c0.buffer) == 3
assert np.allclose(c0.buffer.obs[:4, 0], [0, 1, 0, 0])
assert np.allclose(c0.buffer[:].obs_next[..., 0], [1, 2, 1])
c0.collect(n_episode=3)
assert len(c0.buffer) == 8
assert np.allclose(c0.buffer.obs[:10, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 0])
assert np.allclose(c0.buffer[:].obs_next[..., 0],
[1, 2, 1, 2, 1, 2, 1, 2])
c0.collect(n_step=3, random=True)
c1 = Collector(
policy, venv,
VectorReplayBuffer(total_size=100, buffer_num=4),
logger.preprocess_fn)
c1.collect(n_step=8)
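    # VectorReplayBuffer(100, buffer_num=4) keeps 4 sub-buffers of 25 slots each,
    # so data collected from env i is written starting at offset 25 * i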
obs = np.zeros(100)
obs[[0, 1, 25, 26, 50, 51, 75, 76]] = [0, 1, 0, 1, 0, 1, 0, 1]
assert np.allclose(c1.buffer.obs[:, 0], obs)
assert np.allclose(c1.buffer[:].obs_next[..., 0], [1, 2, 1, 2, 1, 2, 1, 2])
c1.collect(n_episode=4)
assert len(c1.buffer) == 16
obs[[2, 3, 27, 52, 53, 77, 78, 79]] = [0, 1, 2, 2, 3, 2, 3, 4]
assert np.allclose(c1.buffer.obs[:, 0], obs)
assert np.allclose(c1.buffer[:].obs_next[..., 0],
[1, 2, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5])
c1.collect(n_episode=4, random=True)
c2 = Collector(
policy, dum,
VectorReplayBuffer(total_size=100, buffer_num=4),
logger.preprocess_fn)
c2.collect(n_episode=7)
obs1 = obs.copy()
obs1[[4, 5, 28, 29, 30]] = [0, 1, 0, 1, 2]
obs2 = obs.copy()
obs2[[28, 29, 30, 54, 55, 56, 57]] = [0, 1, 2, 0, 1, 2, 3]
c2obs = c2.buffer.obs[:, 0]
assert np.all(c2obs == obs1) or np.all(c2obs == obs2)
c2.reset_env()
c2.reset_buffer()
assert c2.collect(n_episode=8)['n/ep'] == 8
obs[[4, 5, 28, 29, 30, 54, 55, 56, 57]] = [0, 1, 0, 1, 2, 0, 1, 2, 3]
assert np.all(c2.buffer.obs[:, 0] == obs)
c2.collect(n_episode=4, random=True)
# test corner case
with pytest.raises(TypeError):
Collector(policy, dum, ReplayBuffer(10))
with pytest.raises(TypeError):
Collector(policy, dum, PrioritizedReplayBuffer(10, 0.5, 0.5))
with pytest.raises(TypeError):
c2.collect()
# test NXEnv
for obs_type in ["array", "object"]:
envs = SubprocVectorEnv([
lambda i=x: NXEnv(i, obs_type) for x in [5, 10, 15, 20]])
c3 = Collector(policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4))
c3.collect(n_step=6)
assert c3.buffer.obs.dtype == object
def test_collector_with_async():
env_lens = [2, 3, 4, 5]
writer = SummaryWriter('log/async_collector')
logger = Logger(writer)
env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0.001, random_sleep=True)
for i in env_lens]
venv = SubprocVectorEnv(env_fns, wait_num=len(env_fns) - 1)
policy = MyPolicy()
bufsize = 60
c1 = AsyncCollector(
policy, venv,
VectorReplayBuffer(total_size=bufsize * 4, buffer_num=4),
logger.preprocess_fn)
ptr = [0, 0, 0, 0]
for n_episode in tqdm.trange(1, 30, desc="test async n_episode"):
result = c1.collect(n_episode=n_episode)
assert result["n/ep"] >= n_episode
# check buffer data, obs and obs_next, env_id
for i, count in enumerate(
np.bincount(result["lens"], minlength=6)[2:]):
env_len = i + 2
total = env_len * count
indices = np.arange(ptr[i], ptr[i] + total) % bufsize
ptr[i] = (ptr[i] + total) % bufsize
seq = np.arange(env_len)
buf = c1.buffer.buffers[i]
assert np.all(buf.info.env_id[indices] == i)
assert np.all(buf.obs[indices].reshape(count, env_len) == seq)
assert np.all(buf.obs_next[indices].reshape(
count, env_len) == seq + 1)
# test async n_step, for now the buffer should be full of data
for n_step in tqdm.trange(1, 15, desc="test async n_step"):
result = c1.collect(n_step=n_step)
assert result["n/st"] >= n_step
for i in range(4):
env_len = i + 2
seq = np.arange(env_len)
buf = c1.buffer.buffers[i]
assert np.all(buf.info.env_id == i)
assert np.all(buf.obs.reshape(-1, env_len) == seq)
assert np.all(buf.obs_next.reshape(-1, env_len) == seq + 1)
with pytest.raises(TypeError):
c1.collect()
def test_collector_with_dict_state():
env = MyTestEnv(size=5, sleep=0, dict_state=True)
policy = MyPolicy(dict_state=True)
c0 = Collector(policy, env, ReplayBuffer(size=100),
Logger.single_preprocess_fn)
c0.collect(n_step=3)
c0.collect(n_episode=2)
assert len(c0.buffer) == 10
env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0, dict_state=True)
for i in [2, 3, 4, 5]]
envs = DummyVectorEnv(env_fns)
envs.seed(666)
obs = envs.reset()
assert not np.isclose(obs[0]['rand'], obs[1]['rand'])
c1 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4),
Logger.single_preprocess_fn)
c1.collect(n_step=12)
result = c1.collect(n_episode=8)
assert result['n/ep'] == 8
lens = np.bincount(result['lens'])
assert result['n/st'] == 21 and np.all(lens == [0, 0, 2, 2, 2, 2]) or \
result['n/st'] == 20 and np.all(lens == [0, 0, 3, 1, 2, 2])
batch, _ = c1.buffer.sample(10)
c0.buffer.update(c1.buffer)
assert len(c0.buffer) in [42, 43]
if len(c0.buffer) == 42:
assert np.all(c0.buffer[:].obs.index[..., 0] == [
0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 2, 0, 1, 2,
0, 1, 2, 3, 0, 1, 2, 3,
0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
]), c0.buffer[:].obs.index[..., 0]
else:
assert np.all(c0.buffer[:].obs.index[..., 0] == [
0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
0, 1, 0, 1, 0, 1,
0, 1, 2, 0, 1, 2, 0, 1, 2,
0, 1, 2, 3, 0, 1, 2, 3,
0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
]), c0.buffer[:].obs.index[..., 0]
c2 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4, stack_num=4),
Logger.single_preprocess_fn)
c2.collect(n_episode=10)
batch, _ = c2.buffer.sample(10)
def test_collector_with_ma():
env = MyTestEnv(size=5, sleep=0, ma_rew=4)
policy = MyPolicy()
c0 = Collector(policy, env, ReplayBuffer(size=100),
Logger.single_preprocess_fn)
# n_step=3 will collect a full episode
r = c0.collect(n_step=3)['rews']
assert len(r) == 0
r = c0.collect(n_episode=2)['rews']
assert r.shape == (2, 4) and np.all(r == 1)
env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0, ma_rew=4)
for i in [2, 3, 4, 5]]
envs = DummyVectorEnv(env_fns)
c1 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4),
Logger.single_preprocess_fn)
r = c1.collect(n_step=12)['rews']
assert r.shape == (2, 4) and np.all(r == 1), r
r = c1.collect(n_episode=8)['rews']
assert r.shape == (8, 4) and np.all(r == 1)
batch, _ = c1.buffer.sample(10)
print(batch)
c0.buffer.update(c1.buffer)
assert len(c0.buffer) in [42, 43]
if len(c0.buffer) == 42:
rew = [
0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1,
0, 0, 0, 1, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
]
else:
rew = [
0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
0, 1, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 1, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
]
assert np.all(c0.buffer[:].rew == [[x] * 4 for x in rew])
assert np.all(c0.buffer[:].done == rew)
c2 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4, stack_num=4),
Logger.single_preprocess_fn)
r = c2.collect(n_episode=10)['rews']
assert r.shape == (10, 4) and np.all(r == 1)
batch, _ = c2.buffer.sample(10)
def test_collector_with_atari_setting():
reference_obs = np.zeros([6, 4, 84, 84])
for i in range(6):
reference_obs[i, 3, np.arange(84), np.arange(84)] = i
reference_obs[i, 2, np.arange(84)] = i
reference_obs[i, 1, :, np.arange(84)] = i
reference_obs[i, 0] = i
# atari single buffer
env = MyTestEnv(size=5, sleep=0, array_state=True)
policy = MyPolicy()
c0 = Collector(policy, env, ReplayBuffer(size=100))
c0.collect(n_step=6)
c0.collect(n_episode=2)
assert c0.buffer.obs.shape == (100, 4, 84, 84)
assert c0.buffer.obs_next.shape == (100, 4, 84, 84)
assert len(c0.buffer) == 15
obs = np.zeros_like(c0.buffer.obs)
obs[np.arange(15)] = reference_obs[np.arange(15) % 5]
assert np.all(obs == c0.buffer.obs)
c1 = Collector(policy, env, ReplayBuffer(size=100, ignore_obs_next=True))
c1.collect(n_episode=3)
assert np.allclose(c0.buffer.obs, c1.buffer.obs)
with pytest.raises(AttributeError):
c1.buffer.obs_next
assert np.all(reference_obs[[1, 2, 3, 4, 4] * 3] == c1.buffer[:].obs_next)
c2 = Collector(
policy, env,
ReplayBuffer(size=100, ignore_obs_next=True, save_only_last_obs=True))
c2.collect(n_step=8)
assert c2.buffer.obs.shape == (100, 84, 84)
obs = np.zeros_like(c2.buffer.obs)
obs[np.arange(8)] = reference_obs[[0, 1, 2, 3, 4, 0, 1, 2], -1]
assert np.all(c2.buffer.obs == obs)
assert np.allclose(c2.buffer[:].obs_next,
reference_obs[[1, 2, 3, 4, 4, 1, 2, 2], -1])
# atari multi buffer
env_fns = [lambda x=i: MyTestEnv(size=x, sleep=0, array_state=True)
for i in [2, 3, 4, 5]]
envs = DummyVectorEnv(env_fns)
c3 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4))
c3.collect(n_step=12)
result = c3.collect(n_episode=9)
assert result["n/ep"] == 9 and result["n/st"] == 23
assert c3.buffer.obs.shape == (100, 4, 84, 84)
obs = np.zeros_like(c3.buffer.obs)
obs[np.arange(8)] = reference_obs[[0, 1, 0, 1, 0, 1, 0, 1]]
obs[np.arange(25, 34)] = reference_obs[[0, 1, 2, 0, 1, 2, 0, 1, 2]]
obs[np.arange(50, 58)] = reference_obs[[0, 1, 2, 3, 0, 1, 2, 3]]
obs[np.arange(75, 85)] = reference_obs[[0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
assert np.all(obs == c3.buffer.obs)
obs_next = np.zeros_like(c3.buffer.obs_next)
obs_next[np.arange(8)] = reference_obs[[1, 2, 1, 2, 1, 2, 1, 2]]
obs_next[np.arange(25, 34)] = reference_obs[[1, 2, 3, 1, 2, 3, 1, 2, 3]]
obs_next[np.arange(50, 58)] = reference_obs[[1, 2, 3, 4, 1, 2, 3, 4]]
obs_next[np.arange(75, 85)] = reference_obs[[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]]
assert np.all(obs_next == c3.buffer.obs_next)
c4 = Collector(
policy, envs,
VectorReplayBuffer(total_size=100, buffer_num=4, stack_num=4,
ignore_obs_next=True, save_only_last_obs=True))
c4.collect(n_step=12)
result = c4.collect(n_episode=9)
assert result["n/ep"] == 9 and result["n/st"] == 23
assert c4.buffer.obs.shape == (100, 84, 84)
obs = np.zeros_like(c4.buffer.obs)
slice_obs = reference_obs[:, -1]
obs[np.arange(8)] = slice_obs[[0, 1, 0, 1, 0, 1, 0, 1]]
obs[np.arange(25, 34)] = slice_obs[[0, 1, 2, 0, 1, 2, 0, 1, 2]]
obs[np.arange(50, 58)] = slice_obs[[0, 1, 2, 3, 0, 1, 2, 3]]
obs[np.arange(75, 85)] = slice_obs[[0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
assert np.all(c4.buffer.obs == obs)
obs_next = np.zeros([len(c4.buffer), 4, 84, 84])
ref_index = np.array([
1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 2, 1, 2, 2, 1, 2, 2,
1, 2, 3, 3, 1, 2, 3, 3,
1, 2, 3, 4, 4, 1, 2, 3, 4, 4,
])
obs_next[:, -1] = slice_obs[ref_index]
ref_index -= 1
ref_index[ref_index < 0] = 0
obs_next[:, -2] = slice_obs[ref_index]
ref_index -= 1
ref_index[ref_index < 0] = 0
obs_next[:, -3] = slice_obs[ref_index]
ref_index -= 1
ref_index[ref_index < 0] = 0
obs_next[:, -4] = slice_obs[ref_index]
assert np.all(obs_next == c4.buffer[:].obs_next)
buf = ReplayBuffer(100, stack_num=4, ignore_obs_next=True,
save_only_last_obs=True)
c5 = Collector(policy, envs, CachedReplayBuffer(buf, 4, 10))
result_ = c5.collect(n_step=12)
assert len(buf) == 5 and len(c5.buffer) == 12
result = c5.collect(n_episode=9)
assert result["n/ep"] == 9 and result["n/st"] == 23
assert len(buf) == 35
assert np.all(buf.obs[:len(buf)] == slice_obs[[
0, 1, 0, 1, 2, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4,
0, 1, 0, 1, 2, 0, 1, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2, 3, 4]])
assert np.all(buf[:].obs_next[:, -1] == slice_obs[[
1, 1, 1, 2, 2, 1, 1, 1, 2, 3, 3, 1, 2, 3, 4, 4,
1, 1, 1, 2, 2, 1, 1, 1, 2, 3, 3, 1, 2, 2, 1, 2, 3, 4, 4]])
assert len(buf) == len(c5.buffer)
# test buffer=None
c6 = Collector(policy, envs)
result1 = c6.collect(n_step=12)
for key in ["n/ep", "n/st", "rews", "lens"]:
assert np.allclose(result1[key], result_[key])
result2 = c6.collect(n_episode=9)
for key in ["n/ep", "n/st", "rews", "lens"]:
assert np.allclose(result2[key], result[key])
if __name__ == '__main__':
test_collector()
test_collector_with_dict_state()
test_collector_with_ma()
test_collector_with_atari_setting()
test_collector_with_async()
| [
"torch.utils.tensorboard.SummaryWriter"
] | 1.4.0 | ksc999/tianshou | 8a5e2190f7045ffee2ffa4346c0010693ac7b222 |
1.5 | import argparse
import os
# workaround to unpickle old model files
import sys
import numpy as np
import torch
import time
from envs.envs_util import VecPyTorch, make_vec_envs
from misc.utils import get_render_func, get_vec_normalize
# sys.path.append('a2c_ppo_acktr')
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
'--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument(
'--log-interval',
type=int,
default=10,
help='log interval, one log per n updates (default: 10)')
parser.add_argument(
'--env-name',
default='PongNoFrameskip-v4',
help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument(
'--algo',
default='ppo',
    help='algorithm subdirectory under load-dir to load the model from (default: ppo)')
parser.add_argument(
'--load-dir',
default='./trained_models/',
help='directory to load agent logs (default: ./trained_models/)')
parser.add_argument(
'--non-det',
action='store_true',
default=False,
help='whether to use a non-deterministic policy')
args = parser.parse_args()
args.det = not args.non_det
env = make_vec_envs(
args.env_name,
args.seed + 1000,
1,
None,
None,
device='cpu',
allow_early_resets=False)
# Get a render function
render_func = get_render_func(env)
# We need to use the same statistics for normalization as used in training
actor_critic, ob_rms = \
torch.load(os.path.join(args.load_dir, args.algo, args.env_name + ".pt"))
vec_norm = get_vec_normalize(env)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
recurrent_hidden_states = torch.zeros(1,
actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
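# masks mark episode boundaries for a recurrent policy: 0 resets the hidden state, 1 carries it over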
obs = env.reset()
if render_func is not None:
render_func('human')
if args.env_name.find('Bullet') > -1:
import pybullet as p
torsoId = -1
for i in range(p.getNumBodies()):
if (p.getBodyInfo(i)[0].decode() == "torso"):
torsoId = i
while True:
with torch.no_grad():
value, action, _, recurrent_hidden_states = actor_critic.act(
obs, recurrent_hidden_states, masks, deterministic=args.det)
    # Observe reward and next obs
obs, reward, done, _ = env.step(action)
masks.fill_(0.0 if done else 1.0)
if args.env_name.find('Bullet') > -1:
if torsoId > -1:
distance = 5
yaw = 0
humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)
if render_func is not None:
render_func('human')
time.sleep(1/30)
| [
"torch.zeros",
"torch.no_grad"
] | 1.5.1 | mazpie/vime-pytorch | fef62d9d700886622d9ca8c2234ad1718c10f553 |
1.8 | from typing import Dict, Optional
import torch
from kornia.augmentation._2d.geometric.base import GeometricAugmentationBase2D
from kornia.geometry.transform import get_tps_transform, warp_image_tps
class RandomThinPlateSpline(GeometricAugmentationBase2D):
r"""Add random noise to the Thin Plate Spline algorithm.
.. image:: _static/img/RandomThinPlateSpline.png
Args:
scale: the scale factor to apply to the destination points.
align_corners: Interpolation flag used by ``grid_sample``.
mode: Interpolation mode used by `grid_sample`. Either 'bilinear' or 'nearest'.
return_transform: if ``True`` return the matrix describing the transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.
same_on_batch: apply the same transformation across the batch.
p: probability of applying the transformation.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
.. note::
This function internally uses :func:`kornia.geometry.transform.warp_image_tps`.
Examples:
>>> img = torch.ones(1, 1, 2, 2)
>>> out = RandomThinPlateSpline()(img)
>>> out.shape
torch.Size([1, 1, 2, 2])
    To apply the exact augmentation again, you may take advantage of the previous parameter state:
>>> input = torch.randn(1, 3, 32, 32)
>>> aug = RandomThinPlateSpline(p=1.)
>>> (aug(input) == aug(input, params=aug._params)).all()
tensor(True)
"""
def __init__(
self,
scale: float = 0.2,
align_corners: bool = False,
return_transform: bool = False,
same_on_batch: bool = False,
p: float = 0.5,
keepdim: bool = False,
) -> None:
super().__init__(
p=p, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim
)
self.flags = dict(align_corners=align_corners)
self.dist = torch.distributions.Uniform(-scale, scale)
def generate_parameters(self, shape: torch.Size) -> Dict[str, torch.Tensor]:
B, _, _, _ = shape
src = torch.tensor([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0], [0.0, 0.0]]]).repeat(B, 1, 1) # Bx5x2
dst = src + self.dist.rsample(src.shape)
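        # destination points are the source control points jittered by uniform noise in [-scale, scale]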
return dict(src=src, dst=dst)
# TODO: It is incorrect to return identity
def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
return self.identity_matrix(input)
def apply_transform(
self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
) -> torch.Tensor:
src = params["src"].to(input)
dst = params["dst"].to(input)
# NOTE: warp_image_tps need to use inverse parameters
kernel, affine = get_tps_transform(dst, src)
return warp_image_tps(input, src, kernel, affine, self.flags["align_corners"])
| [
"torch.distributions.Uniform",
"torch.tensor"
] | 1.8.1 | dichen-cd/kornia | dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f |
1.5 | import argparse
import sys
import os
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
plt.switch_backend('agg')
sys.path.insert(0, '../utils') # If that is the way to include paths for this project, then why not also for 'backbone'?
sys.path.insert(0, '../eval')
sys.path.insert(0, '../backbone')
from dataset_3d import *
from model_3d import *
from resnet_2d3d import neq_load_customized
from augmentation import *
from utils import AverageMeter, save_checkpoint, denorm, calc_topk_accuracy
import torch
import torch.optim as optim
from torch.utils import data
from torchvision import transforms
import torchvision.utils as vutils
# This way, cuda optimizes for the hardware available, if input size is always equal.
# torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser()
parser.add_argument('--net', default='resnet18', type=str)
parser.add_argument('--model', default='dpc-rnn', type=str)
parser.add_argument('--dataset', default='nturgbd', type=str)
parser.add_argument('--seq_len', default=5, type=int, help='number of frames in each video block')
parser.add_argument('--num_seq', default=6, type=int, help='number of video blocks')
parser.add_argument('--pred_step', default=2, type=int)
parser.add_argument('--ds', default=1, type=int, help='frame downsampling rate')
parser.add_argument('--batch_size', default=96, type=int)
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--wd', default=1e-5, type=float, help='weight decay')
parser.add_argument('--resume', default='', type=str, help='path of model to resume')
parser.add_argument('--pretrain', default='', type=str, help='path of pretrained model')
parser.add_argument('--epochs', default=10, type=int, help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--gpu', default=[0,2], type=int, nargs='+')
parser.add_argument('--print_freq', default=5, type=int, help='frequency of printing output during training')
parser.add_argument('--reset_lr', action='store_true', help='Reset learning rate when resume training?')
parser.add_argument('--prefix', default='tmp', type=str, help='prefix of checkpoint filename')
parser.add_argument('--train_what', default='all', type=str)
parser.add_argument('--img_dim', default=128, type=int)
parser.add_argument('--train_csv',
default=os.path.expanduser("~/datasets/nturgbd/project_specific/dpc_converted/train_set.csv"),
type=str)
parser.add_argument('--test_csv',
default=os.path.expanduser("~/datasets/nturgbd/project_specific/dpc_converted/test_set.csv"),
type=str)
def main():
torch.manual_seed(0)
np.random.seed(0)
global args;
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # NVIDIA-SMI uses PCI_BUS_ID device order, but CUDA orders graphics devices by speed by default (fastest first).
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(id) for id in args.gpu])
print ('Cuda visible devices: {}'.format(os.environ["CUDA_VISIBLE_DEVICES"]))
print ('Available device count: {}'.format(torch.cuda.device_count()))
args.gpu = list(range(torch.cuda.device_count())) # Really weird: In Pytorch 1.2, the device ids start from 0 on the visible devices.
print("Note: At least in Pytorch 1.2, device ids are reindexed on the visible devices and not the same as in nvidia-smi.")
for i in args.gpu:
print("Using Cuda device {}: {}".format(i, torch.cuda.get_device_name(i)))
print("Cuda is available: {}".format(torch.cuda.is_available()))
global cuda;
cuda = torch.device('cuda')
### dpc model ###
if args.model == 'dpc-rnn':
model = DPC_RNN(sample_size=args.img_dim,
num_seq=args.num_seq,
seq_len=args.seq_len,
network=args.net,
pred_step=args.pred_step)
else: raise ValueError('wrong model!')
# Data Parallel uses a master device (default gpu 0) and performs scatter gather operations on batches and resulting gradients.
model = nn.DataParallel(model) # Distributes batches on mutiple devices to train model in parallel automatically.
model = model.to(cuda) # Sends model to device 0, other gpus are used automatically.
global criterion
criterion = nn.CrossEntropyLoss() # Contrastive loss is basically CrossEntropyLoss with vector similarity and temperature.
### optimizer ###
if args.train_what == 'last':
for name, param in model.module.resnet.named_parameters():
param.requires_grad = False
else: pass # train all layers
print('\n===========Check Grad============')
for name, param in model.named_parameters():
print(name, param.requires_grad)
print('=================================\n')
params = model.parameters()
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)
args.old_lr = None
best_acc = 0
global iteration
iteration = 0
### restart training ###
if args.resume:
if os.path.isfile(args.resume):
args.old_lr = float(re.search('_lr(.+?)_', args.resume).group(1))
print("=> loading resumed checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
args.start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
best_acc = checkpoint['best_acc']
# I assume this copies the *cpu located* parameters to the CUDA model automatically?
model.load_state_dict(checkpoint['state_dict'])
if not args.reset_lr: # if didn't reset lr, load old optimizer
optimizer.load_state_dict(checkpoint['optimizer'])
else: print('==== Change lr from %f to %f ====' % (args.old_lr, args.lr))
print("=> loaded resumed checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("[Warning] no checkpoint found at '{}'".format(args.resume))
if args.pretrain:
if os.path.isfile(args.pretrain):
print("=> loading pretrained checkpoint '{}'".format(args.pretrain))
checkpoint = torch.load(args.pretrain, map_location=torch.device('cpu'))
model = neq_load_customized(model, checkpoint['state_dict'])
print("=> loaded pretrained checkpoint '{}' (epoch {})"
.format(args.pretrain, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.pretrain))
### load data ###
if args.dataset == 'ucf101': # designed for ucf101, short size=256, rand crop to 224x224 then scale to 128x128
transform = transforms.Compose([
RandomHorizontalFlip(consistent=True),
RandomCrop(size=224, consistent=True),
Scale(size=(args.img_dim, args.img_dim)),
RandomGray(consistent=False, p=0.5),
ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),
ToTensor(),
Normalize()
])
elif args.dataset == 'k400': # designed for kinetics400, short size=150, rand crop to 128x128
transform = transforms.Compose([
RandomSizedCrop(size=args.img_dim, consistent=True, p=1.0),
RandomHorizontalFlip(consistent=True),
RandomGray(consistent=False, p=0.5),
ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),
ToTensor(),
Normalize()
])
elif args.dataset == 'nturgbd': # designed for nturgbd, short size=150, rand crop to 128x128
transform = transforms.Compose([
RandomSizedCrop(size=args.img_dim, consistent=True, p=1.0),
RandomHorizontalFlip(consistent=True),
RandomGray(consistent=False, p=0.5),
ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),
ToTensor(),
Normalize()
])
train_loader = get_data(transform, 'train')
val_loader = get_data(transform, 'val')
# setup tools
    global de_normalize
    de_normalize = denorm()
    global img_path
    img_path, model_path = set_path(args)
global writer_train
try: # old version
writer_val = SummaryWriter(log_dir=os.path.join(img_path, 'val'))
writer_train = SummaryWriter(log_dir=os.path.join(img_path, 'train'))
except: # v1.7
writer_val = SummaryWriter(logdir=os.path.join(img_path, 'val'))
writer_train = SummaryWriter(logdir=os.path.join(img_path, 'train'))
### main loop ###
for epoch in range(args.start_epoch, args.epochs):
train_loss, train_acc, train_accuracy_list = train(train_loader, model, optimizer, epoch)
val_loss, val_acc, val_accuracy_list = validate(val_loader, model, epoch)
# save curve
writer_train.add_scalar('global/loss', train_loss, epoch)
writer_train.add_scalar('global/accuracy', train_acc, epoch)
writer_val.add_scalar('global/loss', val_loss, epoch)
writer_val.add_scalar('global/accuracy', val_acc, epoch)
writer_train.add_scalar('accuracy/top1', train_accuracy_list[0], epoch)
writer_train.add_scalar('accuracy/top3', train_accuracy_list[1], epoch)
writer_train.add_scalar('accuracy/top5', train_accuracy_list[2], epoch)
writer_val.add_scalar('accuracy/top1', val_accuracy_list[0], epoch)
writer_val.add_scalar('accuracy/top3', val_accuracy_list[1], epoch)
writer_val.add_scalar('accuracy/top5', val_accuracy_list[2], epoch)
# save check_point
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
save_checkpoint({'epoch': epoch + 1,
'net': args.net,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
'iteration': iteration},
is_best, filename=os.path.join(model_path, 'epoch%s.pth.tar' % str(epoch + 1)), keep_all=False)
print('Training from ep %d to ep %d finished' % (args.start_epoch, args.epochs))
def process_output(mask):
'''task mask as input, compute the target for contrastive loss'''
# dot product is computed in parallel gpus, so get less easy neg, bounded by batch size in each gpu'''
# mask meaning: -2: omit, -1: temporal neg (hard), 0: easy neg, 1: pos, -3: spatial neg
(B, NP, SQ, B2, NS, _) = mask.size() # [B, P, SQ, B, N, SQ]
target = mask == 1
target.requires_grad = False
return target, (B, B2, NS, NP, SQ)
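# Illustrative sketch (not part of the original script): the contrastive objective used
# below is ordinary CrossEntropyLoss over a flattened similarity matrix. This helper
# mirrors the flattening performed in train() and validate(); the function name and
# signature are assumptions made for this example only.
def _contrastive_loss_sketch(score, mask, criterion=None):
    """score, mask: 6-D tensors of shape [B, P, SQ, B, N, SQ] as produced by the model."""
    if criterion is None:
        criterion = nn.CrossEntropyLoss()
    B, NP, SQ, B2, NS, _ = mask.size()
    target = (mask == 1)  # positives are marked with 1 in the mask
    score_flat = score.view(B * NP * SQ, B2 * NS * SQ)
    target_flat = target.view(B * NP * SQ, B2 * NS * SQ).float().argmax(dim=1)
    return criterion(score_flat, target_flat)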
def train(data_loader, model, optimizer, epoch):
losses = AverageMeter()
accuracy = AverageMeter()
accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]
model.train()
global iteration
for idx, input_seq in enumerate(data_loader):
tic = time.time()
input_seq = input_seq.to(cuda)
B = input_seq.size(0)
[score_, mask_] = model(input_seq)
# visualize
        if (iteration == 0) or (iteration == args.print_freq):  # Likely a bug: images are only written at iteration 0 and once at iteration == print_freq, not at every print-frequency interval.
if B > 2: input_seq = input_seq[0:2, :]
writer_train.add_image('input_seq',
de_normalize(vutils.make_grid(
input_seq.transpose(2, 3).contiguous().view(-1, 3, args.img_dim, args.img_dim),
nrow=args.num_seq * args.seq_len)),
iteration)
del input_seq
if idx == 0:
target_, (_, B2, NS, NP, SQ) = process_output(mask_)
# TODO: adapt logic for two stream network.
# score is a 6d tensor: [B, P, SQ, B, N, SQ]
score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_flattened.double()
target_flattened = target_flattened.argmax(dim=1)
loss = criterion(score_flattened, target_flattened)
top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))
accuracy_list[0].update(top1.item(), B)
accuracy_list[1].update(top3.item(), B)
accuracy_list[2].update(top5.item(), B)
losses.update(loss.item(), B)
accuracy.update(top1.item(), B)
del score_
optimizer.zero_grad()
loss.backward()
optimizer.step()
del loss
if idx % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.6f} ({loss.local_avg:.4f})\t'
'Acc: top1 {3:.4f}; top3 {4:.4f}; top5 {5:.4f} T:{6:.2f}\t'.format(
epoch, idx, len(data_loader), top1, top3, top5, time.time() - tic, loss=losses))
writer_train.add_scalar('local/loss', losses.val, iteration)
writer_train.add_scalar('local/accuracy', accuracy.val, iteration)
iteration += 1
return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]
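# Illustrative sketch (the real helper is imported from the project's utils): standard
# top-k accuracy over the flattened (prediction, target) pairs, as consumed in train()
# and validate() above. The name and exact reduction are assumptions for this example.
def _calc_topk_accuracy_sketch(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)        # indices of the top-k scores per row
    correct = pred.t().eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum() / batch_size for k in topk]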
def validate(data_loader, model, epoch):
losses = AverageMeter()
accuracy = AverageMeter()
accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]
model.eval()
with torch.no_grad():
for idx, input_seq in tqdm(enumerate(data_loader), total=len(data_loader)):
input_seq = input_seq.to(cuda)
B = input_seq.size(0)
[score_, mask_] = model(input_seq)
del input_seq
if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)
# [B, P, SQ, B, N, SQ]
score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_flattened.double()
target_flattened = target_flattened.argmax(dim=1)
loss = criterion(score_flattened, target_flattened)
top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))
losses.update(loss.item(), B)
accuracy.update(top1.item(), B)
accuracy_list[0].update(top1.item(), B)
accuracy_list[1].update(top3.item(), B)
accuracy_list[2].update(top5.item(), B)
print('[{0}/{1}] Loss {loss.local_avg:.4f}\t'
'Acc: top1 {2:.4f}; top3 {3:.4f}; top5 {4:.4f} \t'.format(
epoch, args.epochs, *[i.avg for i in accuracy_list], loss=losses))
return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]
def get_data(transform, mode='train'):
print('Loading data for "%s" ...' % mode)
if args.dataset == 'k400':
use_big_K400 = args.img_dim > 140
dataset = Kinetics400_full_3d(mode=mode,
transform=transform,
seq_len=args.seq_len,
num_seq=args.num_seq,
downsample=5,
big=use_big_K400)
elif args.dataset == 'ucf101':
dataset = UCF101_3d(mode=mode,
transform=transform,
seq_len=args.seq_len,
num_seq=args.num_seq,
downsample=args.ds)
elif args.dataset == 'nturgbd':
dataset = NTURGBD_3D(mode=mode,
transform=transform,
seq_len=args.seq_len,
num_seq=args.num_seq,
downsample=args.ds,
train_csv=args.train_csv,
val_csv=args.test_csv)
else:
raise ValueError('dataset not supported')
sampler = data.RandomSampler(dataset)
    if mode == 'train' or mode == 'val':
        data_loader = data.DataLoader(dataset,
                                      batch_size=args.batch_size,
                                      sampler=sampler,
                                      shuffle=False,
                                      num_workers=32,
                                      pin_memory=True,
                                      drop_last=True)
print('"%s" dataset size: %d' % (mode, len(dataset)))
return data_loader
def set_path(args):
if args.resume: exp_path = os.path.dirname(os.path.dirname(args.resume))
else:
exp_path = 'log_{args.prefix}/{args.dataset}-{args.img_dim}_{0}_{args.model}_\
bs{args.batch_size}_lr{1}_seq{args.num_seq}_pred{args.pred_step}_len{args.seq_len}_ds{args.ds}_\
train-{args.train_what}{2}'.format(
'r%s' % args.net[6::], \
args.old_lr if args.old_lr is not None else args.lr, \
'_pt=%s' % args.pretrain.replace('/', '-') if args.pretrain else '', \
args=args)
img_path = os.path.join(exp_path, 'img')
model_path = os.path.join(exp_path, 'model')
if not os.path.exists(img_path): os.makedirs(img_path)
if not os.path.exists(model_path): os.makedirs(model_path)
return img_path, model_path
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.utils.data.RandomSampler",
"torch.no_grad",
"torch.optim.Adam",
"torch.cuda.get_device_name",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.5.0 | simplexsigil/DPC | bf06939e302d9393b22f9b424abd281c535d8dbd |
1.6 | from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import torch
from .bandits import DataBasedBandit
from .utils import download_data
URL = "https://storage.googleapis.com/bandits_datasets/raw_stock_contexts"
class FinancialDataBandit(DataBasedBandit):
"""A contextual bandit based on `Financial Stock dataset`.
Source:
https://github.com/tensorflow/models/tree/archive/research/deep_contextual_bandits
Citation:
        Riquelme, Tucker, Snoek. Deep Bayesian bandits showdown: An empirical comparison of Bayesian deep networks for Thompson sampling. In Proceedings of the 6th International Conference on Learning Representations, 2018.
License:
Apache-2.0 License
Args:
path (str, optional): Path to the data. Defaults to "./data/Financial/".
download (bool, optional): Whether to download the data. Defaults to False.
force_download (bool, optional): Whether to force download even if file exists.
Defaults to False.
url (Union[str, None], optional): URL to download data from. Defaults to None
which implies use of source URL.
device (str): Device to use for tensor operations.
"cpu" for cpu or "cuda" for cuda. Defaults to "cpu".
Attributes:
n_actions (int): Number of actions available.
context_dim (int): The length of context vector.
len (int): The number of examples (context, reward pairs) in the dataset.
device (torch.device): Device to use for tensor operations.
Raises:
FileNotFoundError: If file is not found at specified path.
"""
def __init__(self, **kwargs):
super(FinancialDataBandit, self).__init__(kwargs.get("device", "cpu"))
self.n_actions = 8
path = kwargs.get("path", "./data/Financial/")
download = kwargs.get("download", None)
force_download = kwargs.get("force_download", None)
url = kwargs.get("url", URL)
if download:
fpath = download_data(path, url, force_download)
self.df = pd.read_csv(
fpath, header=None, skiprows=[0], sep=" ", dtype=np.float32
).dropna()
else:
fpath = Path(path).joinpath("raw_stock_contexts")
self.df = pd.read_csv(
fpath, header=None, skiprows=[0], sep=" ", dtype=np.float32
).dropna()
self.context_dim = self.df.shape[1]
self.len = len(self.df)
self._generate_rewards()
def _generate_rewards(self):
# Vector with additive noise levels for each action
noise_stds = [0.01 * (i + 1) for i in range(self.n_actions)]
betas = np.random.uniform(-1, 1, (self.context_dim, self.n_actions))
betas /= np.linalg.norm(betas, axis=0)
mean_rewards = np.dot(self.df, betas)
noise = np.random.normal(scale=noise_stds, size=mean_rewards.shape)
self.rewards = mean_rewards + noise
self.max_rewards = np.max(self.rewards, axis=1)
def reset(self) -> torch.Tensor:
"""Reset bandit by shuffling indices and get new context.
Returns:
torch.Tensor: Current context selected by bandit.
"""
self._reset()
self.df = self.df.sample(frac=1).reset_index(drop=True)
self._generate_rewards()
return self._get_context()
def _compute_reward(self, action: int) -> Tuple[int, int]:
"""Compute the reward for a given action.
Args:
action (int): The action to compute reward for.
Returns:
Tuple[int, int]: Computed reward.
"""
r = self.rewards[self.idx, action]
max_r = self.max_rewards[self.idx]
return r, max_r
def _get_context(self) -> torch.Tensor:
"""Get the vector for current selected context.
Returns:
torch.Tensor: Current context vector.
"""
return torch.tensor(
self.df.iloc[self.idx],
device=self.device,
dtype=torch.float,
)
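# Minimal usage sketch (illustrative only, not part of the original module): contexts come
# from the stock CSV, rewards are linear in the context with per-action Gaussian noise,
# and the regret of an action is max_reward - reward. The path below is the default and
# assumes the data file is already present; the helper name is an assumption.
def _usage_sketch():
    bandit = FinancialDataBandit(path="./data/Financial/", download=False)
    context = bandit.reset()                          # context vector for the current step
    reward, max_reward = bandit._compute_reward(0)    # reward of action 0 at the current index
    return context.shape, reward, max_reward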
| [
"torch.tensor"
] | 1.6.0 | IBM/sau-explore | dfeff8192292afeca7c17927684a3ed9fe65b97f |
1.4 | from math import ceil
from typing import Dict, Any, List, Optional
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from core.algorithms.onpolicy_sync.losses import PPO
from core.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from core.base_abstractions.experiment_config import ExperimentConfig
from core.base_abstractions.sensor import SensorSuite
from core.base_abstractions.task import TaskSampler
from plugins.ithor_plugin.ithor_sensors import RGBSensorThor, GoalObjectTypeThorSensor
from plugins.ithor_plugin.ithor_task_samplers import ObjectNavTaskSampler
from plugins.ithor_plugin.ithor_tasks import ObjectNavTask
from projects.objectnav_baselines.models.object_nav_models import (
ObjectNavBaselineActorCritic,
)
from utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay
class ObjectNavThorPPOExperimentConfig(ExperimentConfig):
"""A simple object navigation experiment in THOR.
Training with PPO.
"""
# A simple setting, train/valid/test are all the same single scene
# and we're looking for a single object
OBJECT_TYPES = ["Tomato"]
TRAIN_SCENES = ["FloorPlan1_physics"]
VALID_SCENES = ["FloorPlan1_physics"]
TEST_SCENES = ["FloorPlan1_physics"]
# Setting up sensors and basic environment details
SCREEN_SIZE = 224
SENSORS = [
RGBSensorThor(
height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True,
),
GoalObjectTypeThorSensor(object_types=OBJECT_TYPES),
]
ENV_ARGS = {
"player_screen_height": SCREEN_SIZE,
"player_screen_width": SCREEN_SIZE,
"quality": "Very Low",
}
MAX_STEPS = 128
ADVANCE_SCENE_ROLLOUT_PERIOD = None
VALID_SAMPLES_IN_SCENE = 10
TEST_SAMPLES_IN_SCENE = 100
@classmethod
def tag(cls):
return "ObjectNavThorPPO"
@classmethod
def training_pipeline(cls, **kwargs):
ppo_steps = int(1e6)
lr = 2.5e-4
num_mini_batch = 2 if not torch.cuda.is_available() else 6
update_repeats = 4
num_steps = 128
metric_accumulate_interval = cls.MAX_STEPS * 10 # Log every 10 max length tasks
save_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 1.0
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def machine_params(cls, mode="train", **kwargs):
num_gpus = torch.cuda.device_count()
has_gpu = num_gpus != 0
if mode == "train":
nprocesses = 20 if has_gpu else 4
gpu_ids = [0] if has_gpu else []
elif mode == "valid":
nprocesses = 1
gpu_ids = [1 % num_gpus] if has_gpu else []
elif mode == "test":
nprocesses = 1
gpu_ids = [0] if has_gpu else []
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
return {"nprocesses": nprocesses, "gpu_ids": gpu_ids}
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return ObjectNavBaselineActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
goal_sensor_uuid="goal_object_type_ind",
hidden_size=512,
object_type_embedding_dim=8,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return ObjectNavTaskSampler(**kwargs)
@staticmethod
def _partition_inds(n: int, num_parts: int):
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes: List[str],
process_ind: int,
total_processes: int,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
if total_processes > len(scenes): # oversample some scenes -> bias
if total_processes % len(scenes) != 0:
print(
"Warning: oversampling some of the scenes to feed all processes."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
else:
if len(scenes) % total_processes != 0:
print(
"Warning: oversampling some of the scenes to feed all processes."
" You can avoid this by setting a number of workers divisor of the number of scenes"
)
inds = self._partition_inds(len(scenes), total_processes)
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"object_types": self.OBJECT_TYPES,
"env_args": self.ENV_ARGS,
"max_steps": self.MAX_STEPS,
"sensors": self.SENSORS,
"action_space": gym.spaces.Discrete(
len(ObjectNavTask.class_action_names())
),
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.TRAIN_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = "manual"
res["env_args"] = {}
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.VALID_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = self.VALID_SAMPLES_IN_SCENE
res["max_tasks"] = self.VALID_SAMPLES_IN_SCENE * len(res["scenes"])
res["env_args"] = {}
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
self.TEST_SCENES,
process_ind,
total_processes,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_period"] = self.TEST_SAMPLES_IN_SCENE
res["max_tasks"] = self.TEST_SAMPLES_IN_SCENE * len(res["scenes"])
res["env_args"] = {}
res["env_args"].update(self.ENV_ARGS)
res["env_args"]["x_display"] = (
("0.%d" % devices[process_ind % len(devices)]) if len(devices) > 0 else None
)
return res
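# Worked example (illustrative only): _partition_inds splits scene indices almost evenly
# across worker processes. With 10 scenes and 4 processes the boundaries are
# np.round(np.linspace(0, 10, 5)) -> [0, 2, 5, 8, 10], i.e. chunks of sizes 2, 3, 3, 2,
# and process i receives scenes[inds[i]:inds[i + 1]].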
| [
"torch.cuda.is_available",
"torch.cuda.device_count"
] | 1.4.0 | prithv1/allenact | f29dd6f0ec62425b02ca07fee815b1a82627a28e |
1.9 | from typing import List, Any
import pytorch_lightning as pl
import torch
from pytorch_lightning import Trainer
from torch.optim import Adam
from torchmetrics import MetricCollection
from new.data.report import Report
from new.model.lstm_tagger import LstmTagger
from new.training.data import ReportsDataModule
from new.training.metrics import Precision, Recall, TopkAccuracy
from commode_utils.losses import SequenceCrossEntropyLoss
class TrainingModule(pl.LightningModule):
def __init__(self, tagger: LstmTagger):
super().__init__()
self.tagger = tagger
self.train_metrics = MetricCollection([Precision(), Recall(), TopkAccuracy(1)])
self.val_metrics = MetricCollection([Precision(), Recall(), TopkAccuracy(1)])
self.softmax = torch.nn.Softmax(dim=-1)
self.celoss = SequenceCrossEntropyLoss(reduction="batch-mean", pad_idx=2)
def training_step(self, batch, batch_idx):
reports, target, masks = batch
mask = torch.cat(masks, dim=1)
if self.tagger.with_crf:
emissions = torch.cat([self.tagger.calc_emissions(report, mask) for report, mask in zip(reports, masks)],
dim=1)
loss = -self.tagger.crf(emissions, target, mask)
else:
scores = self.tagger.forward(reports, masks)
loss = self.celoss(scores, target)
with torch.no_grad():
scores = self.tagger.forward(reports, masks)
preds = scores.argmax(dim=-1)
scores = self.softmax(scores)
self.train_metrics.update(preds, target, mask, scores=scores)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, *args):
reports, target, masks = batch
mask = torch.cat(masks, dim=1)
if self.tagger.with_crf:
emissions = torch.cat([self.tagger.calc_emissions(report, mask) for report, mask in zip(reports, masks)],
dim=1)
loss = -self.tagger.crf(emissions, target, mask)
else:
scores = self.tagger.forward(reports, masks)
loss = self.celoss(scores, target)
with torch.no_grad():
scores = self.tagger.forward(reports, masks)
preds = scores.argmax(dim=-1)
scores = self.softmax(scores)
self.val_metrics.update(preds, target, mask, scores=scores)
return loss
def validation_epoch_end(self, outputs: List[Any]) -> None:
super().validation_epoch_end(outputs)
self.log("val_metrics", self.val_metrics.compute())
print(self.val_metrics.compute())
self.val_metrics.reset()
def training_epoch_end(self, outputs: List[Any]) -> None:
super().training_epoch_end(outputs)
self.log("train_metrics", self.train_metrics.compute())
self.train_metrics.reset()
def configure_optimizers(self):
return Adam(self.parameters(), lr=1e-4, weight_decay=1e-5)
def train_lstm_tagger(tagger: LstmTagger, reports: List[Report], target: List[List[int]], batch_size: int,
max_len: int) -> LstmTagger:
datamodule = ReportsDataModule(reports, target, batch_size, max_len)
model = TrainingModule(tagger)
trainer = Trainer(gpus=1)
trainer.fit(model, datamodule)
return tagger
| [
"torch.cat",
"torch.no_grad",
"torch.nn.Softmax"
] | 1.9.0 | lissrbay/bugml | 6a3823e32c176de9d3a47416a220e7d83d38763d |
1.1 | import torch
import torch.nn as nn
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from .. import builder
from ..registry import DETECTORS
from .base import BaseDetector
from .test_mixins import BBoxTestMixin, MaskTestMixin, RPNTestMixin
@DETECTORS.register_module
class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
MaskTestMixin):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
keypoint_roi_extractor=None,###
keypoint_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(TwoStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if bbox_head is not None:
self.bbox_roi_extractor = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_head = builder.build_head(bbox_head)
if mask_head is not None:
if mask_roi_extractor is not None:
self.mask_roi_extractor = builder.build_roi_extractor(
mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = builder.build_head(mask_head)
###
if keypoint_head is not None:
if keypoint_roi_extractor is not None:
self.keypoint_roi_extractor = builder.build_roi_extractor(
keypoint_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.keypoint_roi_extractor = self.bbox_roi_extractor
self.keypoint_head = builder.build_head(keypoint_head)
###
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(TwoStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
###
if self.with_keypoint:
self.keypoint_head.init_weights()
if not self.share_roi_extractor:
self.keypoint_roi_extractor.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
        See `mmdetection/tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).cuda()
# bbox head
rois = bbox2roi([proposals])
if self.with_bbox:
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
outs = outs + (cls_score, bbox_pred)
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
            outs = outs + (mask_pred, )
        ### keypoint head
        if self.with_keypoint:
            keypoint_rois = rois[:100]
            keypoint_feats = self.keypoint_roi_extractor(
                x[:self.keypoint_roi_extractor.num_inputs], keypoint_rois)
            if self.with_shared_head:
                keypoint_feats = self.shared_head(keypoint_feats)
            keypoint_pred = self.keypoint_head(keypoint_feats)
            outs = outs + (keypoint_pred, )
        return outs
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_keypoints=None,###
proposals=None):
"""
Args:
img (Tensor): of shape (B, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
            img_meta (list[dict]): list of image info dicts where each dict has:
                'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): each item is the ground-truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i],
gt_bboxes[i],
gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
if self.with_bbox:
rois = bbox2roi([res.bboxes for res in sampling_results])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_targets = self.bbox_head.get_target(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg.rcnn)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*bbox_targets)
losses.update(loss_bbox)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_targets = self.mask_head.get_target(sampling_results,
gt_masks,
self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_pred, mask_targets,
pos_labels)
losses.update(loss_mask)
### keypoint head forward and loss
if self.with_keypoint:
if not self.share_roi_extractor:
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
keypoint_feats = self.keypoint_roi_extractor(
x[:self.keypoint_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
keypoint_feats = self.shared_head(keypoint_feats)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
keypoint_feats = bbox_feats[pos_inds]
keypoint_pred = self.keypoint_head(keypoint_feats)
keypoint_targets = self.keypoint_head.get_target(sampling_results,
gt_keypoints,
self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_keypoint = self.keypoint_head.loss(keypoint_pred, keypoint_targets,
pos_labels)
losses.update(loss_keypoint)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, "Bbox head must be implemented."
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
        if self.with_mask:
            segm_results = self.simple_test_mask(
                x, img_meta, det_bboxes, det_labels, rescale=rescale)
            return bbox_results, segm_results
        ###
        elif self.with_keypoint:
            kp_results = self.simple_test_keypoint(
                x, img_meta, det_bboxes, det_labels, rescale=rescale)
            return bbox_results, kp_results
        else:
            return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
proposal_list = self.aug_test_rpn(
self.extract_feats(imgs), img_metas, self.test_cfg.rpn)
det_bboxes, det_labels = self.aug_test_bboxes(
self.extract_feats(imgs), img_metas, proposal_list,
self.test_cfg.rcnn)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(
self.extract_feats(imgs), img_metas, det_bboxes, det_labels)
return bbox_results, segm_results
else:
return bbox_results
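# Illustrative sketch (the real implementation lives in mmdet.core): bbox2roi, used by the
# bbox/mask/keypoint heads above, prepends the per-image batch index to each box so RoI
# extractors can operate on one flat (N, 5) tensor. A rough equivalent, named here only
# for illustration:
def _bbox2roi_sketch(bbox_list):
    """bbox_list: list of per-image (n_i, >=4) tensors -> (sum n_i, 5) roi tensor."""
    rois_list = []
    for img_id, bboxes in enumerate(bbox_list):
        if bboxes.size(0) > 0:
            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
            rois_list.append(torch.cat([img_inds, bboxes[:, :4]], dim=-1))
        else:
            rois_list.append(bboxes.new_zeros((0, 5)))
    return torch.cat(rois_list, dim=0)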
| [
"torch.zeros",
"torch.cat",
"torch.randn",
"torch.ones"
] | 1.1 | nemonameless/mmkp | 3758c48c7a6568e04d5a01b79f12baf12abcc420 |
1.6 | # Code modified from https://github.com/kuangliu/pytorch-cifar
'''ResNet in PyTorch.
BasicBlock and Bottleneck module is from the original ResNet paper:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
PreActBlock and PreActBottleneck module is from the later paper:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out)
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out)
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, nc=3):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = conv3x3(nc, 64)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, lin=0, lout=5):
out = x
if lin < 1 and lout > -1:
out = self.conv1(out)
out = self.bn1(out)
out = F.relu(out)
if lin < 2 and lout > 0:
out = self.layer1(out)
if lin < 3 and lout > 1:
out = self.layer2(out)
if lin < 4 and lout > 2:
out = self.layer3(out)
if lin < 5 and lout > 3:
out = self.layer4(out)
if lout > 4:
# out = F.avg_pool2d(out, 4)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(num_classes=10, nc=3):
return ResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_classes, nc=nc)
def ResNet34(num_classes=10):
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
def ResNet50(num_classes=10):
return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
def ResNet101(num_classes=10):
return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
def ResNet152(num_classes=10):
return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
def test():
net = ResNet18()
y = net(Variable(torch.randn(1, 3, 32, 32)))
print(y.size())
def resnet():
return ResNet18()
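# Usage sketch (illustrative only): the lin/lout arguments of ResNet.forward run only a
# slice of the network, which is useful e.g. for mixing in hidden space. The helper name
# below is an assumption made for this example.
def _partial_forward_example():
    net = ResNet18(num_classes=10, nc=3)
    x = torch.randn(2, 3, 32, 32)
    h = net(x, lin=0, lout=1)      # conv1/bn1/relu + layer1 features
    out = net(h, lin=2, lout=5)    # resume from layer2 through the classifier head
    return out.shape               # torch.Size([2, 10])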
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.randn"
] | 1.6.0 | dmitryshendryk/tantum | afd07e7a52d65338297a4f46d26e5241d3e756dc |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
RobertaConfig,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.models.roberta.modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaEmbeddings,
create_position_ids_from_input_ids,
)
class RobertaModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = RobertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = RobertaModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = RobertaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RobertaForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = RobertaForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RobertaModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaModel,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (RobertaForCausalLM,) if is_torch_available() else ()
def setUp(self):
self.model_tester = RobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = RobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_create_position_ids_respects_padding_index(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaEmbeddings(config=config)
inputs_embeds = torch.Tensor(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
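# Illustrative sketch (not the library implementation itself): the behaviour the two tests
# above check can be expressed as "number non-padding tokens sequentially, starting at
# padding_idx + 1, and keep padding positions at padding_idx":
#
#     mask = input_ids.ne(padding_idx).int()
#     position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx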
@require_torch
class RobertaModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = RobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_no_head(self):
model = RobertaModel.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_classification_head(self):
model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
# roberta.eval()
# expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()
self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| [
"torch.Size",
"torch.eq",
"torch.tensor",
"torch.as_tensor",
"torch.allclose",
"torch.Tensor"
] | 1.0 | HatsuneMiku4/transformers | ee5a5b6be825a9e3ac539485e7dea7601ad57653 |
0.4 | """
A ``TokenEmbedder`` which uses one of the BERT models
(https://github.com/google-research/bert)
to produce embeddings.
At its core it uses Hugging Face's PyTorch implementation
(https://github.com/huggingface/pytorch-pretrained-BERT),
so thanks to them!
"""
from typing import Dict
import logging
import torch
import torch.nn.functional as F
from pytorch_pretrained_bert.modeling import BertModel
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn import util
logger = logging.getLogger(__name__)
class PretrainedBertModel:
"""
In some instances you may want to load the same BERT model twice
(e.g. to use as a token embedder and also as a pooling layer).
This factory provides a cache so that you don't actually have to load the model twice.
"""
_cache: Dict[str, BertModel] = {}
@classmethod
def load(cls, model_name: str, cache_model: bool = True) -> BertModel:
if model_name in cls._cache:
return PretrainedBertModel._cache[model_name]
model = BertModel.from_pretrained(model_name)
if cache_model:
cls._cache[model_name] = model
return model
class BertEmbedder(TokenEmbedder):
"""
A ``TokenEmbedder`` that produces BERT embeddings for your tokens.
Should be paired with a ``BertIndexer``, which produces wordpiece ids.
Most likely you probably want to use ``PretrainedBertEmbedder``
for one of the named pretrained models, not this base class.
Parameters
----------
bert_model: ``BertModel``
The BERT model being wrapped.
top_layer_only: ``bool``, optional (default = ``False``)
        If ``True``, then only return the top layer instead of applying the scalar mix.
max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Assuming the inputs are windowed
and padded appropriately by this length, the embedder will split them into a
large batch, feed them into BERT, and recombine the output as if it was a
longer sequence.
num_start_tokens : int, optional (default: 1)
The number of starting special tokens input to BERT (usually 1, i.e., [CLS])
num_end_tokens : int, optional (default: 1)
The number of ending tokens input to BERT (usually 1, i.e., [SEP])
"""
def __init__(self,
bert_model: BertModel,
top_layer_only: bool = False,
max_pieces: int = 512,
num_start_tokens: int = 1,
num_end_tokens: int = 1) -> None:
super().__init__()
self.bert_model = bert_model
self.output_dim = bert_model.config.hidden_size
self.max_pieces = max_pieces
self.num_start_tokens = num_start_tokens
self.num_end_tokens = num_end_tokens
if not top_layer_only:
self._scalar_mix = ScalarMix(bert_model.config.num_hidden_layers,
do_layer_norm=False)
else:
self._scalar_mix = None
def get_output_dim(self) -> int:
return self.output_dim
def forward(self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None,
token_type_ids: torch.LongTensor = None) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s.
"""
# pylint: disable=arguments-differ
batch_size, full_seq_len = input_ids.size(0), input_ids.size(-1)
initial_dims = list(input_ids.shape[:-1])
# The embedder may receive an input tensor that has a sequence length longer than can
# be fit. In that case, we should expect the wordpiece indexer to create padded windows
# of length `self.max_pieces` for us, and have them concatenated into one long sequence.
# E.g., "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ..."
# We can then split the sequence into sub-sequences of that length, and concatenate them
# along the batch dimension so we effectively have one huge batch of partial sentences.
# This can then be fed into BERT without any sentence length issues. Keep in mind
# that the memory consumption can dramatically increase for large batches with extremely
# long sentences.
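# For example, input_ids of shape (2, 1000) with max_pieces = 512 is split
# into windows of shape (2, 512) and (2, 488); the last window is padded up
# to 512 and the windows are concatenated along the batch dimension, giving a
# (4, 512) tensor before the BERT call.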
needs_split = full_seq_len > self.max_pieces
last_window_size = 0
if needs_split:
# Split the flattened list by the window size, `max_pieces`
split_input_ids = list(input_ids.split(self.max_pieces, dim=-1))
# We want all sequences to be the same length, so pad the last sequence
last_window_size = split_input_ids[-1].size(-1)
padding_amount = self.max_pieces - last_window_size
split_input_ids[-1] = F.pad(split_input_ids[-1], pad=[0, padding_amount], value=0)
# Now combine the sequences along the batch dimension
input_ids = torch.cat(split_input_ids, dim=0)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
all_encoder_layers, _ = self.bert_model(input_ids=util.combine_initial_dims(input_ids),
token_type_ids=util.combine_initial_dims(token_type_ids),
attention_mask=util.combine_initial_dims(input_mask))
all_encoder_layers = torch.stack(all_encoder_layers)
if needs_split:
# First, unpack the output embeddings into one long sequence again
unpacked_embeddings = torch.split(all_encoder_layers, batch_size, dim=1)
unpacked_embeddings = torch.cat(unpacked_embeddings, dim=2)
# Next, select indices of the sequence such that it will result in embeddings representing the original
# sentence. To capture maximal context, the indices will be the middle part of each embedded window
# sub-sequence (plus any leftover start and final edge windows), e.g.,
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# "[CLS] I went to the very fine [SEP] [CLS] the very fine store to eat [SEP]"
# with max_pieces = 8 should produce max context indices [2, 3, 4, 10, 11, 12] with additional start
# and final windows with indices [0, 1] and [14, 15] respectively.
# Find the stride as half the max pieces, ignoring the special start and end tokens
# Calculate an offset to extract the centermost embeddings of each window
stride = (self.max_pieces - self.num_start_tokens - self.num_end_tokens) // 2
stride_offset = stride // 2 + self.num_start_tokens
first_window = list(range(stride_offset))
max_context_windows = [i for i in range(full_seq_len)
if stride_offset - 1 < i % self.max_pieces < stride_offset + stride]
# Figure out how much of the sequence falls in the final window, treating an exact multiple as one whole self.max_pieces window
if full_seq_len % self.max_pieces == 0:
lookback = self.max_pieces
else:
lookback = full_seq_len % self.max_pieces
final_window_start = full_seq_len - lookback + stride_offset + stride
final_window = list(range(final_window_start, full_seq_len))
select_indices = first_window + max_context_windows + final_window
initial_dims.append(len(select_indices))
recombined_embeddings = unpacked_embeddings[:, :, select_indices]
else:
recombined_embeddings = all_encoder_layers
# Recombine the outputs of all layers
# (layers, batch_size * d1 * ... * dn, sequence_length, embedding_dim)
# recombined = torch.cat(combined, dim=2)
input_mask = (recombined_embeddings != 0).long()
if self._scalar_mix is not None:
mix = self._scalar_mix(recombined_embeddings, input_mask)
else:
mix = recombined_embeddings[-1]
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
dims = initial_dims if needs_split else input_ids.size()
return util.uncombine_initial_dims(mix, dims)
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(offsets2d.size(0),
device=util.get_device_of(mix)).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = mix[range_vector, offsets2d]
return util.uncombine_initial_dims(selected_embeddings, offsets.size())
@TokenEmbedder.register("bert-pretrained")
class PretrainedBertEmbedder(BertEmbedder):
# pylint: disable=line-too-long
"""
Parameters
----------
pretrained_model: ``str``
Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),
or the path to the .tar.gz file with the model weights.
If the name is a key in the list of pretrained models at
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py#L41
the corresponding path will be used; otherwise it will be interpreted as a path or URL.
requires_grad : ``bool``, optional (default = False)
If True, compute gradient of BERT parameters for fine tuning.
top_layer_only: ``bool``, optional (default = ``False``)
If ``True``, then only return the top layer instead of applying the scalar mix.
"""
def __init__(self, pretrained_model: str, requires_grad: bool = False, top_layer_only: bool = False) -> None:
model = PretrainedBertModel.load(pretrained_model)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(bert_model=model, top_layer_only=top_layer_only)
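# Usage sketch (illustrative comment; `input_ids` and `offsets` are
# placeholder tensors produced by a matching BertIndexer, and downloading the
# "bert-base-uncased" weights is assumed to work in this environment):
#
#   embedder = PretrainedBertEmbedder("bert-base-uncased", requires_grad=False)
#   embeddings = embedder(input_ids, offsets=offsets)  # shapes as in BertEmbedder.forward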
| [
"torch.cat",
"torch.stack",
"torch.split",
"torch.zeros_like",
"torch.nn.functional.pad"
] | 0.4.1 | schmmd/allennlp | fbc28cefe03b1ea3ff65300d475d34f5f9629a5c |
0.4 | # pylint: disable=no-self-use,invalid-name
import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertModel
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers.wordpiece_indexer import PretrainedBertIndexer
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import BertBasicWordSplitter
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.bert_token_embedder import BertEmbedder
class TestBertEmbedder(ModelTestCase):
def setUp(self):
super().setUp()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
self.token_indexer = PretrainedBertIndexer(str(vocab_path))
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
self.bert_model = BertModel(config)
self.token_embedder = BertEmbedder(self.bert_model)
def test_without_offsets(self):
input_ids = torch.LongTensor([[3, 5, 9, 1, 2], [1, 5, 0, 0, 0]])
result = self.token_embedder(input_ids)
assert list(result.shape) == [2, 5, 12]
def test_with_offsets(self):
input_ids = torch.LongTensor([[3, 5, 9, 1, 2], [1, 5, 0, 0, 0]])
offsets = torch.LongTensor([[0, 2, 4], [1, 0, 0]])
result = self.token_embedder(input_ids, offsets=offsets)
assert list(result.shape) == [2, 3, 12]
def test_end_to_end(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 4 3 5 6 8 9 2 14 12
sentence1 = "the quickest quick brown fox jumped over the lazy dog"
tokens1 = tokenizer.tokenize(sentence1)
# 2 3 5 6 8 9 2 15 10 11 14 1
sentence2 = "the quick brown fox jumped over the laziest lazy elmo"
tokens2 = tokenizer.tokenize(sentence2)
vocab = Vocabulary()
instance1 = Instance({"tokens": TextField(tokens1, {"bert": self.token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": self.token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
# 16 = [CLS], 17 = [SEP]
assert tokens["bert"].tolist() == [[16, 2, 3, 4, 3, 5, 6, 8, 9, 2, 14, 12, 17, 0],
[16, 2, 3, 5, 6, 8, 9, 2, 15, 10, 11, 14, 1, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[1, 2, 3, 4, 5, 6, 7, 10, 11, 12]]
# No offsets, should get 14 vectors back ([CLS] + 12 token wordpieces + [SEP])
bert_vectors = self.token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 14, 12]
# Offsets, should get 10 vectors back.
bert_vectors = self.token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 10, 12]
# Now try top_layer_only = True
tlo_embedder = BertEmbedder(self.bert_model, top_layer_only=True)
bert_vectors = tlo_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 14, 12]
bert_vectors = tlo_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 10, 12]
def test_padding_for_equal_length_indices(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 5 6 8 9 2 14 12
sentence = "the quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
instance = Instance({"tokens": TextField(tokens, {"bert": self.token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"].tolist() == [[16, 2, 3, 5, 6, 8, 9, 2, 14, 12, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
def test_squad_with_unwordpieceable_passage(self):
# pylint: disable=line-too-long
tokenizer = WordTokenizer()
token_indexer = PretrainedBertIndexer("bert-base-uncased")
passage1 = ("There were four major HDTV systems tested by SMPTE in the late 1970s, "
"and in 1979 an SMPTE study group released A Study of High Definition Television Systems:")
question1 = "Who released A Study of High Definition Television Systems?"
passage2 = ("Broca, being what today would be called a neurosurgeon, "
"had taken an interest in the pathology of speech. He wanted "
"to localize the difference between man and the other animals, "
"which appeared to reside in speech. He discovered the speech "
"center of the human brain, today called Broca's area after him. "
"His interest was mainly in Biological anthropology, but a German "
"philosopher specializing in psychology, Theodor Waitz, took up the "
"theme of general and social anthropology in his six-volume work, "
"entitled Die Anthropologie der Naturvölker, 1859–1864. The title was "
"""soon translated as "The Anthropology of Primitive Peoples". """
"The last two volumes were published posthumously.")
question2 = "What did Broca discover in the human brain?"
from allennlp.data.dataset_readers.reading_comprehension.util import make_reading_comprehension_instance
instance1 = make_reading_comprehension_instance(tokenizer.tokenize(question1),
tokenizer.tokenize(passage1),
{"bert": token_indexer},
passage1)
instance2 = make_reading_comprehension_instance(tokenizer.tokenize(question2),
tokenizer.tokenize(passage2),
{"bert": token_indexer},
passage2)
vocab = Vocabulary()
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
qtokens = tensor_dict["question"]
ptokens = tensor_dict["passage"]
config = BertConfig(len(token_indexer.vocab))
model = BertModel(config)
embedder = BertEmbedder(model)
_ = embedder(ptokens["bert"], offsets=ptokens["bert-offsets"])
_ = embedder(qtokens["bert"], offsets=qtokens["bert-offsets"])
def test_max_length(self):
config = BertConfig(len(self.token_indexer.vocab))
model = BertModel(config)
embedder = BertEmbedder(model)
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the " * 1000
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
instance = Instance({"tokens": TextField(tokens, {"bert": self.token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
embedder(tokens["bert"], tokens["bert-offsets"])
def test_end_to_end_with_higher_order_inputs(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 4 3 5 6 8 9 2 14 12
sentence1 = "the quickest quick brown fox jumped over the lazy dog"
tokens1 = tokenizer.tokenize(sentence1)
text_field1 = TextField(tokens1, {"bert": self.token_indexer})
# 2 3 5 6 8 9 2 15 10 11 14 1
sentence2 = "the quick brown fox jumped over the laziest lazy elmo"
tokens2 = tokenizer.tokenize(sentence2)
text_field2 = TextField(tokens2, {"bert": self.token_indexer})
# 2 5 15 10 11 6
sentence3 = "the brown laziest fox"
tokens3 = tokenizer.tokenize(sentence3)
text_field3 = TextField(tokens3, {"bert": self.token_indexer})
vocab = Vocabulary()
instance1 = Instance({"tokens": ListField([text_field1])})
instance2 = Instance({"tokens": ListField([text_field2, text_field3])})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths, verbose=True)
tokens = tensor_dict["tokens"]
# No offsets, should get 14 vectors back ([CLS] + 12 wordpieces + [SEP])
bert_vectors = self.token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 2, 14, 12]
# Offsets, should get 10 vectors back.
bert_vectors = self.token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 2, 10, 12]
# Now try top_layer_only = True
tlo_embedder = BertEmbedder(self.bert_model, top_layer_only=True)
bert_vectors = tlo_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 2, 14, 12]
bert_vectors = tlo_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 2, 10, 12]
def test_sliding_window(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the quickest quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
token_indexer = PretrainedBertIndexer(str(vocab_path), truncate_long_sequences=False, max_pieces=8)
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
bert_model = BertModel(config)
token_embedder = BertEmbedder(bert_model, max_pieces=8)
instance = Instance({"tokens": TextField(tokens, {"bert": token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
# 16 = [CLS], 17 = [SEP]
# 1 full window + 1 half window with start/end tokens
assert tokens["bert"].tolist() == [[16, 2, 3, 4, 3, 5, 6, 17,
16, 3, 5, 6, 8, 9, 2, 17,
16, 8, 9, 2, 14, 12, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 3, 4, 5, 6, 7, 8, 9, 10, 11]]
bert_vectors = token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [1, 13, 12]
bert_vectors = token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [1, 10, 12]
def test_sliding_window_with_batch(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the quickest quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
token_indexer = PretrainedBertIndexer(str(vocab_path), truncate_long_sequences=False, max_pieces=8)
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
bert_model = BertModel(config)
token_embedder = BertEmbedder(bert_model, max_pieces=8)
instance = Instance({"tokens": TextField(tokens, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens + tokens + tokens, {"bert": token_indexer})})
batch = Batch([instance, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
bert_vectors = token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert bert_vectors is not None
| [
"torch.LongTensor"
] | 0.4.1 | schmmd/allennlp | fbc28cefe03b1ea3ff65300d475d34f5f9629a5c |
1.0 | # Pytorch imports
import torch
# Cyphercat imports
from .train import train, train_attacker
from .metrics import eval_membership_inference, eval_attack_model
# Device to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def ml_leaks1(target=None, shadow_model=None, attacker_model=None,
target_in_loader=None, target_out_loader=None,
shadow_train_loader=None, shadow_out_loader=None,
shadow_optim=None, attack_optim=None, shadow_criterion=None,
attack_criterion=None, shadow_epochs=0, attack_epochs=0,
classes=None, n_max_posteriors=3, retrain=True, verbose=False):
'''Implementation of ml_leaks 1 membership inference attack
Trains a shadow network on an independent data set and then trains the
attacker to infer membership on this shadow net. Finally, the attacker is
used to run membership inference on the target.
Args:
target (nn.Module): Trained target network.
shadow_model (nn.Module): Shadow network to help train the attacker in
membership inference task.
attacker_model (nn.Module): Network to be trained in membership
inference task.
target_in_loader (DataLoader): DataLoader pointing to target in-data
used for testing the attack (split[4])
target_out_loader (DataLoader): Loads data pointing to target out-of-
training dataset (split[1]) used for attack evaluation.
shadow_train_loader (DataLoader): Loader for shadow_model training
(split[2]).
shadow_out_loader: Out-of-sample from shadow net, used to train the
attacker (split[3]).
shadow_optim (torch.optim): Optimizer for shadow_model training.
attack_optim (torch.optim): Optimizer for attacker_model training.
shadow_criterion (torch.nn): Loss function for shadow_model training.
attack_criterion (torch.nn): Loss function for attacker_model
training.
shadow_epochs (int): Number of epochs used to train the shadow network.
attack_epochs (int): Number of epochs used to train the attack network.
classes (list): Classes for membership inference task.
n_max_posteriors (int): Number of maximal posteriors to use in
membership inference attack.
retrain (bool): If True, retrains the shadow and attack networks;
otherwise the provided attacker model is used as-is.
verbose (bool): If True will print the loss at each batch during all
training steps.
Example:
To-do:
Add example to docstring.
'''
if retrain:
print('---- Training shadow network ----')
train(model=shadow_model, data_loader=shadow_train_loader,
test_loader=shadow_out_loader, optimizer=shadow_optim,
criterion=shadow_criterion, n_epochs=shadow_epochs,
classes=classes, verbose=verbose)
#
print('---- Training attack network ----')
train_attacker(attack_model=attacker_model, shadow_model=shadow_model,
shadow_train=shadow_train_loader,
shadow_out=shadow_out_loader, optimizer=attack_optim,
criterion=attack_criterion, n_epochs=attack_epochs,
k=n_max_posteriors)
#
print('---- Evaluate attack ----')
df_pr = eval_attack_model(attack_model=attacker_model, target=target,
target_train=target_in_loader,
target_out=target_out_loader, k=n_max_posteriors)
return df_pr
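# Usage sketch (illustrative comment; every name below is a placeholder for a
# user-supplied model, loader, optimizer or loss, with the split indices
# following the docstring above):
#
#   df_pr = ml_leaks1(target=target_net, shadow_model=shadow_net,
#                     attacker_model=attack_net,
#                     target_in_loader=splits[4], target_out_loader=splits[1],
#                     shadow_train_loader=splits[2], shadow_out_loader=splits[3],
#                     shadow_optim=shadow_opt, attack_optim=attack_opt,
#                     shadow_criterion=shadow_loss_fn,
#                     attack_criterion=attack_loss_fn,
#                     shadow_epochs=10, attack_epochs=10, classes=class_list)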
def ml_leaks3(target=None, target_in_loader=None, target_out_loader=None):
''' Implementation of ml_leaks 3 membership inference attack
Args:
target (nn.Module): Trained target network to attack
target_in_loader (DataLoader): Loader pointing to data used to
train target (split[4]). Used here to evaluate attack
performance.
target_out_loader: Loader pointing to the target out-of-training data
(split[1])
Example:
To-do:
Add example to docstring.
'''
eval_membership_inference(target_model=target,
target_train=target_in_loader,
target_out=target_out_loader)
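# Usage sketch (illustrative comment; the names are placeholders and the split
# indices follow the docstring above):
#
#   ml_leaks3(target=target_net,
#             target_in_loader=splits[4],
#             target_out_loader=splits[1])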
def mi_gradient_ascent(input_sample=None, target_model=None, optimizer=None,
category=None, iterations=0, verbose=False):
""" Implementation of gradient based model inversion attack
Args:
input_sample (torch.tensor): Initialized input sample, usually
randomly generated. Size should match the model input.
target_model (nn.Module): Pretrained model to attack.
optimizer (nn.optim): Optimizer (initialized on image parameters) used
in attack.
category (int): Category to invert.
iterations (int): Query iterations in the attack.
verbose (bool): If True will print the loss at each step in attack.
Returns:
(list(float)): Returns a list of the losses at each iteration.
Example:
Todos:
Write example
"""
category = torch.LongTensor([category]).to(device)  # target class index as a tensor on the attack device
losses = []
for i_step in range(iterations):
target_model.zero_grad()
out = target_model(input_sample)
loss = -out.take(category)
loss.backward()
#
optimizer.step()
input_sample.grad.zero_()
losses.append(loss.data)
#
return losses
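# Usage sketch (illustrative comment; the docstring leaves the example as a
# to-do, so all names and the input shape below are assumptions):
#
#   sample = torch.randn((1, 1, 28, 28), requires_grad=True, device=device)
#   optim = torch.optim.SGD([sample], lr=0.1)
#   losses = mi_gradient_ascent(input_sample=sample, target_model=target_net,
#                               optimizer=optim, category=3, iterations=100)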
| [
"torch.cuda.is_available",
"torch.LongTensor"
] | 1.0.0 | arafin-lab/model_inversion_experiments | 4029ae8683b9056013e6424d8931afe79afa618e |