# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
        td = []  # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
            randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]  # randomly sample some data; note the indices are drawn from the whole task split, not restricted to class i
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
        print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))

# ---- file: ACL-resnet/src/dataloaders/miniimagenet.py (repo: Adversarial-Continual-Learning-main) ----
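# Illustrative usage sketch (not part of the original repo) for the DatasetGen
# class defined in miniimagenet.py above. The Namespace only mimics the
# attributes DatasetGen reads; the field values and the '../data' layout are
# assumptions for illustration.
def _demo_dataset_gen():
    from argparse import Namespace
    args = Namespace(seed=0, batch_size=64, pc_valid=0.15, data_dir='../data',
                     latent_dim=128, use_memory='yes', ntasks=20, samples=1, workers=4)
    generator = DatasetGen(args)
    for t in range(args.ntasks):
        loaders = generator.get(t)  # builds train/valid/test loaders; also grows episodic memory
        x, y, tt, td = next(iter(loaders[t]['train']))  # one mini-batch of task t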
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
# from scipy.imageio import imread
import pandas as pd
import os
import torch
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
    def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
        # pass the real train flag through to torchvision's CIFAR10; the original
        # code passed task_num positionally, which landed in the `train` argument
        super(CIFAR10_, self).__init__(root, train, transform=transform,
                                       target_transform=target_transform,
                                       download=download)
        self.train = train  # training set or test set
        self.transform = transform
        self.target_transform = target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
            # now load the pickled numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
# if self.train:
return len(self.data)
# else:
# return len(self.test_data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=True):
self.root = os.path.expanduser(root)
# root, task_num, train, transform = None, download = False):
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes+1):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
    def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
        # forward the real train flag to torchvision's MNIST; the original code
        # passed task_num positionally, which landed in the `train` argument
        super(MNIST_RGB, self).__init__(root, train, transform=transform,
                                        target_transform=target_transform,
                                        download=download)
        self.train = train  # training set or test set
        self.target_transform = target_transform
        self.transform = transform
self.num_classes=10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
# self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img, mode='L').convert('RGB')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
except:
print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
            self.targets = sum(y, [])  # fixed: this was assigned to self.labels, which __getitem__ never reads
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
img = Image.fromarray(img)#.convert('RGB')
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
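# The num_samples_per_class branches above (CIFAR10_, SVHN_, MNIST_RGB, notMNIST_)
# all follow the same pattern; this standalone sketch (not from the repo) shows it
# on plain arrays: gather indices per label, shuffle, keep the first k, concatenate.
def _demo_subsample_per_class(data, targets, k):
    x, y = [], []
    for l in sorted(set(targets)):
        idx = np.where(np.array(targets) == l)[0]
        keep = np.random.permutation(len(idx))[:k]
        x.append([data[idx[j]] for j in keep])
        y.append([l] * len(keep))
    return np.array(sum(x, [])), sum(y, [])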
# ---- file: ACL-resnet/src/dataloaders/datasets_utils.py (repo: Adversarial-Continual-Learning-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
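# utils.compute_conv_output_size is not shown in this file; a minimal sketch
# consistent with its use above (kernel size only, assuming stride 1 and no
# padding) is the standard convolution arithmetic:
def _compute_conv_output_size_sketch(size, kernel, stride=1, padding=0):
    return int((size - kernel + 2 * padding) // stride) + 1
# e.g. for miniimagenet, size=84: conv1 uses kernel 84 // 8 = 10, giving
# 84 - 10 + 1 = 75, and the following MaxPool2d(2) halves it to 37.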
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.Sequential()
self.task_out.add_module('conv1', torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.task_out.add_module('relu1', torch.nn.ReLU(inplace=True))
self.task_out.add_module('drop1', torch.nn.Dropout(0.2))
self.task_out.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.task_out.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.task_out.add_module('relu2', torch.nn.ReLU(inplace=True))
self.task_out.add_module('dropout2', torch.nn.Dropout(0.5))
self.task_out.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten, self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
def forward(self, x):
x = x.view_as(x)
out = self.task_out(x)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# def forward(self, x, task_id):
# x = x.view_as(x)
# out = self.task_out[2*task_id].forward(x)
# out = out.view(out.size(0),-1)
# out = self.task_out[2*task_id+1].forward(out)
# return out
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
#
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# if torch.is_tensor(tt):
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# else:
# return self.head(x)
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
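# Quick standalone check (not from the repo) of the sizing logic above:
# print_model_size converts parameter counts to bytes assuming 4-byte float32
# values, and pretty_print groups magnitudes by powers of 1000.
def _demo_pretty_print(num):
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
# _demo_pretty_print(4 * 2500000) == '10.0M', i.e. 2.5M float32 parameters take ~10 MB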
# ---- file: ACL-resnet/src/networks/alexnet_acl.py (repo: Adversarial-Continual-Learning-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
ncha,size,_ = args.inputsize
self.pretrained = False
if args.experiment == 'cifar100':
hiddens = [64, 128, 256]
elif args.experiment == 'miniimagenet':
hiddens = [1024, 512, 256]
else:
raise NotImplementedError
# Small resnet
resnet = resnet18_small(self.latent_dim, shared=True)
self.features = torch.nn.Sequential(*list(resnet.children())[:-2])
if args.experiment == 'miniimagenet':
# num_ftrs = 4608
num_ftrs = 2304 # without average pool (-2)
elif args.experiment == 'cifar100':
# num_ftrs = 25088 # without average pool
num_ftrs = 256
else:
raise NotImplementedError
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(num_ftrs,hiddens[0])
self.fc2=torch.nn.Linear(hiddens[0],hiddens[1])
self.fc3=torch.nn.Linear(hiddens[1],hiddens[1])
self.fc4=torch.nn.Linear(hiddens[1], self.latent_dim)
def forward(self, x):
x = x.view_as(x)
x = self.features(x)
x = torch.flatten(x, 1)
x = self.drop2(self.relu(self.fc1(x)))
x = self.drop2(self.relu(self.fc2(x)))
x = self.drop2(self.relu(self.fc3(x)))
x = self.drop2(self.relu(self.fc4(x)))
return x
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.image_size = ncha * size * size
self.taskcla = args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = ncha * size * size
self.use_memory = args.use_memory
self.hidden1 = args.head_units
self.hidden2 = args.head_units
self.shared = Shared(args)
self.private = resnet18_small(self.latent_dim, shared=False)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# output['shared'] = self.shared(x_s)
# output['private'] = self.private(x_p)
#
# concat_features = torch.cat([output['private'], output['shared']], dim=1)
#
# if torch.is_tensor(tt):
#
# output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(concat_features.size(0))])
# else:
# if self.use_memory == 'no':
# output['out'] = self.head.forward(concat_features)
#
# elif self.use_memory == 'yes':
# y = []
# for i, _ in self.taskcla:
# y.append(self.head[i](concat_features))
# output['out'] = y[task_id]
#
# return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
class _CustomDataParallel(torch.nn.DataParallel):
def __init__(self, model):
super(_CustomDataParallel, self).__init__(model)
def __getattr__(self, name):
try:
return super(_CustomDataParallel, self).__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
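# Standalone shape check (not from the repo) for BasicBlock with a stride-2
# downsample branch, mirroring how _make_layer wires it further below.
def _demo_basic_block():
    down = nn.Sequential(conv1x1(16, 32, stride=2), nn.BatchNorm2d(32))
    block = BasicBlock(16, 32, stride=2, downsample=down)
    out = block(torch.randn(2, 16, 32, 32))
    assert out.shape == (2, 32, 16, 16)  # spatial dims halved, channels doubled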
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, shared, block, layers, num_classes, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
# small resnet
if shared:
hiddens = [32, 64, 128, 256]
else:
hiddens = [16, 32, 32, 64]
# original resnet
# hiddens = [64, 128, 256, 512]
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, hiddens[0], layers[0])
self.layer2 = self._make_layer(block, hiddens[1], layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, hiddens[2], layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, hiddens[3], layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(hiddens[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
x = self.relu(x)
return x
def forward(self, x):
return self._forward_impl(x)
def resnet18_small(latent_dim, shared):
    r"""ResNet-18-style small model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with reduced widths; the final layer maps to ``latent_dim`` features."""
    return ResNet(shared, BasicBlock, [2, 2, 2, 2], num_classes=latent_dim)
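# Smoke test (not from the repo) for the small ResNet above: with shared=True the
# widths are [32, 64, 128, 256] and the final layer maps the pooled features to
# latent_dim outputs; the 84x84 input size matches miniimagenet.
def _demo_resnet18_small(latent_dim=128):
    model = resnet18_small(latent_dim, shared=True)
    z = model(torch.randn(2, 3, 84, 84))
    assert z.shape == (2, latent_dim)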
# ---- file: ACL-resnet/src/networks/resnet_acl.py (repo: Adversarial-Continual-Learning-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
        h_p = x_p.view(x_p.size(0), -1)  # fixed: the private branch previously flattened x_s by mistake
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
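# Standalone sketch (not from the repo) of the per-sample head routing used in
# Net.forward above: tt carries one task id per sample, and each sample is pushed
# through its own task head before the per-sample outputs are re-stacked.
def _demo_head_routing():
    heads = torch.nn.ModuleList([torch.nn.Linear(4, 2) for _ in range(3)])
    x = torch.randn(5, 4)
    tt = torch.tensor([0, 2, 1, 0, 2])  # task id for each sample in the batch
    out = torch.stack([heads[tt[i]](x[i]) for i in range(x.size(0))])
    assert out.shape == (5, 2)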
# ---- file: ACL-resnet/src/networks/mlp_acl.py (repo: Adversarial-Continual-Learning-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
    def forward(self, z, labels=None, task_id=None):
        # labels and task_id are accepted (and ignored) so the signature matches
        # the trainer's calls of the form discriminator.forward(shared, t_real_D, task_id)
        return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
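# Standalone check (not from the repo) that the backward pass really multiplies
# the upstream gradient by -lambda while the forward pass is the identity.
def _demo_gradient_reversal(lambda_=1.0):
    x = torch.ones(3, requires_grad=True)
    y = GradientReversalFunction.apply(x, lambda_)
    y.sum().backward()
    assert torch.allclose(x.grad, -lambda_ * torch.ones(3))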
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
        return GradientReversalFunction.apply(x, self.lambda_)

# ---- file: ACL-resnet/src/networks/discriminator.py (repo: Adversarial-Continual-Learning-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 images for training and 100 for test for each class
n = 500
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
    pickle.dump(d_test, f)

# ---- file: data/split_miniimagenet.py (repo: Adversarial-Continual-Learning-main) ----
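# Optional sanity check (not part of the original script) for the 500/100 split
# written by split_miniimagenet.py above; it assumes the standard 100-class
# miniImageNet with 600 images per class and the output paths used above.
def _check_split(path='../data/miniimagenet'):
    with open(os.path.join(path, 'train.pkl'), 'rb') as f:
        train = pickle.load(f)
    with open(os.path.join(path, 'test.pkl'), 'rb') as f:
        test = pickle.load(f)
    assert len(train['images']) == 100 * 500 and len(test['images']) == 100 * 100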
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
            # lower both learning rates if the model is still near random-chance accuracy after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
# Restore best validation model
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
# Detaching samples in the batch which do not belong to the current task before feeding them to P
t_current=task_id * torch.ones_like(tt)
body_mask=torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module=data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index]=x_task_module[index].detach()
x_task_module=x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
output=self.model(x, x_task_module, tt, task_id)
task_loss=self.task_loss(output, y)
shared_encoded, task_encoded=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_out_gen_training=self.discriminator.forward(shared_encoded, t_real_D, task_id)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_encoded, task_encoded)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
output=self.model(x, x_task_module, tt, task_id)
shared_encoded, task_out=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_real_out=self.discriminator.forward(shared_encoded.detach(), t_real_D, task_id)
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.adv_loss_reg
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake, t_real_D, task_id)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.adv_loss_reg
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=self.model(x, x, tt, task_id)
shared_out, task_out=self.model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator.forward(shared_out, t_real_D, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output, y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=model.forward(x, x, tt, task_id)
shared_out, task_out=model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
                # Condition the discriminator on the real task labels (td), matching eval_ above
                output_d=self.discriminator.forward(shared_out, t_real_D, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output, y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the previous shared module with the current one
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def loader_size(self, data_loader):
        return len(data_loader.dataset)
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
        return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
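# Usage sketch (illustrative, not part of the training loop): DiffLoss
# penalizes overlap between shared and private features by row-normalizing
# each batch and averaging the squared entries of their Gram matrix; it is 0
# when the two feature sets are mutually orthogonal.
#
#   diff = DiffLoss()
#   loss = diff(torch.randn(8, 64), torch.randn(8, 64))  # scalar tensor >= 0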
| Adversarial-Continual-Learning-main | src/acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
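# e.g. human_format(1500000) -> '1.5M'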
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
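# e.g. compute_conv_output_size(32, kernel_size=3, stride=1, padding=1) -> 32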
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
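    # A negative BWT indicates forgetting; a positive BWT means training on
    # later tasks improved performance on earlier ones.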
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
        os.makedirs('checkpoints', exist_ok=True)  # don't fail if the folder already exists
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
    if args.ntasks not in datasets_tasks[args.experiment]:
        raise Exception("Chosen number of tasks ({}) does not match with {} experiment".format(args.ntasks, args.experiment))
    # Making sure the memory flags are consistent:
    if args.use_memory == 'yes' and not args.samples > 0:
        raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
    if args.use_memory == 'no' and args.samples > 0:
        raise Exception("Inconsistent flags: --samples n where n>0 also requires --use_memory yes")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
| Adversarial-Continual-Learning-main | src/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
from networks import alexnet_acl as network
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
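    # acc[t, u] will hold the test accuracy on task u measured after training
    # on task t; print_log_acc_bwt derives ACC and BWT from this matrix.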
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
print ('AVG ACC: {:5.4f}% \pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
print ('AVG BWT: {:5.2f}% \pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
#######################################################################################################################
if __name__ == '__main__':
main(args)
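# Typical invocation (illustrative): settings are read from a YAML config via
# OmegaConf, e.g.
#   python main.py --config ./configs/config_mnist5.yml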
| Adversarial-Continual-Learning-main | src/main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
            train_td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
            test_td = [] # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
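        # Memory-replay samples may already be tensors or transformed images,
        # so each conversion step below falls back silently when it does not apply.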
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
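        # e.g., with num_classes=100 and ntasks=20, each task receives 5
        # randomly permuted CIFAR100 classes.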
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
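# Usage sketch (illustrative; assumes an `args` namespace providing the fields
# read in __init__, e.g. seed, batch_size, pc_valid, data_dir, ntasks, workers):
#
#   generator = DatasetGen(args)
#   loaders = generator.get(task_id=0)
#   for img, target, tt, td in loaders[0]['train']:
#       pass  # tt: task-module label, td: discriminator label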
| Adversarial-Continual-Learning-main | src/dataloaders/cifar100.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch.utils.data
from .datasets_utils import *
from utils import *
from torchvision import transforms
mean_datasets = {
'CIFAR10': [x/255 for x in [125.3,123.0,113.9]],
'notMNIST': (0.4254,),
'MNIST': (0.1,) ,
'SVHN':[0.4377,0.4438,0.4728] ,
'FashionMNIST': (0.2190,),
}
std_datasets = {
'CIFAR10': [x/255 for x in [63.0,62.1,66.7]],
'notMNIST': (0.4501,),
'MNIST': (0.2752,),
'SVHN': [0.198,0.201,0.197],
'FashionMNIST': (0.3318,)
}
classes_datasets = {
'CIFAR10': 10,
'notMNIST': 10,
'MNIST': 10,
'SVHN': 10,
'FashionMNIST': 10,
}
lr_datasets = {
'CIFAR10': 0.001,
'notMNIST': 0.01,
'MNIST': 0.01,
'SVHN': 0.001,
'FashionMNIST': 0.01,
}
gray_datasets = {
'CIFAR10': False,
'notMNIST': True,
'MNIST': True,
'SVHN': False,
'FashionMNIST': True,
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_samples = args.samples
self.inputsize = [3,32,32]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.datasets_idx = list(np.random.permutation(self.num_tasks))
print('Task order =', [list(classes_datasets.keys())[item] for item in self.datasets_idx])
self.datasets_names = [list(classes_datasets.keys())[item] for item in self.datasets_idx]
self.taskcla = []
self.lrs = []
for i in range(self.num_tasks):
t = self.datasets_idx[i]
self.taskcla.append([i, list(classes_datasets.values())[t]])
self.lrs.append([i, list(lr_datasets.values())[t]])
print('Learning Rates =', self.lrs)
print('taskcla =', self.taskcla)
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.args=args
        self.dataloaders, self.memory_set, self.indices = {}, {}, {}
        self.memoryloaders = {}
self.saliency_loaders, self.saliency_set = {}, {}
for i in range(self.num_tasks):
self.dataloaders[i] = {}
self.memory_set[i] = {}
self.memoryloaders[i] = {}
self.indices[i] = {}
# self.saliency_set = {}
self.saliency_loaders[i] = {}
self.download = True
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get_dataset(self, dataset_idx, task_num, num_samples_per_class=False, normalize=True):
dataset_name = list(mean_datasets.keys())[dataset_idx]
nspc = num_samples_per_class
if normalize:
transformation = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name],std_datasets[dataset_name])])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name], std_datasets[dataset_name])])
else:
transformation = transforms.Compose([transforms.ToTensor()])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
])
        # target_transformation = transforms.Compose([transforms.ToTensor()])
        target_transformation = None
        if dataset_idx == 0:
            trainset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform=target_transformation, transform=transformation)
            testset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform=target_transformation, transform=transformation)
        elif dataset_idx == 1:
            trainset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
        elif dataset_idx == 2:
            trainset = MNIST_RGB(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = MNIST_RGB(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
        elif dataset_idx == 3:
            trainset = SVHN_(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=transformation)
            testset = SVHN_(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=transformation)
        elif dataset_idx == 4:
            trainset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=True, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=False, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
return trainset, testset
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
current_dataset_idx = self.datasets_idx[task_id]
dataset_name = list(mean_datasets.keys())[current_dataset_idx]
self.train_set[task_id], self.test_set[task_id] = self.get_dataset(current_dataset_idx,task_id)
self.num_classes = classes_datasets[dataset_name]
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '{} - {} classes - {} images'.format(dataset_name,
classes_datasets[dataset_name],
len(self.train_set[task_id]))
self.dataloaders[task_id]['classes'] = self.num_classes
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
    def update_memory(self, task_id):
        # This generator has no per-task class lists (self.task_ids); the number
        # of classes in the current task comes from taskcla instead.
        num_classes = self.taskcla[task_id][1]
        num_samples_per_class = self.num_samples // num_classes
        mem_class_mapping = {i: i for i in range(num_classes)}
        # Looping over each class in the current task
        for i in range(num_classes):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
def report_size(self,dataset_name,task_id):
print("Dataset {} size: {} ".format(dataset_name, len(self.train_set[task_id])))
| Adversarial-Continual-Learning-main | src/dataloaders/mulitidatasets.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
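# The returned callback matches urllib's reporthook signature
# (count, block_size, total_size); download_url below uses it to drive tqdm.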
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
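# e.g. get_int(b'\x00\x00\x08\x03') -> 2051, the magic number of an MNIST image file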
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
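# Layout recap: the 4-byte magic encodes rank and dtype (magic % 256 gives the
# number of dimensions, magic // 256 indexes the typemap above), followed by
# one big-endian 4-byte size per dimension, then the raw array data.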
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
    return x
| Adversarial-Continual-Learning-main | src/dataloaders/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
        td = [] # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class] # randomly sample some data
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
        print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
| Adversarial-Continual-Learning-main | src/dataloaders/miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import torch
import numpy as np
import os.path
import sys
import torch.utils.data as data
from torchvision import datasets, transforms
class iMNIST(datasets.MNIST):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
        # torchvision's MNIST expects the `train` flag as its second argument.
        super(iMNIST, self).__init__(root, train=train, transform=transform,
                                     target_transform=target_transform, download=download)
self.train = train # training set or test set
self.root = root
self.target_transform=target_transform
self.transform=transform
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
targets = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
data.append(self.data[i])
targets.append(self.class_mapping[self.targets[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if self.train:
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
targets.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = data.copy()
self.targets = targets.copy()
self.tt = tt.copy()
self.td = td.copy()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img.numpy(), mode='L')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 10
self.num_samples = args.samples
self.inputsize = [1,28,28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.task_ids = [[0,1], [2,3], [4,5], [6,7], [8,9]]
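        # 5-Split MNIST: the ten digits are partitioned into five fixed
        # binary classification tasks.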
self.train_set = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True,
download=True, transform=self.transformation)
self.test_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),shuffle=True,
num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '5Split-MNIST-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
        mem_class_mapping = {i: i for i in range(len(self.task_ids[task_id]))}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
dataset = iMNIST(root=self.root, classes=self.task_ids[task_id][i], memory_classes=None, memory=None,
task_num=task_id, train=True, download=True, transform=self.transformation)
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3]) | Adversarial-Continual-Learning-main | src/dataloaders/mnist5.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys, os
import numpy as np
from PIL import Image
import torch.utils.data as data
from torchvision import datasets, transforms
from sklearn.utils import shuffle
from utils import *
class PermutedMNIST(datasets.MNIST):
    def __init__(self, root, task_num, train=True, permute_idx=None, transform=None):
        # `transform` is accepted for interface symmetry with the other loaders,
        # but it is not forwarded to the base class: the images are stored below
        # as pre-permuted, flattened float tensors, so self.transform stays None
        # unless the caller sets it explicitly.
        super(PermutedMNIST, self).__init__(root, train=train, download=True)
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data = torch.stack([img.float().view(-1)[permute_idx] for img in self.data])
self.tl = (task_num) * torch.ones(len(self.data),dtype=torch.long)
self.td = (task_num+1) * torch.ones(len(self.data),dtype=torch.long)
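        # As in the other dataloaders, `tl` is the task-module label and `td`
        # (shifted by one) is the label consumed by the task discriminator.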
def __getitem__(self, index):
img, target, tl, td = self.data[index], self.targets[index], self.tl[index], self.td[index]
if self.transform is not None:
img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
return img, target, tl, td
def __len__(self):
return self.data.size(0)
class DatasetGen(object):
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.num_samples = args.samples
self.num_tasks = args.ntasks
self.root = args.data_dir
self.use_memory = args.use_memory
self.inputsize = [1, 28, 28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean, std)])
self.taskcla = [[t, 10] for t in range(self.num_tasks)]
self.train_set, self.test_set = {}, {}
self.indices = {}
self.dataloaders = {}
self.idx={}
self.get_idx()
self.pin_memory = True
self.num_workers = args.workers
self.task_memory = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
if self.use_memory == 'yes' and self.num_samples > 0:
indices=torch.randperm(len(self.train_set[task_id]))[:self.num_samples]
rand_subset=torch.utils.data.Subset(self.train_set[task_id], indices)
self.task_memory.append(rand_subset)
else:
if self.use_memory == 'yes' and self.num_samples > 0:
current_dataset = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
d = []
d.append(current_dataset)
for m in self.task_memory:
d.append(m)
self.train_set[task_id] = torch.utils.data.ConcatDataset(d)
indices=torch.randperm(len(current_dataset))[:self.num_samples]
rand_subset=torch.utils.data.Subset(current_dataset, indices)
self.task_memory.append(rand_subset)
else:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
self.test_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=False,
permute_idx=self.idx[task_id], transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id],
[len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'pmnist-{}'.format(task_id+1)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def get_idx(self):
for i in range(len(self.taskcla)):
idx = list(range(self.inputsize[1] * self.inputsize[2]))
self.idx[i] = shuffle(idx, random_state=self.seed * 100 + i)
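# A minimal smoke test for the generator above, meant only to illustrate the
# expected interface. The argparse.Namespace fields mirror the attributes read
# in __init__, but the values are hypothetical stand-ins, not the original
# experimental configuration; running it downloads MNIST into `data_dir` and
# assumes a torchvision version that still exposes `processed_folder`.
if __name__ == '__main__':
    import argparse
    demo_args = argparse.Namespace(seed=0, batch_size=64, pc_valid=0.15,
                                   samples=0, ntasks=2, data_dir='../data/',
                                   use_memory='no', workers=0)
    generator = DatasetGen(demo_args)
    loaders = generator.get(task_id=0)
    x, y, tl, td = next(iter(loaders[0]['train']))
    print(x.shape, y.shape, tl[0].item(), td[0].item())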
| Adversarial-Continual-Learning-main | src/dataloaders/pmnist.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
import pandas as pd
import os
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
    def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
        # Pass `train` (not task_num) to the base class so the correct split is
        # loaded; the subsampling branch below reads self.data/self.targets
        # directly from what the base class loaded.
        super(CIFAR10_, self).__init__(root, train=train, transform=transform,
                                       target_transform=target_transform,
                                       download=download)
        self.train = train  # training set or test set
self.transform = transform
self.target_transform=target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
        # Raw samples are numpy arrays; replayed/merged samples may already be
        # tensors or PIL images, so each conversion step below is best-effort.
        try:
            img = Image.fromarray(img)
        except Exception:
            pass
        try:
            if self.transform is not None:
                img = self.transform(img)
        except Exception:
            pass
        try:
            if self.target_transform is not None:
                tt = self.target_transform(tt)
                td = self.target_transform(td)
        except Exception:
            pass
return img, target, tt, td
    def __len__(self):
        return len(self.data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
    def __init__(self, root, task_num, num_samples_per_class, train, transform=None, target_transform=None, download=True):
        self.root = os.path.expanduser(root)
        self.train = train  # training set or test set
        self.transform = transform
        self.target_transform = target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes+1):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
        # The stored arrays are CHW; transpose to HWC for PIL. Each conversion
        # step is best-effort so already-converted samples pass through.
        try:
            img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        except Exception:
            pass
        try:
            if self.transform is not None:
                img = self.transform(img)
        except Exception:
            pass
        try:
            if self.target_transform is not None:
                tt = self.target_transform(tt)
                td = self.target_transform(td)
        except Exception:
            pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
    def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
        # Pass `train` (not task_num) to the base class so the correct split is
        # verified/downloaded; the data itself is reloaded below.
        super(MNIST_RGB, self).__init__(root, train=train, transform=transform,
                                        target_transform=target_transform,
                                        download=download)
        self.train = train  # training set or test set
        self.target_transform = target_transform
        self.transform = transform
        self.num_classes = 10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
        self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
        # Raw samples are grayscale arrays converted to RGB; replayed samples
        # may already be transformed, so each conversion step is best-effort.
        try:
            img = Image.fromarray(img, mode='L').convert('RGB')
        except Exception:
            pass
        try:
            if self.transform is not None:
                img = self.transform(img)
        except Exception:
            pass
        try:
            if self.target_transform is not None:
                tt = self.target_transform(tt)
                td = self.target_transform(td)
        except Exception:
            pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
                except Exception:
                    print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
            self.data = np.array(sum(x, []))
            self.targets = sum(y, [])  # was self.labels; __getitem__ reads self.targets
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
        img = Image.fromarray(img)  # images were already converted to RGB at load time
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
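# The per-class subsampling pattern repeated in the loaders above, shown in
# isolation on synthetic data. This is an illustrative sketch only; the names
# below are hypothetical and not part of the original loaders.
if __name__ == '__main__':
    fake_data = np.arange(100).reshape(50, 2)       # 50 fake samples, 2 features
    fake_targets = np.repeat(np.arange(5), 10)      # 5 classes, 10 samples each
    num_samples_per_class = 3
    x, y = [], []
    for l in range(5):
        indices_with_label_l = np.where(fake_targets == l)[0]
        chosen = np.random.permutation(len(indices_with_label_l))[:num_samples_per_class]
        x.append([fake_data[indices_with_label_l[c]] for c in chosen])
        y.append([l] * len(chosen))
    data = np.array(sum(x, []))
    targets = sum(y, [])
    print(data.shape, targets)                      # (15, 2); three labels per class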
| Adversarial-Continual-Learning-main | src/dataloaders/datasets_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
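        # `s` tracks the spatial size after each valid-padding conv and the 2x2
        # max-pool applied in forward(), so fc1's input size hiddens[2]*s*s
        # below needs no hand-computed constant.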
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.conv = torch.nn.Sequential()
self.conv.add_module('conv1',torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.conv.add_module('relu1', torch.nn.ReLU(inplace=True))
self.conv.add_module('drop1', torch.nn.Dropout(0.2))
self.conv.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.conv.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.conv.add_module('relu2', torch.nn.ReLU(inplace=True))
self.conv.add_module('dropout2', torch.nn.Dropout(0.5))
self.conv.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.task_out.append(self.conv)
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten,self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x, task_id):
x = x.view_as(x)
out = self.task_out[2*task_id].forward(x)
out = out.view(out.size(0),-1)
out = self.task_out[2*task_id+1].forward(out)
return out
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self, x_s, x_p, tt, task_id):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
x_s = self.shared(x_s)
x_p = self.private(x_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
if self.args.experiment == 'multidatasets':
# if no memory is used this is faster:
y=[]
for i,_ in self.taskcla:
y.append(self.head[i](x))
return y[task_id]
else:
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples,
self.pretty_print(self.num_tasks*4*self.samples*self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(4*(count_S + count_P + count_H)+self.num_tasks*4*self.samples*self.image_size))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
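# A minimal shape check for the shared/private network above. The Namespace
# fields mirror the attributes read in __init__; the values are hypothetical
# stand-ins, not the configuration used in the original experiments.
if __name__ == '__main__':
    import argparse
    demo_args = argparse.Namespace(experiment='cifar100', inputsize=[3, 32, 32],
                                   taskcla=[[0, 5], [1, 5]], latent_dim=128,
                                   ntasks=2, samples=0, head_units=64,
                                   batch_size=4, device='cpu')
    net = Net(demo_args)
    x = torch.randn(4, 3, 32, 32)
    tt = torch.zeros(4, dtype=torch.long)
    print(net(x, x, tt, task_id=0).shape)   # torch.Size([4, 5])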
| Adversarial-Continual-Learning-main | src/networks/alexnet_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
        h_p = x_p.view(x_p.size(0), -1)  # flatten the private branch's own input (was x_s)
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
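# A minimal shape check for the MLP variant above, with hypothetical stand-in
# values for the argument namespace; not the original experimental settings.
if __name__ == '__main__':
    import argparse
    demo_args = argparse.Namespace(experiment='mnist5', inputsize=[1, 28, 28],
                                   taskcla=[[0, 2], [1, 2]], latent_dim=32,
                                   ntasks=2, units=64, nlayers=2, samples=0,
                                   device='cpu')
    net = Net(demo_args)
    x = torch.randn(4, 1, 28, 28)
    tt = torch.zeros(4, dtype=torch.long)
    print(net(x, x, tt, task_id=0).shape)   # torch.Size([4, 2])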
| Adversarial-Continual-Learning-main | src/networks/mlp_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
def forward(self, z, labels, task_id):
return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
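# Intuition for the reversal: GradientReversalFunction is the identity in the
# forward graph, but during backprop the incoming gradient is scaled by
# -lambda_, so a discriminator stacked on top of it trains normally while the
# features feeding it are pushed toward being task-indistinguishable.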
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_) | Adversarial-Continual-Learning-main | src/networks/discriminator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper script for launching a job on the fair cluster.
Sample usage:
python cluster_run.py --name=trial --setup='/path/to/setup.sh' --cmd='job_command'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
from absl import app
from absl import flags
import os
import sys
import random
import string
import datetime
import re
opts = flags.FLAGS
flags.DEFINE_integer('nodes', 1, 'Number of nodes per task')
flags.DEFINE_integer('ntp', 1, 'Number of tasks per node')
flags.DEFINE_integer('ncpus', 40, 'Number of cpu cores per task')
flags.DEFINE_integer('ngpus', 1, 'Number of gpus per task')
flags.DEFINE_string('name', '', 'Job name')
flags.DEFINE_enum('partition', 'learnfair', ['dev', 'priority','uninterrupted','learnfair'], 'Cluster partition')
flags.DEFINE_string('comment', 'for ICML deadline in 2020.', 'Comment')
flags.DEFINE_string('time', '72:00:00', 'Time for which the job should run')
flags.DEFINE_string('setup', '/private/home/tanmayshankar/Research/Code/Setup.bash', 'Setup script that will be run before the command')
# flags.DEFINE_string('workdir', os.getcwd(), 'Directory to run the job from')
flags.DEFINE_string('workdir', '/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments', 'Directory to run the job from')
flags.DEFINE_string('cmd', 'echo $PWD', 'Job command')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def main(_):
job_folder = '/checkpoint/tanmayshankar/jobs/' + datetime.date.today().strftime('%y_%m_%d')
mkdir(job_folder)
if len(opts.name) == 0:
# read name from command
        opts.name = re.search(r'--name=\w+', opts.cmd).group(0)[7:]
print(opts.name)
slurm_cmd = '#!/bin/bash\n\n'
slurm_cmd += '#SBATCH --job-name={}\n'.format(opts.name)
slurm_cmd += '#SBATCH --output={}/{}-%j.out\n'.format(job_folder, opts.name)
slurm_cmd += '#SBATCH --error={}/{}-%j.err\n'.format(job_folder, opts.name)
# slurm_cmd += '#SBATCH --exclude=learnfair2038'
slurm_cmd += '\n'
slurm_cmd += '#SBATCH --partition={}\n'.format(opts.partition)
if len(opts.comment) > 0:
slurm_cmd += '#SBATCH --comment="{}"\n'.format(opts.comment)
slurm_cmd += '\n'
slurm_cmd += '#SBATCH --nodes={}\n'.format(opts.nodes)
slurm_cmd += '#SBATCH --ntasks-per-node={}\n'.format(opts.ntp)
if opts.ngpus > 0:
slurm_cmd += '#SBATCH --gres=gpu:{}\n'.format(opts.ngpus)
slurm_cmd += '#SBATCH --cpus-per-task={}\n'.format(opts.ncpus)
slurm_cmd += '#SBATCH --time={}\n'.format(opts.time)
slurm_cmd += '\n'
slurm_cmd += 'source {}\n'.format(opts.setup)
slurm_cmd += 'cd {} \n\n'.format(opts.workdir)
slurm_cmd += '{}\n'.format(opts.cmd)
job_fname = '{}/{}.sh'.format(job_folder, ''.join(random.choices(string.ascii_letters, k=8)))
with open(job_fname, 'w') as f:
f.write(slurm_cmd)
#print('sbatch {}'.format(job_fname))
os.system('sbatch {}'.format(job_fname))
if __name__ == '__main__':
app.run(main)
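# For the sample invocation in the module docstring, the generated sbatch
# script looks roughly like this (output/error paths live under the dated job
# folder, and the random .sh filename is regenerated on every launch):
#
#   #!/bin/bash
#
#   #SBATCH --job-name=trial
#   #SBATCH --output=/checkpoint/tanmayshankar/jobs/<yy_mm_dd>/trial-%j.out
#   #SBATCH --error=/checkpoint/tanmayshankar/jobs/<yy_mm_dd>/trial-%j.err
#
#   #SBATCH --partition=learnfair
#   #SBATCH --comment="for ICML deadline in 2020."
#
#   #SBATCH --nodes=1
#   #SBATCH --ntasks-per-node=1
#   #SBATCH --gres=gpu:1
#   #SBATCH --cpus-per-task=40
#   #SBATCH --time=72:00:00
#
#   source /path/to/setup.sh
#   cd /private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments
#
#   job_command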
| CausalSkillLearning-main | Experiments/cluster_run.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
# Check if CUDA is available, set device to GPU if it is, otherwise use CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class PolicyNetwork_BaseClass(torch.nn.Module):
def __init__(self):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetwork_BaseClass, self).__init__()
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
return action_probabilities.argmax()
    def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
        # Per-sample choice between sampling and greedy selection.
        whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
        sample_actions = torch.where(whether_greedy < epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
        return sample_actions
class PolicyNetwork(PolicyNetwork_BaseClass):
# REMEMBER, in the Bi-directional model, this is going to be evaluated for log-probabilities alone.
# Forward pass set up for evaluating this already.
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, output_size, number_subpolicies, number_layers=4, batch_size=1, whether_latentb_input=False):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetwork, self).__init__()
if whether_latentb_input:
self.input_size = input_size+number_subpolicies+1
else:
self.input_size = input_size+number_subpolicies
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = number_layers
self.batch_size = batch_size
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.softmax_layer = torch.nn.Softmax(dim=1)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
def forward(self, input, hidden=None, return_log_probabilities=False):
# The argument hidden_input here is the initial hidden state we want to feed to the LSTM.
# Assume inputs is the trajectory sequence.
# Input Format must be: Sequence_Length x Batch_Size x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input)
# Takes softmax of last output.
if return_log_probabilities:
# Computes log probabilities, needed for loss function and log likelihood.
preprobability_outputs = self.output_layer(outputs)
log_probabilities = self.batch_logsoftmax_layer(preprobability_outputs).squeeze(1)
probabilities = self.batch_softmax_layer(preprobability_outputs).squeeze(1)
return outputs, hidden, log_probabilities, probabilities
else:
# Compute action probabilities for sampling.
softmax_output = self.softmax_layer(self.output_layer(outputs[-1]))
return outputs, hidden, softmax_output
class ContinuousPolicyNetwork(PolicyNetwork_BaseClass):
# REMEMBER, in the Bi-directional model, this is going to be evaluated for log-probabilities alone.
# Forward pass set up for evaluating this already.
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
# def __init__(self, input_size, hidden_size, output_size, number_subpolicies, number_layers=4, batch_size=1):
# def __init__(self, input_size, hidden_size, output_size, z_space_size, number_layers=4, batch_size=1, whether_latentb_input=False):
def __init__(self, input_size, hidden_size, output_size, args, number_layers=4, whether_latentb_input=False, zero_z_dim=False, small_init=False):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousPolicyNetwork, self).__init__()
self.hidden_size = hidden_size
# The output size here must be mean+variance for each dimension.
# This is output_size*2.
self.args = args
self.output_size = output_size
self.num_layers = number_layers
self.batch_size = self.args.batch_size
if whether_latentb_input:
self.input_size = input_size+self.args.z_dimensions+1
else:
if zero_z_dim:
self.input_size = input_size
else:
self.input_size = input_size+self.args.z_dimensions
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
        # Optionally initialize the mean head with very small weights so the
        # initial outputs are not stuck near a constant.
if small_init:
for name, param in self.mean_output_layer.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.0)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=0.0001)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, action_sequence, epsilon=0.001):
# Input is the trajectory sequence of shape: Sequence_Length x 1 x Input_Size.
# Here, we also need the continuous actions as input to evaluate their logprobability / probability.
# format_input = torch.tensor(input).view(input.shape[0], self.batch_size, self.input_size).float().to(device)
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
format_action_seq = torch.from_numpy(action_sequence).to(device).float().view(action_sequence.shape[0],1,self.output_size)
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
# variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias) + epsilon
        # Remember, because of Pytorch's dynamic construction, this distribution can have its own batch size.
        # It doesn't matter if the batch size changes over different forward passes of the LSTM, because we
        # only evaluate this distribution instance's log probability with the same sequence length.
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
log_probabilities = dist.log_prob(format_action_seq)
# log_probabilities = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs)).log_prob(format_action_seq)
entropy = dist.entropy()
if self.args.debug:
print("Embedding in the policy network.")
embed()
return log_probabilities, entropy
def get_actions(self, input, greedy=False):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
if greedy:
return mean_outputs
else:
            # Remember, because of Pytorch's dynamic construction, this distribution can have its own batch size.
            # It doesn't matter if the batch size changes over different forward passes of the LSTM, because we
            # only evaluate this distribution instance's log probability with the same sequence length.
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
return dist.sample()
def reparameterized_get_actions(self, input, greedy=False, action_epsilon=0.):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action
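    # Reparameterization trick: with eps ~ N(0, I), mean + scale * eps is a
    # differentiable function of the network outputs, so gradients flow through
    # the sampled action into the mean and scale heads (the softplus output is
    # used directly as the per-dimension scale here).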
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0., hidden=None):
# Input should be a single timestep input here.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of feeding in entire input sequence, we are feeding in current timestep input and previous hidden state.
lstm_outputs, hidden = self.lstm(format_input, hidden)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action, hidden
def get_regularization_kl(self, input_z1, input_z2):
# Input is the trajectory sequence of shape: Sequence_Length x 1 x Input_Size.
# Here, we also need the continuous actions as input to evaluate their logprobability / probability.
format_input_z1 = input_z1.view(input_z1.shape[0], self.batch_size, self.input_size)
format_input_z2 = input_z2.view(input_z2.shape[0], self.batch_size, self.input_size)
hidden = None
# format_action_seq = torch.from_numpy(action_sequence).to(device).float().view(action_sequence.shape[0],1,self.output_size)
lstm_outputs_z1, _ = self.lstm(format_input_z1)
# Reset hidden?
lstm_outputs_z2, _ = self.lstm(format_input_z2)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs_z1 = self.activation_layer(self.mean_output_layer(lstm_outputs_z1))
mean_outputs_z2 = self.activation_layer(self.mean_output_layer(lstm_outputs_z2))
else:
mean_outputs_z1 = self.mean_output_layer(lstm_outputs_z1)
mean_outputs_z2 = self.mean_output_layer(lstm_outputs_z2)
variance_outputs_z1 = self.variance_activation_layer(self.variances_output_layer(lstm_outputs_z1))+self.variance_activation_bias
variance_outputs_z2 = self.variance_activation_layer(self.variances_output_layer(lstm_outputs_z2))+self.variance_activation_bias
dist_z1 = torch.distributions.MultivariateNormal(mean_outputs_z1, torch.diag_embed(variance_outputs_z1))
dist_z2 = torch.distributions.MultivariateNormal(mean_outputs_z2, torch.diag_embed(variance_outputs_z2))
kl_divergence = torch.distributions.kl_divergence(dist_z1, dist_z2)
return kl_divergence
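    # For two diagonal Gaussians N(mu1, diag(v1)) and N(mu2, diag(v2)),
    # torch.distributions.kl_divergence evaluates the closed form
    #   KL = 0.5 * sum_d [ log(v2_d / v1_d) + (v1_d + (mu1_d - mu2_d)^2) / v2_d - 1 ],
    # where v_d are the per-dimension variances, summed over the z dimensions.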
class LatentPolicyNetwork(PolicyNetwork_BaseClass):
    # REMEMBER, in the Bi-directional Information model, this is going to be evaluated for log-probabilities alone.
    # Note: this is still a single-direction LSTM. It needs to be written separately
    # from the normal sub-policy network(s) because it also predicts termination
    # probabilities. The forward pass runs lstm() directly over the entire sequence
    # (rather than iterating), since the whole input sequence is available beforehand.
    # Policy Network inherits from torch.nn.Module; we overwrite the init and forward
    # functions, and define anything else that we need.
def __init__(self, input_size, hidden_size, number_subpolicies, number_layers=4, b_exploration_bias=0., batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(LatentPolicyNetwork, self).__init__()
# Input size is actually input_size + number_subpolicies +1
self.input_size = input_size+number_subpolicies+1
self.offset_for_z = input_size+1
self.hidden_size = hidden_size
self.number_subpolicies = number_subpolicies
self.output_size = number_subpolicies
self.num_layers = number_layers
self.b_exploration_bias = b_exploration_bias
self.batch_size = batch_size
# Define LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers).to(device)
        # Initialize the LSTM with small biases and larger-gain weights so the
        # initial outputs are not stuck near a constant.
for name, param in self.lstm.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.0)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=5)
# Transform to output space - Latent z and Latent b.
self.subpolicy_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(self.hidden_size,2)
        # Softmax (and log-softmax) activations for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def forward(self, input):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_z_preprobabilities = self.subpolicy_output_layer(outputs)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_z_probabilities = self.batch_softmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
latent_z_logprobabilities = self.batch_logsoftmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_logprobabilities = self.batch_logsoftmax_layer(latent_b_preprobabilities).squeeze(1)
# Return log probabilities.
return latent_z_logprobabilities, latent_b_logprobabilities, latent_b_probabilities, latent_z_probabilities
def get_actions(self, input, greedy=False):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_z_preprobabilities = self.subpolicy_output_layer(outputs)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_z_probabilities = self.batch_softmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
if greedy==True:
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = self.select_greedy_action(latent_z_probabilities)
else:
selected_b = self.sample_action(latent_b_probabilities)
selected_z = self.sample_action(latent_z_probabilities)
return selected_b, selected_z
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# NEED TO USE DIMENSION OF ARGMAX.
return action_probabilities.argmax(dim=-1)
class ContinuousLatentPolicyNetwork(PolicyNetwork_BaseClass):
# def __init__(self, input_size, hidden_size, z_dimensions, number_layers=4, b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousLatentPolicyNetwork, self).__init__()
self.args = args
# Input size is actually input_size + number_subpolicies +1
self.input_size = input_size+self.args.z_dimensions+1
self.offset_for_z = input_size+1
self.hidden_size = hidden_size
# self.number_subpolicies = number_subpolicies
self.output_size = self.args.z_dimensions
self.num_layers = number_layers
self.b_exploration_bias = self.args.b_exploration_bias
self.batch_size = self.args.batch_size
# Define LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers).to(device)
# Transform to output space - Latent z and Latent b.
# self.subpolicy_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(self.hidden_size,2)
        # Softmax (and log-softmax) activations for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
# Initialize the LSTM parameters explicitly, to help the network escape constant-output behavior early in training.
for name, param in self.lstm.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.001)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=5)
# Also initialize mean_output_layer with a larger gain.
for name, param in self.mean_output_layer.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=2)
def forward(self, input, epsilon=0.001):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
latent_b_logprobabilities = self.batch_logsoftmax_layer(latent_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
if self.args.debug:
print("Embedding in Latent Policy.")
embed()
# Return log probabilities.
return latent_b_logprobabilities, latent_b_probabilities, self.dists
def get_actions(self, input, greedy=False, epsilon=0.001):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Scale by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
if greedy:
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = mean_outputs
else:
# selected_b = self.sample_action(latent_b_probabilities)
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = self.dists.sample()
return selected_b, selected_z
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None, previous_z=None):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input, hidden)
latent_b_preprobabilities = self.termination_output_layer(outputs)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Greedily select b.
selected_b = self.select_greedy_action(latent_b_probabilities)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Scale by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + action_epsilon
noise = torch.randn_like(variance_outputs)
if greedy:
selected_z = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
selected_z = mean_outputs + variance_outputs * noise
# If single input and previous_Z is None, this is the first timestep. So set b to 1, and don't do anything to z.
if input.shape[0]==1 and previous_z is None:
selected_b[0] = 1
# If previous_z is not None, this is not the first timestep; if b is 0, reuse the previous z.
elif input.shape[0]==1 and previous_z is not None:
if selected_b==0:
selected_z = previous_z
elif input.shape[0]>1:
# Now modify z's as per New Z Selection.
# Set initial b to 1.
selected_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if selected_b[t]==0:
selected_z[t] = selected_z[t-1]
return selected_z, selected_b, hidden
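# Reparameterization note (added comment): rather than sampling from a
# distribution object, the non-greedy branch above constructs
# z = mu + scale * eps with eps ~ N(0, I), which draws from the same family
# of Gaussians while letting gradients flow through mu and scale.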
def reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None):
# Wraps the incremental version, which already handles new z selection
# (i.e. a new z is only adopted at timesteps where b is 1).
selected_z, selected_b, hidden = self.incremental_reparam_get_actions(input, greedy=greedy, action_epsilon=action_epsilon, hidden=hidden)
return selected_z, selected_b, hidden
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# Argmax over the last (action) dimension.
return action_probabilities.argmax(dim=-1)
class ContinuousLatentPolicyNetwork_ConstrainedBPrior(ContinuousLatentPolicyNetwork):
def __init__(self, input_size, hidden_size, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousLatentPolicyNetwork_ConstrainedBPrior, self).__init__(input_size, hidden_size, args, number_layers)
# We can inherit the forward function from the parent class; we just need to modify get_actions.
self.min_skill_time = 12
self.max_skill_time = 16
def get_prior_value(self, elapsed_t, max_limit=5):
skill_time_limit = max_limit-1
if self.args.data in ['MIME','Roboturk','OrigRoboturk','FullRoboturk','Mocap']:
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
lens = np.array([12,13,14,15,16])
# probabilities = np.array([0.1,0.2,0.4,0.2,0.1])
prob_biases = np.array([[0.8,0.],[0.4,0.],[0.,0.],[0.,0.4]])
max_limit = 16
skill_time_limit = 12
else:
max_limit = 20
skill_time_limit = max_limit-1
prior_value = torch.zeros((1,2)).to(device).float()
# If at or over hard limit.
if elapsed_t>=max_limit:
prior_value[0,1]=1.
# If at or more than typical, less than hard limit:
elif elapsed_t>=skill_time_limit:
if self.args.var_skill_length:
prior_value[0] = torch.tensor(prob_biases[elapsed_t-skill_time_limit]).to(device).float()
else:
# No additional bias (prior_value is already zero, so this assignment is a no-op).
prior_value[0,1]=0.
# If less than typical.
else:
# Continue.
prior_value[0,0]=1.
return prior_value
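# Prior shape (added comment): with var_skill_length, the additive bias over
# the (continue, stop) logits as a function of elapsed_t is:
#   elapsed_t < 12   -> [1.0, 0.0]  (bias toward continuing)
#   elapsed_t == 12  -> [0.8, 0.0]
#   elapsed_t == 13  -> [0.4, 0.0]
#   elapsed_t == 14  -> [0.0, 0.0]
#   elapsed_t == 15  -> [0.0, 0.4]
#   elapsed_t >= 16  -> [0.0, 1.0]  (bias toward terminating)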
def get_actions(self, input, greedy=False, epsilon=0.001, delta_t=0):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Scale by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
############################################
prior_value = self.get_prior_value(delta_t)
# Now... add prior value.
# Only need to do this to the last timestep... because the last sampled b is going to be copied into a different variable that is stored.
latent_b_preprobabilities[-1, :, :] += prior_value
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Sample b.
selected_b = self.select_greedy_action(latent_b_probabilities)
############################################
# Now implementing hard constrained b selection.
if delta_t < self.min_skill_time:
# Continue. Set b to 0.
selected_b[-1] = 0.
elif (self.min_skill_time <= delta_t) and (delta_t < self.max_skill_time):
pass
else:
# Stop and select a new z. Set b to 1.
selected_b[-1] = 1.
# Also get z... assume the higher-level function handles the new z selection component.
if greedy:
selected_z = mean_outputs
else:
selected_z = self.dists.sample()
return selected_b, selected_z
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None, previous_z=None, delta_t=0):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input, hidden)
latent_b_preprobabilities = self.termination_output_layer(outputs)
############################################
# GET PRIOR AND ADD.
prior_value = self.get_prior_value(delta_t)
latent_b_preprobabilities[-1, :, :] += prior_value
############################################
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Greedily select b.
selected_b = self.select_greedy_action(latent_b_probabilities)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Scale by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + action_epsilon
noise = torch.randn_like(variance_outputs)
if greedy:
selected_z = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
selected_z = mean_outputs + variance_outputs * noise
# If single input and previous_Z is None, this is the first timestep. So set b to 1, and don't do anything to z.
if input.shape[0]==1 and previous_z is None:
selected_b[0] = 1
# If previous_z is not None, this is not the first timestep; if b is 0, reuse the previous z.
elif input.shape[0]==1 and previous_z is not None:
if selected_b==0:
selected_z = previous_z
elif input.shape[0]>1:
# Now modify z's as per New Z Selection.
# Set initial b to 1.
selected_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if selected_b[t]==0:
selected_z[t] = selected_z[t-1]
return selected_z, selected_b, hidden
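# Usage sketch (added comment, illustrative only): the constrained variant
# forces b=0 for the first min_skill_time (12) steps of a skill and b=1 once
# max_skill_time (16) steps have elapsed; in between, b follows the
# prior-biased softmax.
#   selected_b, selected_z = policy.get_actions(inp, greedy=False, delta_t=steps_since_last_b)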
class VariationalPolicyNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we override the init and forward functions, and define anything else we need.
# def __init__(self, input_size, hidden_size, number_subpolicies, number_layers=4, z_exploration_bias=0., b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, number_subpolicies, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(VariationalPolicyNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.number_subpolicies = number_subpolicies
self.output_size = number_subpolicies
self.num_layers = number_layers
self.z_exploration_bias = self.args.z_exploration_bias
self.b_exploration_bias = self.args.b_exploration_bias
self.z_probability_factor = self.args.z_probability_factor
self.b_probability_factor = self.args.b_probability_factor
self.batch_size = self.args.batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Transform to output space - Latent z and Latent b.
# THIS OUTPUT LAYER TAKES 2*HIDDEN SIZE as input because it's bidirectional.
self.subpolicy_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(2*self.hidden_size,2)
# Softmax activation functions for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def sample_latent_variables(self, subpolicy_outputs, termination_output_layer):
# Run sampling layers.
sample_z = self.sample_action(subpolicy_outputs)
sample_b = self.sample_action(termination_output_layer)
return sample_z, sample_b
def sample_latent_variables_epsilon_greedy(self, subpolicy_outputs, termination_output_layer, epsilon):
sample_z = self.select_epsilon_greedy_action(subpolicy_outputs, epsilon)
sample_b = self.select_epsilon_greedy_action(termination_output_layer, epsilon)
return sample_z, sample_b
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_z_preprobabilities = self.subpolicy_output_layer(outputs)*self.z_probability_factor + self.z_exploration_bias
# variational_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Add b continuation bias to the continuing option at every timestep.
variational_b_preprobabilities[:,0,0] += self.b_exploration_bias
variational_z_probabilities = self.batch_softmax_layer(variational_z_preprobabilities).squeeze(1)
variational_b_probabilities = self.batch_softmax_layer(variational_b_preprobabilities).squeeze(1)
variational_z_logprobabilities = self.batch_logsoftmax_layer(variational_z_preprobabilities).squeeze(1)
variational_b_logprobabilities = self.batch_logsoftmax_layer(variational_b_preprobabilities).squeeze(1)
# sampled_z_index, sampled_b = self.sample_latent_variables(variational_z_probabilities, variational_b_probabilities)
sampled_z_index, sampled_b = self.sample_latent_variables_epsilon_greedy(variational_z_probabilities, variational_b_probabilities, epsilon)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# # Optional experiment: force all b's after the first to 0.
# sampled_b[1:] = 0
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
return sampled_z_index, sampled_b, variational_b_logprobabilities,\
variational_z_logprobabilities, variational_b_probabilities, variational_z_probabilities, None
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
# Sampling can handle batched action_probabilities.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample()
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# Argmax over the last (action) dimension.
return action_probabilities.argmax(dim=-1)
def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
# Decide per batch element whether to sample or act greedily: draw one
# uniform variable per element and let torch.where apply the choice
# elementwise, rather than making a single choice for the whole batch.
whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
sample_actions = torch.where(whether_greedy<epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
return sample_actions
def sample_termination(self, termination_probability):
sample_terminal = torch.distributions.Bernoulli(termination_probability).sample().squeeze(0)
return sample_terminal
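# Usage sketch (added comment, illustrative only): the variational network
# encodes a full trajectory with a bidirectional LSTM and returns
# per-timestep discrete latents.
#   z_idx, b, b_logp, z_logp, b_p, z_p, _ = varnet.forward(traj, epsilon=0.1)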
class ContinuousVariationalPolicyNetwork(PolicyNetwork_BaseClass):
# def __init__(self, input_size, hidden_size, z_dimensions, number_layers=4, z_exploration_bias=0., b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousVariationalPolicyNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = z_dimensions
self.num_layers = number_layers
self.z_exploration_bias = self.args.z_exploration_bias
self.b_exploration_bias = self.args.b_exploration_bias
self.z_probability_factor = self.args.z_probability_factor
self.b_probability_factor = self.args.b_probability_factor
self.batch_size = self.args.batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Transform to output space - Latent z and Latent b.
# THIS OUTPUT LAYER TAKES 2*HIDDEN SIZE as input because it's bidirectional.
self.termination_output_layer = torch.nn.Linear(2*self.hidden_size,2)
# Softmax activation functions for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=-1)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, epsilon, new_z_selection=True, var_epsilon=0.001):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Add b continuation bias to the continuing option at every timestep.
variational_b_preprobabilities[:,0,0] += self.b_exploration_bias
variational_b_probabilities = self.batch_softmax_layer(variational_b_preprobabilities).squeeze(1)
variational_b_logprobabilities = self.batch_logsoftmax_layer(variational_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for variance because needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + var_epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
sampled_b = self.select_epsilon_greedy_action(variational_b_probabilities, epsilon)
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.reparam:
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
else:
sampled_z_index = self.dists.sample().squeeze(1)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
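# KL note (added comment): for diagonal Gaussians N(mu, diag(v)) against the
# standard normal, this evaluates the closed form
# 0.5 * sum_i (v_i + mu_i^2 - 1 - log v_i), giving one value per timestep.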
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
# if self.args.debug:
# print("#################################")
# print("Embedding in Variational Network.")
# embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities,\
variational_z_logprobabilities, variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
# Sampling can handle batched action_probabilities.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample()
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# Argmax over the last (action) dimension.
return action_probabilities.argmax(dim=-1)
def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
# Decide per batch element whether to sample or act greedily: draw one
# uniform variable per element and let torch.where apply the choice
# elementwise, rather than making a single choice for the whole batch.
whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
sample_actions = torch.where(whether_greedy<epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
return sample_actions
class ContinuousVariationalPolicyNetwork_BPrior(ContinuousVariationalPolicyNetwork):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousVariationalPolicyNetwork_BPrior, self).__init__(input_size, hidden_size, z_dimensions, args, number_layers)
def get_prior_value(self, elapsed_t, max_limit=5):
skill_time_limit = max_limit-1
if self.args.data in ['MIME','Roboturk','OrigRoboturk','FullRoboturk','Mocap']:
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
lens = np.array([12,13,14,15,16])
# probabilities = np.array([0.1,0.2,0.4,0.2,0.1])
prob_biases = np.array([[0.8,0.],[0.4,0.],[0.,0.],[0.,0.4]])
max_limit = 16
skill_time_limit = 12
else:
max_limit = 20
skill_time_limit = max_limit-1
prior_value = torch.zeros((1,2)).to(device).float()
# If at or over hard limit.
if elapsed_t>=max_limit:
prior_value[0,1]=1.
# If at or more than typical, less than hard limit:
elif elapsed_t>=skill_time_limit:
if self.args.var_skill_length:
prior_value[0] = torch.tensor(prob_biases[elapsed_t-skill_time_limit]).to(device).float()
else:
# No additional bias (prior_value is already zero, so this assignment is a no-op).
prior_value[0,1]=0.
# If less than typical.
else:
# Continue.
prior_value[0,0]=1.
return prior_value
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for variance because needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
prev_time = 0
# Create variables for prior and probs.
prior_values = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_probabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_logprobabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
sampled_b = torch.zeros(input.shape[0]).to(device).int()
sampled_b[0] = 1
for t in range(1,input.shape[0]):
# Compute prior value.
delta_t = t-prev_time
# if self.args.debug:
# print("##########################")
# print("Time: ",t, " Prev Time:",prev_time, " Delta T:",delta_t)
prior_values[t] = self.get_prior_value(delta_t, max_limit=self.args.skill_length)
# Construct probabilities.
variational_b_probabilities[t,0,:] = self.batch_softmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
variational_b_logprobabilities[t,0,:] = self.batch_logsoftmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
sampled_b[t] = self.select_epsilon_greedy_action(variational_b_probabilities[t:t+1], epsilon)
if sampled_b[t]==1:
prev_time = t
# if self.args.debug:
# print("Sampled b:",sampled_b[t])
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.reparam:
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
else:
sampled_z_index = self.dists.sample().squeeze(1)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
if self.args.debug:
print("#################################")
print("Embedding in Variational Network.")
embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities.squeeze(1), \
variational_z_logprobabilities, variational_b_probabilities.squeeze(1), variational_z_probabilities, kl_divergence, prior_loglikelihood
class ContinuousVariationalPolicyNetwork_ConstrainedBPrior(ContinuousVariationalPolicyNetwork_BPrior):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousVariationalPolicyNetwork_ConstrainedBPrior, self).__init__(input_size, hidden_size, z_dimensions, args, number_layers)
self.min_skill_time = 12
self.max_skill_time = 16
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for variance because needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
# Create variables for prior and probabilities.
prior_values = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_probabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_logprobabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
#######################################
################ Set B ################
#######################################
# Set the first b to 1, and the time b was == 1.
sampled_b = torch.zeros(input.shape[0]).to(device).int()
sampled_b[0] = 1
prev_time = 0
for t in range(1,input.shape[0]):
# Compute time since the last b occurred.
delta_t = t-prev_time
# Compute prior value.
prior_values[t] = self.get_prior_value(delta_t, max_limit=self.args.skill_length)
# Construct probabilities.
variational_b_probabilities[t,0,:] = self.batch_softmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
variational_b_logprobabilities[t,0,:] = self.batch_logsoftmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
# Now Implement Hard Restriction on Selection of B's.
if delta_t < self.min_skill_time:
# Set B to 0. I.e. Continue.
# variational_b_probabilities[t,0,:] = variational_b_probabilities[t,0,:]*0
# variational_b_probabilities[t,0,0] += 1
sampled_b[t] = 0.
elif (self.min_skill_time <= delta_t) and (delta_t < self.max_skill_time):
# Sample b.
sampled_b[t] = self.select_epsilon_greedy_action(variational_b_probabilities[t:t+1], epsilon)
elif self.max_skill_time <= delta_t:
# Set B to 1. I.e. select new z.
sampled_b[t] = 1.
# If b is 1, set the previous time to now.
if sampled_b[t]==1:
prev_time = t
#######################################
################ Set Z ################
#######################################
# Now set the z's. If greedy, just return the means.
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
# If not greedy, then reparameterize.
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
# Modify z's based on whether b was 1 or 0. This part should remain the same.
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
if self.args.debug:
print("#################################")
print("Embedding in Variational Network.")
embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities.squeeze(1), \
variational_z_logprobabilities, variational_b_probabilities.squeeze(1), variational_z_probabilities, kl_divergence, prior_loglikelihood
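# Usage sketch (added comment, illustrative only): forward returns, in order,
# sampled z's, sampled b's, b log-probs, z log-probs, b probs, z probs (None
# in the continuous case), the KL to the standard normal, and the prior
# log-likelihood of the sampled z's.
#   outputs = varnet.forward(traj, epsilon=0.)  # epsilon=0 returns the mean z's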
class EncoderNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we override the init and forward functions, and define anything else we need.
def __init__(self, input_size, hidden_size, output_size, number_subpolicies=4, batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(EncoderNetwork, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.number_subpolicies = number_subpolicies
self.num_layers = 5
self.batch_size = batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Define output layers for the LSTM, and activations for this output layer.
# Because it's bidirectional, once we compute <outputs, hidden = self.lstm(input)>, we must concatenate:
# From reverse LSTM: <outputs[0,:,hidden_size:]> and from the forward LSTM: <outputs[-1,:,:hidden_size]>.
# (Refer - https://towardsdatascience.com/understanding-bidirectional-rnn-in-pytorch-5bd25a5dd66 )
# Because of this, the output layer must take in size 2*hidden.
self.hidden_layer = torch.nn.Linear(2*self.hidden_size, 2*self.hidden_size)
self.output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Softmax and LogSoftmax activation functions for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def forward(self, input, epsilon):
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assumes input is a torch tensor (the view() call below would fail on a numpy array).
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input)
concatenated_outputs = torch.cat([outputs[0,:,self.hidden_size:],outputs[-1,:,:self.hidden_size]],dim=-1).view((1,1,-1))
# Calculate preprobs.
preprobabilities = self.output_layer(self.hidden_layer(concatenated_outputs))
probabilities = self.batch_softmax_layer(preprobabilities)
logprobabilities = self.batch_logsoftmax_layer(preprobabilities)
latent_z = self.select_epsilon_greedy_action(probabilities, epsilon=epsilon)
# Return latentz_encoding as output layer of last outputs.
return latent_z, logprobabilities, None, None
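# Usage sketch (added comment, illustrative only): the discrete encoder maps
# a whole trajectory to a single latent z, chosen epsilon-greedily from a
# softmax over subpolicies.
#   z, logp, _, _ = encoder.forward(traj, epsilon=0.1)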
class ContinuousEncoderNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, output_size, args, batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousEncoderNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 5
self.batch_size = batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Define output layers for the LSTM, and activations for this output layer.
# # Because it's bidirectional, once we compute <outputs, hidden = self.lstm(input)>, we must concatenate:
# # From reverse LSTM: <outputs[0,:,hidden_size:]> and from the forward LSTM: <outputs[-1,:,:hidden_size]>.
# # (Refer - https://towardsdatascience.com/understanding-bidirectional-rnn-in-pytorch-5bd25a5dd66 )
# # Because of this, the output layer must take in size 2*hidden.
# self.hidden_layer = torch.nn.Linear(2*self.hidden_size, self.hidden_size)
# self.output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Softmax and LogSoftmax activation functions for the Bernoulli termination probability and latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, epsilon=0.001, z_sample_to_evaluate=None):
# The epsilon argument exists only so this function's signature matches the discrete encoder network's.
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assumes input is a torch tensor (the view() call below would fail on a numpy array).
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input)
concatenated_outputs = torch.cat([outputs[0,:,self.hidden_size:],outputs[-1,:,:self.hidden_size]],dim=-1).view((1,1,-1))
# Predict Gaussian means and variances.
# if self.args.mean_nonlinearity:
# mean_outputs = self.activation_layer(self.mean_output_layer(concatenated_outputs))
# else:
mean_outputs = self.mean_output_layer(concatenated_outputs)
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(concatenated_outputs))+self.variance_activation_bias) + epsilon
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
# Whether to use the reparameterization trick to retrieve the latent z.
if self.args.reparam:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
latent_z = mean_outputs + variance_outputs * noise
# Ought to be able to pass gradients through this latent_z now.
else:
# Retrieve sample from the distribution as the value of the latent variable.
latent_z = dist.sample()
# Calculate entropy for training.
entropy = dist.entropy()
# Also retrieve log probability of the same.
logprobability = dist.log_prob(latent_z)
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(dist, standard_distribution)
if self.args.debug:
print("###############################")
print("Embedding in Encoder Network.")
embed()
if z_sample_to_evaluate is None:
return latent_z, logprobability, entropy, kl_divergence
else:
logprobability = dist.log_prob(z_sample_to_evaluate)
return logprobability
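# Usage sketch (added comment, illustrative only): the continuous encoder
# returns one Gaussian latent per trajectory, plus quantities for training.
#   z, logp, entropy, kl = encoder.forward(traj)
#   # With z_sample_to_evaluate set, it instead scores that sample:
#   logp_of_query = encoder.forward(traj, z_sample_to_evaluate=z_query)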
class CriticNetwork(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(CriticNetwork, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.number_layers = number_layers
self.batch_size = 1
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.number_layers)
self.output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
def forward(self, input):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict critic value for each timestep.
critic_value = self.output_layer(lstm_outputs)
return critic_value
class ContinuousMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(ContinuousMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
self.variance_activation_layer = torch.nn.Softplus()
def forward(self, input, greedy=False, action_epsilon=0.0001):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
mean_outputs = self.output_layer(h4)
variance_outputs = self.variance_activation_layer(self.output_layer(h4))
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action
def reparameterized_get_actions(self, input, greedy=False, action_epsilon=0.0001):
return self.forward(input, greedy, action_epsilon)
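# Note (added comment): ContinuousMLP reuses the same self.output_layer for
# both the mean and the softplus-activated scale, so the two heads share
# parameters; the recurrent policies above use separate mean and variance
# layers instead.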
class CriticMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(CriticMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.batch_size = 1
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
def forward(self, input):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
# Predict critic value for each timestep.
critic_value = self.output_layer(h4)
return critic_value
class DiscreteMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(DiscreteMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
# Use dim=-1 so the (log)softmax works on the flat Batch_Size x Output
# tensors this MLP assumes (dim=2 would fail on 2D inputs).
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
self.batch_softmax_layer = torch.nn.Softmax(dim=-1)
def forward(self, input):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
# Compute preprobability with output layer.
preprobability_outputs = self.output_layer(h4)
# Compute probabilities and logprobabilities.
log_probabilities = self.batch_logsoftmax_layer(preprobability_outputs)
probabilities = self.batch_softmax_layer(preprobability_outputs)
return log_probabilities, probabilities
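# Usage sketch (added comment, illustrative only): the MLP heads take flat
# (Batch_Size x Input_Size) inputs.
#   qnet = DiscreteMLP(input_size=10, hidden_size=64, output_size=4)
#   log_probs, probs = qnet.forward(torch.randn(8, 10))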
| CausalSkillLearning-main | Experiments/PolicyNetworks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import mocap_processing, glob, numpy as np, os
from mocap_processing.motion.pfnn import Animation, BVH
from IPython import embed
# Define a function that loads global and local positions, and the rotations, from a data file.
def load_animation_data(bvh_filename):
animation, joint_names, time_per_frame = BVH.load(bvh_filename)
global_positions = Animation.positions_global(animation)
# return global_positions, joint_parents, time_per_frame
return global_positions, animation.positions, animation.rotations, animation
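# Shape note (added comment): global_positions is typically
# (num_frames, num_joints, 3); animation.rotations is a quaternion object
# whose raw array is exposed as .qs (used below when saving).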
# Set directory.
directory = "/checkpoint/dgopinath/amass/CMU"
save_directory = "/checkpoint/tanmayshankar/Mocap"
# Get file list.
filelist = glob.glob(os.path.join(directory,"*/*.bvh"))
demo_list = []
print("Starting to preprocess data.")
for i in range(len(filelist)):
print("Processing file number: ",i, " of ",len(filelist))
# Get filename.
filename = os.path.join(directory, filelist[i])
# Actually load file.
global_positions, local_positions, local_rotations, animation = load_animation_data(filename)
# Create data element object.
data_element = {}
data_element['global_positions'] = global_positions
data_element['local_positions'] = local_positions
# Get quaternion as array.
data_element['local_rotations'] = local_rotations.qs
data_element['animation'] = animation
demo_list.append(data_element)
demo_array = np.array(demo_list)
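# Readback sketch (added comment, illustrative only): the array saved below
# holds Python dicts (an object array), so loading it later requires
# allow_pickle:
#   demos = np.load(os.path.join(save_directory, "Demo_Array.npy"), allow_pickle=True)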
np.save(os.path.join(save_directory,"Demo_Array.npy"),demo_array)
| CausalSkillLearning-main | Experiments/Processing_MocapData.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
# FOR NOW: USE ONLY till 3200 images.
return 3200
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = cv2.imread(os.path.join(self.dataset_directory,"Image{0}.png".format(index)))
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Image{0}_Traj1.npy".format(index))).astype(float)
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence
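# Usage sketch (added comment, illustrative only; the directory path is
# hypothetical): each item is an (image, coordinate trajectory, action
# sequence) triple, with actions indexing into self.action_map.
#   ds = GridWorldDataset("/path/to/Demos2")
#   image, traj, actions = ds[0]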
class SmallMapsDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
return 4000
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = np.load(os.path.join(self.dataset_directory,"Map{0}.npy".format(index)))
time_limit = 20
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Map{0}_Traj1.npy".format(index))).astype(float)[:time_limit]
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence
class ToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_actions.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_actions.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
class ContinuousToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_continuous.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_continuous.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_continuous.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_continuous.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousDirectedToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_directed_continuous.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_directed_continuous.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_directed_continuous.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_directed_continuous.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousNonZeroToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_continuous_nonzero.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_continuous_nonzero.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_continuous_nonzero.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_continuous_nonzero.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousDirectedNonZeroToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_dir_cont_nonzero.npy")
self.a_path = os.path.join(self.dataset_directory,"A_dir_cont_nonzero.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_dir_cont_nonzero.npy")
self.b_path = os.path.join(self.dataset_directory,"B_dir_cont_nonzero.npy")
self.g_path = os.path.join(self.dataset_directory,"G_dir_cont_nonzero.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class GoalDirectedDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_goal_directed.npy")
self.a_path = os.path.join(self.dataset_directory,"A_goal_directed.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_goal_directed.npy")
self.b_path = os.path.join(self.dataset_directory,"B_goal_directed.npy")
self.g_path = os.path.join(self.dataset_directory,"G_goal_directed.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
class DeterministicGoalDirectedDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_deter_goal_directed.npy")
self.a_path = os.path.join(self.dataset_directory,"A_deter_goal_directed.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_deter_goal_directed.npy")
self.b_path = os.path.join(self.dataset_directory,"B_deter_goal_directed.npy")
self.g_path = os.path.join(self.dataset_directory,"G_deter_goal_directed.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
self.goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
def get_goal_position(self, index):
return self.goal_states[self.G_array[index]]
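# Minimal usage sketch (illustrative; the directory argument is whatever folder holds the
# generated .npy demo files). All of the toy dataset classes above share this interface:
#   dataset = DeterministicGoalDirectedDataset("scripts/DatasetPlanning/CreateDemos/Demos2")
#   traj, actions = dataset[0]                    # trajectory and action sequence
#   b, y = dataset.get_latent_variables(0)        # latent b and latent z sequences
#   goal_position = dataset.get_goal_position(0)  # one of the four corner goal states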
class SeparableDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_separable.npy")
self.a_path = os.path.join(self.dataset_directory,"A_separable.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_separable.npy")
self.b_path = os.path.join(self.dataset_directory,"B_separable.npy")
self.g_path = os.path.join(self.dataset_directory,"G_separable.npy")
self.s_path = os.path.join(self.dataset_directory,"StartConfig_separable.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
self.S_array = np.load(self.s_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
def get_startconfig(self, index):
return self.S_array[index] | CausalSkillLearning-main | Experiments/DataLoaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
from PolicyNetworks import *
from Visualizers import BaxterVisualizer, SawyerVisualizer, ToyDataVisualizer #, MocapVisualizer
# Note: the MocapVisualizer import is commented out above, but the Mocap code paths below
# reference it; it must be re-enabled to run with --data=Mocap.
import TFLogger, DMP, RLUtils
# Check if CUDA is available, set device to GPU if it is, otherwise use CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class PolicyManager_BaseClass():
def __init__(self):
super(PolicyManager_BaseClass, self).__init__()
def setup(self):
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
np.set_printoptions(suppress=True,precision=2)
self.create_networks()
self.create_training_ops()
# self.create_util_ops()
# self.initialize_gt_subpolicies()
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
elif (self.args.setting=='transfer' and isinstance(self, PolicyManager_Transfer)) or \
(self.args.setting=='cycle_transfer' and isinstance(self, PolicyManager_CycleConsistencyTransfer)):
extent = self.extent
else:
extent = len(self.dataset)-self.test_set_size
self.index_list = np.arange(0,extent)
self.initialize_plots()
def initialize_plots(self):
if self.args.name is not None:
logdir = os.path.join(self.args.logdir, self.args.name)
if not(os.path.isdir(logdir)):
os.mkdir(logdir)
logdir = os.path.join(logdir, "logs")
if not(os.path.isdir(logdir)):
os.mkdir(logdir)
# Create TF Logger.
self.tf_logger = TFLogger.Logger(logdir)
else:
self.tf_logger = TFLogger.Logger()
if self.args.data=='MIME':
self.visualizer = BaxterVisualizer()
# self.state_dim = 16
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.visualizer = SawyerVisualizer()
# self.state_dim = 8
elif self.args.data=='Mocap':
self.visualizer = MocapVisualizer(args=self.args)
else:
self.visualizer = ToyDataVisualizer()
self.rollout_gif_list = []
self.gt_gif_list = []
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
def write_and_close(self):
# Export accumulated scalar logs to JSON and close the summary writer.
self.writer.export_scalars_to_json("./all_scalars.json")
self.writer.close()
def collect_inputs(self, i, get_latents=False):
if self.args.data=='DeterGoal':
sample_traj, sample_action_seq = self.dataset[i]
latent_b_seq, latent_z_seq = self.dataset.get_latent_variables(i)
start = 0
if self.args.traj_length>0:
sample_action_seq = sample_action_seq[start:self.args.traj_length-1]
latent_b_seq = latent_b_seq[start:self.args.traj_length-1]
latent_z_seq = latent_z_seq[start:self.args.traj_length-1]
sample_traj = sample_traj[start:self.args.traj_length]
else:
# Traj length is going to be -1 here.
# No need to modify the action sequence; it is already one step shorter than the trajectory.
sample_action_seq = sample_action_seq[start:]
sample_traj = sample_traj[start:]
latent_b_seq = latent_b_seq[start:]
latent_z_seq = latent_z_seq[start:]
# The trajectory is going to be one step longer than the action sequence, because action sequences are constructed from state differences. Instead, truncate trajectory to length of action sequence.
# Now manage concatenated trajectory differently - {{s0,_},{s1,a0},{s2,a1},...,{sn,an-1}}.
concatenated_traj = self.concat_state_action(sample_traj, sample_action_seq)
old_concatenated_traj = self.old_concat_state_action(sample_traj, sample_action_seq)
if self.args.data=='DeterGoal':
self.conditional_information = np.zeros((self.args.condition_size))
self.conditional_information[self.dataset.get_goal(i)] = 1
self.conditional_information[4:] = self.dataset.get_goal_position(i)
else:
self.conditional_information = np.zeros((self.args.condition_size))
if get_latents:
return sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj, latent_b_seq, latent_z_seq
else:
return sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj
elif self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# If we're imitating... select demonstrations from the particular task.
if self.args.setting=='imitation' and self.args.data=='Roboturk':
data_element = self.dataset.get_task_demo(self.demo_task_index, i)
else:
data_element = self.dataset[i]
if not(data_element['is_valid']):
return None, None, None, None
trajectory = data_element['demo']
# If normalization is set to some value.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
trajectory = (trajectory-self.norm_sub_value)/self.norm_denom_value
action_sequence = np.diff(trajectory,axis=0)
self.current_traj_len = len(trajectory)
if self.args.data=='MIME':
self.conditional_information = np.zeros((self.conditional_info_size))
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
robot_states = data_element['robot-state']
object_states = data_element['object-state']
self.conditional_information = np.zeros((self.conditional_info_size))
# Don't set this if pretraining / baseline.
if self.args.setting=='learntsub' or self.args.setting=='imitation':
self.conditional_information = np.zeros((len(trajectory),self.conditional_info_size))
self.conditional_information[:,:self.cond_robot_state_size] = robot_states
# End the slice at object_states.shape[-1] (rather than the fixed cond_object_state_size) because the object-state size varies across demonstrations.
self.conditional_information[:,self.cond_robot_state_size:self.cond_robot_state_size+object_states.shape[-1]] = object_states
# Setting task ID too.
self.conditional_information[:,-self.number_tasks+data_element['task-id']] = 1.
# Concatenate
concatenated_traj = self.concat_state_action(trajectory, action_sequence)
old_concatenated_traj = self.old_concat_state_action(trajectory, action_sequence)
if self.args.setting=='imitation':
action_sequence = RLUtils.resample(data_element['demonstrated_actions'],len(trajectory))
concatenated_traj = np.concatenate([trajectory, action_sequence],axis=1)
return trajectory, action_sequence, concatenated_traj, old_concatenated_traj
def train(self, model=None):
if model:
print("Loading model in training.")
self.load_all_models(model)
counter = self.args.initial_counter_value
# For number of training epochs.
for e in range(self.number_epochs):
self.current_epoch_running = e
print("Starting Epoch: ",e)
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
np.random.shuffle(self.index_list)
if self.args.debug:
print("Embedding in Outer Train Function.")
embed()
# For every item in the epoch:
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
elif self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
extent = self.extent
else:
extent = len(self.dataset)-self.test_set_size
for i in range(extent):
print("Epoch: ",e," Trajectory:",i, "Datapoint: ", self.index_list[i])
self.run_iteration(counter, self.index_list[i])
counter = counter+1
if e%self.args.eval_freq==0:
self.automatic_evaluation(e)
self.write_and_close()
def automatic_evaluation(self, e):
# Writing new automatic evaluation that parses arguments and creates an identical command loading the appropriate model.
# Note: If the initial command loads a model, ignore that.
command_args = self.args._get_kwargs()
base_command = 'python Master.py --train=0 --model={0}'.format("Experiment_Logs/{0}/saved_models/Model_epoch{1}".format(self.args.name, e))
if self.args.data=='Mocap':
base_command = './xvfb-run-safe ' + base_command
# For every argument in the command arguments, add it to the base command with the value used, unless it's train or model.
for ar in command_args:
# Skip model and train, because we need to set these manually.
if ar[0]=='model' or ar[0]=='train':
pass
# Add the rest
else:
base_command = base_command + ' --{0}={1}'.format(ar[0],ar[1])
cluster_command = 'python cluster_run.py --partition=learnfair --name={0}_Eval --cmd=\'{1}\''.format(self.args.name, base_command)
subprocess.call([cluster_command],shell=True)
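# Example of the command this assembles (hypothetical run name "run1" and epoch 10):
#   python Master.py --train=0 --model=Experiment_Logs/run1/saved_models/Model_epoch10 --name=run1 ...
# which is then wrapped for the cluster as:
#   python cluster_run.py --partition=learnfair --name=run1_Eval --cmd='<base command>'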
def visualize_robot_data(self):
self.N = 100
self.rollout_timesteps = self.args.traj_length
if self.args.data=='MIME':
self.visualizer = BaxterVisualizer()
# self.state_dim = 16
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.visualizer = SawyerVisualizer()
# self.state_dim = 8
elif self.args.data=='Mocap':
self.visualizer = MocapVisualizer(args=self.args)
# Mocap has comparatively more invalid datapoints.
self.N = 100
else:
self.visualizer = ToyDataVisualizer()
self.latent_z_set = np.zeros((self.N,self.latent_z_dimensionality))
# These are lists because they're variable length individually.
self.indices = []
self.trajectory_set = []
self.trajectory_rollout_set = []
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.rollout_gif_list = []
self.gt_gif_list = []
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
self.max_len = 0
for i in range(self.N):
print("#########################################")
print("Getting visuals for trajectory: ",i)
latent_z, sample_traj, sample_action_seq = self.run_iteration(0, i, return_z=True)
if latent_z is not None:
self.indices.append(i)
if len(sample_traj)>self.max_len:
self.max_len = len(sample_traj)
self.latent_z_set[i] = copy.deepcopy(latent_z.detach().cpu().numpy())
trajectory_rollout = self.get_robot_visuals(i, latent_z, sample_traj)
# self.trajectory_set[i] = copy.deepcopy(sample_traj)
# self.trajectory_rollout_set[i] = copy.deepcopy(trajectory_rollout)
self.trajectory_set.append(copy.deepcopy(sample_traj))
self.trajectory_rollout_set.append(copy.deepcopy(trajectory_rollout))
# Get MIME embedding for rollout and GT trajectories, with same Z embedding.
embedded_z = self.get_robot_embedding()
gt_animation_object = self.visualize_robot_embedding(embedded_z, gt=True)
rollout_animation_object = self.visualize_robot_embedding(embedded_z, gt=False)
self.write_embedding_HTML(gt_animation_object,prefix="GT")
self.write_embedding_HTML(rollout_animation_object,prefix="Rollout")
# Save webpage.
self.write_results_HTML()
def rollout_robot_trajectory(self, trajectory_start, latent_z, rollout_length=None):
subpolicy_inputs = torch.zeros((1,2*self.state_dim+self.latent_z_dimensionality)).to(device).float()
subpolicy_inputs[0,:self.state_dim] = torch.tensor(trajectory_start).to(device).float()
subpolicy_inputs[:,2*self.state_dim:] = torch.tensor(latent_z).to(device).float()
if rollout_length is not None:
length = rollout_length-1
else:
length = self.rollout_timesteps-1
for t in range(length):
actions = self.policy_network.get_actions(subpolicy_inputs, greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:self.state_dim]+action_to_execute
# New input row.
input_row = torch.zeros((1,2*self.state_dim+self.latent_z_dimensionality)).to(device).float()
input_row[0,:self.state_dim] = new_state
# Feed in the ORIGINAL prediction from the network as input. Not the downscaled thing.
input_row[0,self.state_dim:2*self.state_dim] = actions[-1].squeeze(1)
input_row[0,2*self.state_dim:] = latent_z
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
trajectory = subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy()
return trajectory
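# Usage sketch (hypothetical rollout length): decode a trajectory from a latent skill code,
# starting at the first state of a demonstration, as done in get_robot_visuals() below:
#   traj = self.rollout_robot_trajectory(sample_traj[0], latent_z, rollout_length=16)
#   traj.shape  # -> (16, self.state_dim)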
def get_robot_visuals(self, i, latent_z, trajectory, return_image=False):
# 1) Feed Z into policy, rollout trajectory.
trajectory_rollout = self.rollout_robot_trajectory(trajectory[0], latent_z, rollout_length=trajectory.shape[0])
# 2) Unnormalize data.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
unnorm_gt_trajectory = (trajectory*self.norm_denom_value)+self.norm_sub_value
unnorm_pred_trajectory = (trajectory_rollout*self.norm_denom_value) + self.norm_sub_value
else:
unnorm_gt_trajectory = trajectory
unnorm_pred_trajectory = trajectory_rollout
if self.args.data=='Mocap':
# Get animation object from dataset.
animation_object = self.dataset[i]['animation']
# 3) Run unnormalized ground truth trajectory in visualizer.
ground_truth_gif = self.visualizer.visualize_joint_trajectory(unnorm_gt_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_GT.gif".format(i), return_and_save=True)
# 4) Run unnormalized rollout trajectory in visualizer.
rollout_gif = self.visualizer.visualize_joint_trajectory(unnorm_pred_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_Rollout.gif".format(i), return_and_save=True)
self.gt_gif_list.append(copy.deepcopy(ground_truth_gif))
self.rollout_gif_list.append(copy.deepcopy(rollout_gif))
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
if return_image:
return unnorm_pred_trajectory, ground_truth_gif, rollout_gif
else:
return unnorm_pred_trajectory
else:
if return_image:
return trajectory_rollout, ground_truth_gif, rollout_gif
else:
return trajectory_rollout
def write_results_HTML(self):
# Retrieve, append, and print images from datapoints across different models.
print("Writing HTML File.")
# Open Results HTML file.
with open(os.path.join(self.dir_name,'Results_{}.html'.format(self.args.name)),'w') as html_file:
# Start HTML doc.
html_file.write('<html>')
html_file.write('<body>')
html_file.write('<p> Model: {0}</p>'.format(self.args.name))
html_file.write('<p> Average Trajectory Distance: {0}</p>'.format(self.mean_distance))
for i in range(self.N):
if i%100==0:
print("Datapoint:",i)
html_file.write('<p> <b> Trajectory {} </b></p>'.format(i))
file_prefix = self.dir_name
# Create gif_list by prefixing base_gif_list with file prefix.
html_file.write('<div style="display: flex; justify-content: row;"> <img src="Traj_{0}_GT.gif"/> <img src="Traj_{0}_Rollout.gif"/> </div>'.format(i))
# Add gap space.
html_file.write('<p> </p>')
html_file.write('</body>')
html_file.write('</html>')
def write_embedding_HTML(self, animation_object, prefix=""):
print("Writing Embedding File.")
t1 = time.time()
# Open Results HTML file.
with open(os.path.join(self.dir_name,'Embedding_{0}_{1}.html'.format(prefix,self.args.name)),'w') as html_file:
# Start HTML doc.
html_file.write('<html>')
html_file.write('<body>')
html_file.write('<p> Model: {0}</p>'.format(self.args.name))
html_file.write(animation_object.to_html5_video())
# print(animation_object.to_html5_video(), file=html_file)
html_file.write('</body>')
html_file.write('</html>')
animation_object.save(os.path.join(self.dir_name,'{0}_Embedding_Video.mp4'.format(self.args.name)))
# animation_object.save(os.path.join(self.dir_name,'{0}_Embedding_Video.mp4'.format(self.args.name)), writer='imagemagick')
t2 = time.time()
print("Time taken to write this embedding: ",t2-t1)
def get_robot_embedding(self, return_tsne_object=False):
# Mean and variance normalize z.
mean = self.latent_z_set.mean(axis=0)
std = self.latent_z_set.std(axis=0)
normed_z = (self.latent_z_set-mean)/std
tsne = skl_manifold.TSNE(n_components=2,random_state=0,perplexity=self.args.perplexity)
embedded_zs = tsne.fit_transform(normed_z)
scale_factor = 1
scaled_embedded_zs = scale_factor*embedded_zs
if return_tsne_object:
return scaled_embedded_zs, tsne
else:
return scaled_embedded_zs
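# Usage sketch: these embedding utilities are chained in visualize_robot_data() above:
#   embedded_z = self.get_robot_embedding()                      # (N, 2) t-SNE coordinates
#   anim = self.visualize_robot_embedding(embedded_z, gt=True)   # matplotlib FuncAnimation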
def visualize_robot_embedding(self, scaled_embedded_zs, gt=False):
# Create figure and axis objects.
matplotlib.rcParams['figure.figsize'] = [50, 50]
fig, ax = plt.subplots()
# number_samples = 400
number_samples = self.N
# Create a scatter plot of the embedding itself. The plot does not seem to work without this.
ax.scatter(scaled_embedded_zs[:number_samples,0],scaled_embedded_zs[:number_samples,1])
ax.axis('off')
ax.set_title("Embedding of Latent Representation of our Model",fontdict={'fontsize':40})
artists = []
# For number of samples in TSNE / Embedding, create a Image object for each of them.
for i in range(len(self.indices)):
if i%10==0:
print(i)
# Create offset image (so that we can place it where we choose), with specific zoom.
if gt:
imagebox = OffsetImage(self.gt_gif_list[i][0],zoom=0.4)
else:
imagebox = OffsetImage(self.rollout_gif_list[i][0],zoom=0.4)
# Create an annotation box to put the offset image into. specify offset image, position, and disable bounding frame.
ab = AnnotationBbox(imagebox, (scaled_embedded_zs[self.indices[i],0], scaled_embedded_zs[self.indices[i],1]), frameon=False)
# Add the annotation box artist to the list artists.
artists.append(ax.add_artist(ab))
def update(t):
# for i in range(number_samples):
for i in range(len(self.indices)):
if gt:
imagebox = OffsetImage(self.gt_gif_list[i][min(t, len(self.gt_gif_list[i])-1)],zoom=0.4)
else:
imagebox = OffsetImage(self.rollout_gif_list[i][min(t, len(self.rollout_gif_list[i])-1)],zoom=0.4)
ab = AnnotationBbox(imagebox, (scaled_embedded_zs[self.indices[i],0], scaled_embedded_zs[self.indices[i],1]), frameon=False)
artists.append(ax.add_artist(ab))
# update_len = 20
anim = FuncAnimation(fig, update, frames=np.arange(0, self.max_len), interval=200)
return anim
class PolicyManager_Pretrain(PolicyManager_BaseClass):
def __init__(self, number_policies=4, dataset=None, args=None):
if args.setting=='imitation':
super(PolicyManager_Pretrain, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
else:
super(PolicyManager_Pretrain, self).__init__()
self.args = args
self.data = self.args.data
# Not used if discrete_z is false.
self.number_policies = number_policies
self.dataset = dataset
# Global input size: trajectory at every step - x,y,action
# Inputs is now states and actions.
# Model size parameters
# if self.args.data=='Continuous' or self.args.data=='ContinuousDir' or self.args.data=='ContinuousNonZero' or self.args.data=='ContinuousDirNZ' or self.args.data=='GoalDirected' or self.args.data=='Separable':
self.state_size = 2
self.state_dim = 2
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
# Number of actions
self.output_size = 2
self.latent_z_dimensionality = self.args.z_dimensions
self.number_layers = self.args.number_layers
self.traj_length = 5
self.number_epochs = self.args.epochs
self.test_set_size = 500
if self.args.data=='MIME':
self.state_size = 16
self.state_dim = 16
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.latent_z_dimensionality = self.args.z_dimensions
self.number_layers = self.args.number_layers
self.traj_length = self.args.traj_length
self.number_epochs = self.args.epochs
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/MIME_Means.npy")
self.norm_denom_value = np.load("Statistics/MIME_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/MIME_Min.npy")
self.norm_denom_value = np.load("Statistics/MIME_Max.npy") - self.norm_sub_value
# Max of robot_state + object_state sizes across all Baxter environments.
self.cond_robot_state_size = 60
self.cond_object_state_size = 25
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
if self.args.gripper:
self.state_size = 8
self.state_dim = 8
else:
self.state_size = 7
self.state_dim = 7
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.number_layers = self.args.number_layers
self.traj_length = self.args.traj_length
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/Roboturk_Mean.npy")
self.norm_denom_value = np.load("Statistics/Roboturk_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/Roboturk_Min.npy")
self.norm_denom_value = np.load("Statistics/Roboturk_Max.npy") - self.norm_sub_value
# Max of robot_state + object_state sizes across all sawyer environments.
# Robot size always 30. Max object state size is... 23.
self.cond_robot_state_size = 30
self.cond_object_state_size = 23
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
elif self.args.data=='Mocap':
self.state_size = 22*3
self.state_dim = 22*3
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.conditional_info_size = 0
# Training parameters.
self.baseline_value = 0.
self.beta_decay = 0.9
self.learning_rate = self.args.learning_rate
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_epochs = self.args.epsilon_over
self.decay_counter = self.decay_epochs*len(self.dataset)
# Log-likelihood penalty.
self.lambda_likelihood_penalty = self.args.likelihood_penalty
self.baseline = None
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_counter)
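# Worked example (illustrative numbers, not defaults): with epsilon_from=0.5, epsilon_to=0.05,
# epsilon_over=5 epochs and a dataset of 1000 trajectories, decay_counter = 5*1000 = 5000
# iterations and decay_rate = (0.5-0.05)/5000 = 9e-5, so set_epoch() below anneals epsilon
# linearly from 0.5 to 0.05 over the first five epochs and holds it at 0.05 afterwards.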
def create_networks(self):
# Create K Policy Networks.
# This policy network automatically manages input size.
if self.args.discrete_z:
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.number_policies, self.number_layers).to(device)
else:
# self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.latent_z_dimensionality, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
# Create encoder.
if self.args.discrete_z:
# The latent space is just one of 4 z's. So make output of encoder a one hot vector.
self.encoder_network = EncoderNetwork(self.input_size, self.hidden_size, self.number_policies).to(device)
else:
# self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality).to(device)
# if self.args.transformer:
# self.encoder_network = TransformerEncoder(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
# else:
self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
def create_training_ops(self):
# self.negative_log_likelihood_loss_function = torch.nn.NLLLoss()
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
self.KLDivergence_loss_function = torch.nn.KLDivLoss(reduction='none')
# Only need one object of the NLL loss function for the latent net.
# These loss-function objects are created once here; each evaluation on data returns a loss
# tensor, and calling backward() on that tensor computes the gradients the optimizer applies.
if self.args.train_only_policy:
self.parameter_list = self.policy_network.parameters()
else:
self.parameter_list = list(self.policy_network.parameters()) + list(self.encoder_network.parameters())
self.optimizer = torch.optim.Adam(self.parameter_list,lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Encoder_Network'] = self.encoder_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, only_policy=False, just_subpolicy=False):
load_object = torch.load(path)
if self.args.train_only_policy and self.args.train:
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
else:
self.policy_network.load_state_dict(load_object['Policy_Network'])
if not(only_policy):
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
def set_epoch(self, counter):
if self.args.train:
if counter<self.decay_counter:
self.epsilon = self.initial_epsilon-self.decay_rate*counter
else:
self.epsilon = self.final_epsilon
else:
self.epsilon = self.final_epsilon
def visualize_trajectory(self, traj, no_axes=False):
fig = plt.figure()
ax = fig.gca()
ax.scatter(traj[:,0],traj[:,1],c=range(len(traj)),cmap='jet')
plt.xlim(-10,10)
plt.ylim(-10,10)
if no_axes:
plt.axis('off')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
def update_plots(self, counter, loglikelihood, sample_traj):
self.tf_logger.scalar_summary('Subpolicy Likelihood', loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Total Loss', self.total_loss.mean(), counter)
self.tf_logger.scalar_summary('Encoder KL', self.encoder_KL.mean(), counter)
if not(self.args.reparam):
self.tf_logger.scalar_summary('Baseline', self.baseline.sum(), counter)
self.tf_logger.scalar_summary('Encoder Loss', self.encoder_loss.sum(), counter)
self.tf_logger.scalar_summary('Reinforce Encoder Loss', self.reinforce_encoder_loss.sum(), counter)
self.tf_logger.scalar_summary('Total Encoder Loss', self.total_encoder_loss.sum() ,counter)
if self.args.entropy:
self.tf_logger.scalar_summary('SubPolicy Entropy', torch.mean(subpolicy_entropy), counter)
if counter%self.args.display_freq==0:
self.tf_logger.image_summary("GT Trajectory",self.visualize_trajectory(sample_traj), counter)
def assemble_inputs(self, input_trajectory, latent_z_indices, latent_b, sample_action_seq):
if self.args.discrete_z:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size+latent_z_indices[:-1].long()] = 1.
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size+latent_z_indices.long()] = 1.
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # Concatenated action sequence for policy network.
padded_action_seq = np.concatenate([sample_action_seq,np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
else:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size:-1] = latent_z_indices[:-1]
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size:] = latent_z_indices
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # Concatenated action sequence for policy network's forward / logprobabilities function.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
padded_action_seq = np.concatenate([sample_action_seq,np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
def concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to start of action sequence and then concatenate.
sample_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# Currently returns:
# s0, s1, s2, s3, ..., sn-1, sn
# _, a0, a1, a2, ..., an_1, an
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def old_concat_state_action(self, sample_traj, sample_action_seq):
sample_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
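# Worked example of the two conventions (hypothetical 3-step, 2D trajectory):
#   sample_traj       = [s0, s1, s2]   shape (3,2)
#   sample_action_seq = [a0, a1]       shape (2,2)
# concat_state_action     -> [[s0, 0], [s1, a0], [s2, a1]]  (each action trails its state)
# old_concat_state_action -> [[s0, a0], [s1, a1], [s2, 0]]  (each action leads its state)
# Both return shape (3,4): per-timestep state-action concatenations.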
def get_trajectory_segment(self, i):
if self.args.data=='Continuous' or self.args.data=='ContinuousDir' or self.args.data=='ContinuousNonZero' or self.args.data=='ContinuousDirNZ' or self.args.data=='GoalDirected' or self.args.data=='Separable':
# Sample trajectory segment from dataset.
sample_traj, sample_action_seq = self.dataset[i]
# Subsample trajectory segment.
start_timepoint = np.random.randint(0,self.args.traj_length-self.traj_length)
end_timepoint = start_timepoint + self.traj_length
# The trajectory is going to be one step longer than the action sequence, because action sequences are constructed from state differences. Instead, truncate trajectory to length of action sequence.
sample_traj = sample_traj[start_timepoint:end_timepoint]
sample_action_seq = sample_action_seq[start_timepoint:end_timepoint-1]
self.current_traj_len = self.traj_length
# Now manage concatenated trajectory differently - {{s0,_},{s1,a0},{s2,a1},...,{sn,an-1}}.
concatenated_traj = self.concat_state_action(sample_traj, sample_action_seq)
return concatenated_traj, sample_action_seq, sample_traj
elif self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
data_element = self.dataset[i]
# If Invalid.
if not(data_element['is_valid']):
return None, None, None
# if self.args.data=='MIME':
# # Sample a trajectory length that's valid.
# trajectory = np.concatenate([data_element['la_trajectory'],data_element['ra_trajectory'],data_element['left_gripper'].reshape((-1,1)),data_element['right_gripper'].reshape((-1,1))],axis=-1)
# elif self.args.data=='Roboturk':
# trajectory = data_element['demo']
if self.args.gripper:
trajectory = data_element['demo']
else:
trajectory = data_element['demo'][:,:-1]
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
self.current_traj_len = np.random.choice([12,13,14,15,16],p=[0.1,0.2,0.4,0.2,0.1])
else:
self.current_traj_len = self.traj_length
# Sample random start point.
if trajectory.shape[0]>self.current_traj_len:
bias_length = int(self.args.pretrain_bias_sampling*trajectory.shape[0])
# Probability with which to sample biased segment:
sample_biased_segment = np.random.binomial(1,p=self.args.pretrain_bias_sampling_prob)
# Optionally bias sampling of trajectory segments towards the middle of the trajectory, to
# increase the proportion of segments capturing motions other than reaching and returning.
# Sample a biased segment only if the trajectory is long enough, with the configured probability.
if ((trajectory.shape[0]-2*bias_length)>self.current_traj_len) and sample_biased_segment:
start_timepoint = np.random.randint(bias_length, trajectory.shape[0] - self.current_traj_len - bias_length)
else:
start_timepoint = np.random.randint(0,trajectory.shape[0]-self.current_traj_len)
end_timepoint = start_timepoint + self.current_traj_len
# Get trajectory segment and actions.
trajectory = trajectory[start_timepoint:end_timepoint]
# If normalization is set to some value.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
trajectory = (trajectory-self.norm_sub_value)/self.norm_denom_value
# CONDITIONAL INFORMATION for the encoder...
if self.args.data=='MIME' or self.args.data=='Mocap':
pass
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
# robot_states = data_element['robot-state'][start_timepoint:end_timepoint]
# object_states = data_element['object-state'][start_timepoint:end_timepoint]
pass
# self.conditional_information = np.zeros((len(trajectory),self.conditional_info_size))
# self.conditional_information[:,:self.cond_robot_state_size] = robot_states
# self.conditional_information[:,self.cond_robot_state_size:object_states.shape[-1]] = object_states
# conditional_info = np.concatenate([robot_states,object_states],axis=1)
else:
return None, None, None
action_sequence = np.diff(trajectory,axis=0)
# Concatenate
concatenated_traj = self.concat_state_action(trajectory, action_sequence)
# NOW SCALE THIS ACTION SEQUENCE BY SOME FACTOR:
scaled_action_sequence = self.args.action_scale_factor*action_sequence
return concatenated_traj, scaled_action_sequence, trajectory
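# Illustrative numbers for the biased segment sampling above: for a 100-step demo with
# pretrain_bias_sampling=0.2, bias_length = 20; when the biased branch is sampled,
# start_timepoint is drawn from [20, 100 - current_traj_len - 20), which keeps the segment
# away from the reach/return motions at either end of the demonstration.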
def construct_dummy_latents(self, latent_z):
if self.args.discrete_z:
latent_z_indices = latent_z.float()*torch.ones((self.traj_length)).to(device).float()
else:
# This construction should work irrespective of reparam or not.
latent_z_indices = torch.cat([latent_z.squeeze(0) for i in range(self.current_traj_len)],dim=0)
# Setting latent_b's to 00001.
# This is just a dummy value.
# latent_b = torch.ones((5)).to(device).float()
latent_b = torch.zeros((self.current_traj_len)).to(device).float()
# latent_b[-1] = 1.
return latent_z_indices, latent_b
# return latent_z_indices
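# Shape sketch (assuming the encoder returns a continuous latent_z of shape (1, 1, z_dim)):
#   latent_z_indices : (T, z_dim)  -- the same z tiled over all T timesteps
#   latent_b         : (T,)        -- all-zero dummy termination flags
# where T = self.current_traj_len.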
def update_policies_reparam(self, loglikelihood, latent_z, encoder_KL):
self.optimizer.zero_grad()
# Losses computed as sums.
# self.likelihood_loss = -loglikelihood.sum()
# self.encoder_KL = encoder_KL.sum()
# Instead of summing losses, we should try taking the mean of the losses, so we can avoid running into issues of variable timesteps and stuff like that.
# We should also consider training with randomly sampled number of timesteps.
self.likelihood_loss = -loglikelihood.mean()
self.encoder_KL = encoder_KL.mean()
self.total_loss = (self.likelihood_loss + self.args.kl_weight*self.encoder_KL)
if self.args.debug:
print("Embedding in Update subpolicies.")
embed()
self.total_loss.backward()
self.optimizer.step()
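# Objective minimized above, written out:
#   L = -E_t[ log pi(a_t | s_t, z) ] + kl_weight * KL( q(z | tau) || p(z) )
# i.e. the mean action log-likelihood under the subpolicy plus a weighted KL term from
# the variational encoder, averaged (rather than summed) over timesteps.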
def rollout_visuals(self, i, latent_z=None, return_traj=False):
# Initialize states and latent_z, etc.
# For t in range(number timesteps):
# # Retrieve action by feeding input to policy.
# # Step in environment with action.
# # Update inputs with new state and previously executed action.
self.state_dim = 2
self.rollout_timesteps = 5
start_state = torch.zeros((self.state_dim))
if self.args.discrete_z:
# Assuming 4 discrete subpolicies, just set subpolicy input to 1 at the latent_z index == i.
subpolicy_inputs = torch.zeros((1,self.input_size+self.number_policies)).to(device).float()
subpolicy_inputs[0,self.input_size+i] = 1.
else:
subpolicy_inputs = torch.zeros((1,self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[0,self.input_size:] = latent_z
subpolicy_inputs[0,:self.state_dim] = start_state
# subpolicy_inputs[0,-1] = 1.
for t in range(self.rollout_timesteps-1):
actions = self.policy_network.get_actions(subpolicy_inputs,greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:self.state_dim]+action_to_execute
# New input row:
if self.args.discrete_z:
input_row = torch.zeros((1,self.input_size+self.number_policies)).to(device).float()
input_row[0,self.input_size+i] = 1.
else:
input_row = torch.zeros((1,self.input_size+self.latent_z_dimensionality)).to(device).float()
input_row[0,self.input_size:] = latent_z
input_row[0,:self.state_dim] = new_state
input_row[0,self.state_dim:2*self.state_dim] = action_to_execute
# input_row[0,-1] = 1.
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
# print("latent_z:",latent_z)
trajectory_rollout = subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy()
# print("Trajectory:",trajectory_rollout)
if return_traj:
return trajectory_rollout
def run_iteration(self, counter, i, return_z=False, and_train=True):
# Basic Training Algorithm:
# For E epochs:
# # For all trajectories:
# # Sample trajectory segment from dataset.
# # Encode trajectory segment into latent z.
# # Feed latent z and trajectory segment into policy network and evaluate likelihood.
# # Update parameters.
self.set_epoch(counter)
############# (0) #############
# Sample trajectory segment from dataset.
if self.args.traj_segments:
trajectory_segment, sample_action_seq, sample_traj = self.get_trajectory_segment(i)
else:
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# Calling it trajectory segment, but it's not actually a trajectory segment here.
trajectory_segment = concatenated_traj
if trajectory_segment is not None:
############# (1) #############
torch_traj_seg = torch.tensor(trajectory_segment).to(device).float()
# Encode trajectory segment into latent z.
latent_z, encoder_loglikelihood, encoder_entropy, kl_divergence = self.encoder_network.forward(torch_traj_seg, self.epsilon)
########## (2) & (3) ##########
# Feed latent z and trajectory segment into policy network and evaluate likelihood.
latent_z_seq, latent_b = self.construct_dummy_latents(latent_z)
_, subpolicy_inputs, sample_action_seq = self.assemble_inputs(trajectory_segment, latent_z_seq, latent_b, sample_action_seq)
# Policy net doesn't use the decayed epsilon. (We never sample from it in training, only in rollouts.)
loglikelihoods, _ = self.policy_network.forward(subpolicy_inputs, sample_action_seq)
loglikelihood = loglikelihoods[:-1].mean()
if self.args.debug:
print("Embedding in Train.")
embed()
############# (3) #############
# Update parameters.
if self.args.train and and_train:
# If we are regularizing:
# (1) Sample another z.
# (2) Construct inputs and such.
# (3) Compute distances, and feed to update_policies.
regularization_kl = None
z_distance = None
self.update_policies_reparam(loglikelihood, subpolicy_inputs, kl_divergence)
# Update Plots.
self.update_plots(counter, loglikelihood, trajectory_segment)
if return_z:
return latent_z, sample_traj, sample_action_seq
else:
if return_z:
return latent_z, sample_traj, sample_action_seq
else:
np.set_printoptions(suppress=True,precision=2)
print("###################", i)
print("Policy loglikelihood:", loglikelihood)
print("#########################################")
else:
return None, None, None
def evaluate_metrics(self):
self.distances = -np.ones((self.test_set_size))
# Get test set elements as last (self.test_set_size) number of elements of dataset.
for i in range(self.test_set_size):
index = i + len(self.dataset)-self.test_set_size
print("Evaluating ", i, " in test set, or ", index, " in dataset.")
# Get latent z.
latent_z, sample_traj, sample_action_seq = self.run_iteration(0, index, return_z=True)
if sample_traj is not None:
# Feed latent z to the rollout.
# rollout_trajectory = self.rollout_visuals(index, latent_z=latent_z, return_traj=True)
rollout_trajectory = self.rollout_robot_trajectory(sample_traj[0], latent_z, rollout_length=len(sample_traj))
self.distances[i] = ((sample_traj-rollout_trajectory)**2).mean()
self.mean_distance = self.distances[self.distances>0].mean()
def evaluate(self, model=None, suffix=None):
if model:
self.load_all_models(model)
np.set_printoptions(suppress=True,precision=2)
if self.args.data=='ContinuousNonZero':
self.visualize_embedding_space(suffix=suffix)
if self.args.data=="MIME" or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
print("Running Evaluation of State Distances on small test set.")
# self.evaluate_metrics()
# Only running viz if we're actually pretraining.
if self.args.traj_segments:
print("Running Visualization on Robot Data.")
self.visualize_robot_data()
else:
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
# np.save(os.path.join(self.dir_name,"Trajectory_Distances_{0}.npy".format(self.args.name)),self.distances)
# np.save(os.path.join(self.dir_name,"Mean_Trajectory_Distance_{0}.npy".format(self.args.name)),self.mean_distance)
# @profile
def get_trajectory_and_latent_sets(self):
# For N number of random trajectories from MIME:
# # Encode trajectory using encoder into latent_z.
# # Feed latent_z into subpolicy.
# # Rollout subpolicy for t timesteps.
# # Plot rollout.
# Embed plots.
# Set N:
self.N = 100
self.rollout_timesteps = 5
self.state_dim = 2
self.latent_z_set = np.zeros((self.N,self.latent_z_dimensionality))
self.trajectory_set = np.zeros((self.N, self.rollout_timesteps, self.state_dim))
# Use the dataset to get reasonable trajectories (without a KL constraint towards N(0,1) on the latent space, we cannot simply sample z's at random).
for i in range(self.N):
# (1) Encoder trajectory.
latent_z, _, _ = self.run_iteration(0, i, return_z=True, and_train=False)
# Copy z.
self.latent_z_set[i] = copy.deepcopy(latent_z.detach().cpu().numpy())
# (2) Now rollout policy.
self.trajectory_set[i] = self.rollout_visuals(i, latent_z=latent_z, return_traj=True)
# # (3) Plot trajectory.
# traj_image = self.visualize_trajectory(rollout_traj)
def visualize_embedding_space(self, suffix=None):
self.get_trajectory_and_latent_sets()
# TSNE on latentz's.
tsne = skl_manifold.TSNE(n_components=2,random_state=0)
embedded_zs = tsne.fit_transform(self.latent_z_set)
ratio = 0.3
for i in range(self.N):
plt.scatter(embedded_zs[i,0]+ratio*self.trajectory_set[i,:,0],embedded_zs[i,1]+ratio*self.trajectory_set[i,:,1],c=range(self.rollout_timesteps),cmap='jet')
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
if suffix is not None:
self.dir_name = os.path.join(self.dir_name, suffix)
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
# Format with name.
plt.savefig("{0}/Embedding_Joint_{1}.png".format(self.dir_name,self.args.name))
plt.close()
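# Reading the plot above: each latent code's 5-step rollout is drawn as a small glyph
# offset from its 2D t-SNE location, i.e. the scattered points are
#   embedded_zs[i] + ratio * trajectory_set[i, t]   with ratio = 0.3,
# so latent codes that decode to similar motions cluster together visually.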
class PolicyManager_Joint(PolicyManager_BaseClass):
# Basic Training Algorithm:
# For E epochs:
# # For all trajectories:
# # Sample latent variables from conditional.
# # (Concatenate Latent Variables into Input.)
# # Evaluate log likelihoods of actions and options.
# # Update parameters.
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_Joint, self).__init__()
self.args = args
self.data = self.args.data
self.number_policies = number_policies
self.latent_z_dimensionality = self.args.z_dimensions
self.dataset = dataset
# Global input size: trajectory at every step - x,y,action
# Inputs is now states and actions.
# Model size parameters
self.state_size = 2
self.state_dim = 2
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
# Number of actions
self.output_size = 2
self.number_layers = self.args.number_layers
self.traj_length = 5
self.conditional_info_size = 6
if self.args.data=='MIME':
self.state_size = 16
self.state_dim = 16
self.input_size = 2*self.state_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
# Create Baxter visualizer for MIME data
# self.visualizer = BaxterVisualizer.MujocoVisualizer()
self.visualizer = BaxterVisualizer()
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/MIME_Means.npy")
self.norm_denom_value = np.load("Statistics/MIME_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/MIME_Min.npy")
self.norm_denom_value = np.load("Statistics/MIME_Max.npy") - np.load("Statistics/MIME_Min.npy")
# Max of robot_state + object_state sizes across all Baxter environments.
self.cond_robot_state_size = 60
self.cond_object_state_size = 25
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
self.conditional_viz_env = False
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.state_size = 8
self.state_dim = 8
self.input_size = 2*self.state_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.visualizer = SawyerVisualizer()
# Max of robot_state + object_state sizes across all sawyer environments.
# Robot size always 30. Max object state size is... 23.
self.cond_robot_state_size = 30
self.cond_object_state_size = 23
self.number_tasks = 8
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size+self.number_tasks
self.conditional_viz_env = True
elif self.args.data=='Mocap':
self.state_size = 22*3
self.state_dim = 22*3
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.conditional_info_size = 0
self.conditional_information = None
self.conditional_viz_env = False
# Create visualizer object
self.visualizer = MocapVisualizer(args=self.args)
self.training_phase_size = self.args.training_phase_size
self.number_epochs = self.args.epochs
self.test_set_size = 500
self.baseline_value = 0.
self.beta_decay = 0.9
self.learning_rate = self.args.learning_rate
self.latent_b_loss_weight = self.args.lat_b_wt
self.latent_z_loss_weight = self.args.lat_z_wt
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_epochs = self.args.epsilon_over
self.decay_counter = self.decay_epochs*len(self.dataset)
# Log-likelihood penalty.
self.lambda_likelihood_penalty = self.args.likelihood_penalty
self.baseline = None
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_counter)
def create_networks(self):
if self.args.discrete_z:
# Create K Policy Networks.
# This policy network automatically manages input size.
# self.policy_network = ContinuousPolicyNetwork(self.input_size,self.hidden_size,self.output_size,self.number_policies, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
# Create latent policy, whose action space = self.number_policies.
# This policy network automatically manages input size.
# Also add conditional_info_size to this.
self.latent_policy = LatentPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, self.number_layers, self.args.b_exploration_bias).to(device)
# Create variational network.
# self.variational_policy = VariationalPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, number_layers=self.number_layers, z_exploration_bias=self.args.z_exploration_bias, b_exploration_bias=self.args.b_exploration_bias).to(device)
self.variational_policy = VariationalPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, self.args, number_layers=self.number_layers).to(device)
else:
# self.policy_network = ContinuousPolicyNetwork(self.input_size,self.hidden_size,self.output_size,self.latent_z_dimensionality, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
if self.args.constrained_b_prior:
self.latent_policy = ContinuousLatentPolicyNetwork_ConstrainedBPrior(self.input_size+self.conditional_info_size, self.hidden_size, self.args, self.number_layers).to(device)
self.variational_policy = ContinuousVariationalPolicyNetwork_ConstrainedBPrior(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args, number_layers=self.number_layers).to(device)
else:
# self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.number_layers, self.args.b_exploration_bias).to(device)
self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size+self.conditional_info_size, self.hidden_size, self.args, self.number_layers).to(device)
self.variational_policy = ContinuousVariationalPolicyNetwork_BPrior(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args, number_layers=self.number_layers).to(device)
def create_training_ops(self):
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
# If we are using reparameterization, use a global optimizer, and a global loss function.
# This means gradients are being handled properly.
parameter_list = list(self.latent_policy.parameters()) + list(self.variational_policy.parameters())
if not(self.args.fix_subpolicy):
parameter_list = parameter_list + list(self.policy_network.parameters())
self.optimizer = torch.optim.Adam(parameter_list, lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Latent_Policy'] = self.latent_policy.state_dict()
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Variational_Policy'] = self.variational_policy.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, just_subpolicy=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if not(just_subpolicy):
if self.args.load_latent:
self.latent_policy.load_state_dict(load_object['Latent_Policy'])
self.variational_policy.load_state_dict(load_object['Variational_Policy'])
def set_epoch(self, counter):
if self.args.train:
if counter<self.decay_counter:
self.epsilon = self.initial_epsilon-self.decay_rate*counter
else:
self.epsilon = self.final_epsilon
if counter<self.training_phase_size:
self.training_phase=1
elif self.training_phase_size<=counter and counter<2*self.training_phase_size:
self.training_phase=2
print("In Phase 2.")
else:
self.training_phase=3
self.latent_z_loss_weight = 0.01*self.args.lat_b_wt
# In phase 3 (2*training_phase_size <= counter <= 3*training_phase_size), the latent_b loss
# weight stays as configured while the latent_z loss weight is scaled down to 0.01*lat_b_wt.
# Once counter exceeds 3*training_phase_size, the latent_z loss weight is raised to 0.1*lat_b_wt.
if counter>3*self.training_phase_size:
# Set equal after 3.
print("In Phase 4.")
self.latent_z_loss_weight = 0.1*self.args.lat_b_wt
else:
print("In Phase 3.")
else:
self.epsilon = 0.
self.training_phase=1
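# Phase schedule implied above, with T = self.args.training_phase_size:
#   counter <  T        : phase 1
#   T <= counter < 2T   : phase 2
#   counter >= 2T       : phase 3, latent_z_loss_weight = 0.01 * lat_b_wt
#   counter >  3T       : still phase 3 internally, but latent_z_loss_weight is raised
#                         to 0.1 * lat_b_wt (logged as "Phase 4").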
def visualize_trajectory(self, trajectory, segmentations=None, i=0, suffix='_Img'):
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
unnorm_trajectory = (trajectory*self.norm_denom_value)+self.norm_sub_value
else:
unnorm_trajectory = trajectory
if self.args.data=='Mocap':
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
if self.args.model is not None:
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
else:
model_epoch = self.current_epoch_running
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
animation_object = self.dataset[i]['animation']
return self.visualizer.visualize_joint_trajectory(unnorm_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_{1}.gif".format(i,suffix), return_and_save=True, additional_info=animation_object)
else:
return self.visualizer.visualize_joint_trajectory(unnorm_trajectory, return_gif=True, segmentations=segmentations)
else:
return self.visualize_2D_trajectory(trajectory)
def visualize_2D_trajectory(self, traj):
fig = plt.figure()
ax = fig.gca()
ax.scatter(traj[:,0],traj[:,1],c=range(len(traj)),cmap='jet')
scale = 30
plt.xlim(-scale,scale)
plt.ylim(-scale,scale)
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
# Already got image data. Now close plot so it doesn't cry.
# fig.gcf()
plt.close()
image = np.transpose(image, axes=[2,0,1])
return image
def compute_evaluation_metrics(self, sample_traj, counter, i):
# # Generate trajectory rollouts so we can calculate distance metric.
# self.rollout_visuals(counter, i, get_image=False)
# Compute trajectory distance between:
var_rollout_distance = ((self.variational_trajectory_rollout-sample_traj)**2).mean()
latent_rollout_distance = ((self.latent_trajectory_rollout-sample_traj)**2).mean()
return var_rollout_distance, latent_rollout_distance
def update_plots(self, counter, i, subpolicy_loglikelihood, latent_loglikelihood, subpolicy_entropy, sample_traj, latent_z_logprobability, latent_b_logprobability, kl_divergence, prior_loglikelihood):
self.tf_logger.scalar_summary('Latent Policy Loss', torch.mean(self.total_latent_loss), counter)
self.tf_logger.scalar_summary('SubPolicy Log Likelihood', subpolicy_loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Latent Log Likelihood', latent_loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Variational Policy Loss', torch.mean(self.variational_loss), counter)
self.tf_logger.scalar_summary('Variational Reinforce Loss', torch.mean(self.reinforce_variational_loss), counter)
self.tf_logger.scalar_summary('Total Variational Policy Loss', torch.mean(self.total_variational_loss), counter)
self.tf_logger.scalar_summary('Baseline', self.baseline.mean(), counter)
self.tf_logger.scalar_summary('Total Likelihood', subpolicy_loglikelihood+latent_loglikelihood, counter)
self.tf_logger.scalar_summary('Epsilon', self.epsilon, counter)
self.tf_logger.scalar_summary('Latent Z LogProbability', latent_z_logprobability, counter)
self.tf_logger.scalar_summary('Latent B LogProbability', latent_b_logprobability, counter)
self.tf_logger.scalar_summary('KL Divergence', torch.mean(kl_divergence), counter)
self.tf_logger.scalar_summary('Prior LogLikelihood', torch.mean(prior_loglikelihood), counter)
if counter%self.args.display_freq==0:
# Now adding visuals for MIME, so it doesn't depend what data we use.
variational_rollout_image, latent_rollout_image = self.rollout_visuals(counter, i)
# Compute distance metrics.
var_dist, latent_dist = self.compute_evaluation_metrics(sample_traj, counter, i)
self.tf_logger.scalar_summary('Variational Trajectory Distance', var_dist, counter)
self.tf_logger.scalar_summary('Latent Trajectory Distance', latent_dist, counter)
gt_trajectory_image = np.array(self.visualize_trajectory(sample_traj, i=i, suffix='GT'))
variational_rollout_image = np.array(variational_rollout_image)
latent_rollout_image = np.array(latent_rollout_image)
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# Feeding as a list of images because gif_summary expects a sequence of frames.
self.tf_logger.gif_summary("GT Trajectory",[gt_trajectory_image],counter)
self.tf_logger.gif_summary("Variational Rollout",[variational_rollout_image],counter)
self.tf_logger.gif_summary("Latent Rollout",[latent_rollout_image],counter)
else:
# Feeding as a list of images because image_summary expects a sequence.
self.tf_logger.image_summary("GT Trajectory",[gt_trajectory_image],counter)
self.tf_logger.image_summary("Variational Rollout",[variational_rollout_image],counter)
self.tf_logger.image_summary("Latent Rollout",[latent_rollout_image],counter)
def assemble_inputs(self, input_trajectory, latent_z_indices, latent_b, sample_action_seq, conditional_information=None):
if self.args.discrete_z:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size+latent_z_indices[:-1].long()] = 1.
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
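# Sketch of the assembled row layout (discrete z): [state-action (input_size) | one-hot z (number_policies) | b (1)].
# Note the one-timestep shift above: row t carries z_{t-1} and b_{t-1}, so the latent policy predicts the current (z, b) from the past.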
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size+latent_z_indices.long()] = 1.
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # This method of concatenation is wrong, because it also evaluates the likelihood of a dummy action [0,0].
# # Concatenated action sequence for policy network.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# This is the right method of concatenation, because it pads at the end and never scores a dummy initial action.
padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
else:
if self.training_phase>1:
# Prevents gradients from being propagated through this.
# (clone().detach() is the idiomatic way to copy-and-detach an existing tensor.)
latent_z_copy = latent_z_indices.clone().detach()
else:
latent_z_copy = latent_z_indices
if conditional_information is None:
conditional_information = torch.zeros((self.conditional_info_size)).to(device).float()
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality+1+self.conditional_info_size)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size:self.input_size+self.latent_z_dimensionality] = latent_z_copy[:-1]
# Note: an earlier version wrote latent_b to the wrong dimension; it now goes in the column right after the z block.
assembled_inputs[range(1,len(input_trajectory)),self.input_size+self.latent_z_dimensionality] = latent_b[:-1].float()
# assembled_inputs[range(1,len(input_trajectory)),-self.conditional_info_size:] = torch.tensor(conditional_information).to(device).float()
# Instead of feeding conditional information only from the 1st timestep onwards, we set it from the very first timestep.
if self.conditional_info_size>0:
assembled_inputs[:,-self.conditional_info_size:] = torch.tensor(conditional_information).to(device).float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size:] = latent_z_indices
# # This method of concatenation is wrong, because it also evaluates the likelihood of a dummy action [0,0].
# # Concatenated action sequence for policy network.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# This is the right method of concatenation, because it pads at the end and never scores a dummy initial action.
padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
def concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to start of action sequence and then concatenate.
sample_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# Currently returns rows (s_t, a_{t-1}):
# s0, s1, s2, s3, ..., sn-1, sn
# _,  a0, a1, a2, ..., an-2, an-1
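# Illustrative example: with states [s0, s1, s2] and actions [a0, a1], prepending a zero row
# gives actions [0, a0, a1], so row t of the output is (s_t, a_{t-1}), i.e. the action that led to s_t.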
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def old_concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to the END of action sequence and then concatenate.
sample_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def setup_eval_against_encoder(self):
# Creates a network, loads the network from pretraining model file.
self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
load_object = torch.load(self.args.subpolicy_model)
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
# Force encoder to use original variance for eval.
self.encoder_network.variance_factor = 1.
def evaluate_loglikelihoods(self, sample_traj, sample_action_seq, concatenated_traj, latent_z_indices, latent_b):
# Initialize both loglikelihoods to 0.
subpolicy_loglikelihood = 0.
latent_loglikelihood = 0.
# Need to assemble inputs first - returns a Torch CUDA Tensor.
# This doesn't need to take in actions, because we can evaluate for all actions then select.
assembled_inputs, subpolicy_inputs, padded_action_seq = self.assemble_inputs(concatenated_traj, latent_z_indices, latent_b, sample_action_seq, self.conditional_information)
###########################
# Compute learnt subpolicy loglikelihood.
###########################
learnt_subpolicy_loglikelihoods, entropy = self.policy_network.forward(subpolicy_inputs, padded_action_seq)
# Clip values. # Comment this out to remove clipping.
learnt_subpolicy_loglikelihoods = torch.clamp(learnt_subpolicy_loglikelihoods,min=self.args.subpolicy_clamp_value)
# Multiplying the likelihoods with the subpolicy ratio before summing.
learnt_subpolicy_loglikelihoods = self.args.subpolicy_ratio*learnt_subpolicy_loglikelihoods
# Summing until penultimate timestep.
# learnt_subpolicy_loglikelihood = learnt_subpolicy_loglikelihoods[:-1].sum()
# TAKING AVERAGE HERE AS WELL.
learnt_subpolicy_loglikelihood = learnt_subpolicy_loglikelihoods[:-1].mean()
###########################
# Compute Latent policy loglikelihood values.
###########################
# Whether to clone assembled_inputs based on the phase of training.
# In phase one it doesn't matter if we use the clone or not, because we never use latent policy loss.
# So just clone anyway.
# For now, ignore phase 3. This prevents gradients from going into the variational policy from the latent policy.
assembled_inputs_copy = assembled_inputs.clone().detach()
latent_z_copy = latent_z_indices.clone().detach()
# Consideration for later:
# if self.training_phase==3:
# Don't clone.
if self.args.discrete_z:
# Return discrete probabilities from latent policy network.
latent_z_logprobabilities, latent_b_logprobabilities, latent_b_probabilities, latent_z_probabilities = self.latent_policy.forward(assembled_inputs_copy)
# Use this to check if latent_z elements are equal:
diff_val = (1-(latent_z_indices==latent_z_indices.roll(1,0))[1:]).to(device).float()
# diff_val was computed from the rolled latent_z; now roll diff_val itself so entry t compares z_t to z_{t-1}. Whatever lands in diff_val[0] is irrelevant, because latent_b is always guaranteed to be 1 at the first timestep.
diff_val = diff_val.roll(1,0)
# Selects first option for variable = 1, second option for variable = 0.
latent_z_temporal_logprobabilities = torch.where(latent_b[:-1].byte(), latent_z_logprobabilities[range(len(sample_traj)-1),latent_z_indices[:-1].long()], -self.lambda_likelihood_penalty*diff_val)
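# In words: where b_t == 1 we score the newly selected z under the latent policy;
# where b_t == 0 we incur a -lambda penalty only if z changed without a new b flag (diff_val == 1), and 0 otherwise.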
latent_z_logprobability = latent_z_temporal_logprobabilities.mean()
else:
# If not, we need to evaluate the latent probabilities of latent_z_indices under the latent policy.
latent_b_logprobabilities, latent_b_probabilities, latent_distributions = self.latent_policy.forward(assembled_inputs_copy, self.epsilon)
# Evaluate loglikelihood of latent z vectors under the latent policy's distributions.
latent_z_logprobabilities = latent_distributions.log_prob(latent_z_copy.unsqueeze(1))
# Multiply logprobabilities by the latent policy ratio.
latent_z_temporal_logprobabilities = latent_z_logprobabilities[:-1]*self.args.latentpolicy_ratio
latent_z_logprobability = latent_z_temporal_logprobabilities.mean()
latent_z_probabilities = None
# LATENT LOGLIKELIHOOD is defined as:
# = \sum_{t=1}^T \log p(\zeta_t | \tau_{1:t}, \zeta_{1:t-1})
# = \sum_{t=1}^T \log { \phi_t(b_t)} + \log { 1[b_t==1] \eta_t(h_t|s_{1:t}) + 1[b_t==0] 1[z_t==z_{t-1}] }
# Adding log probabilities of termination (of whether it terminated or not), till penultimate step.
latent_b_temporal_logprobabilities = latent_b_logprobabilities[range(len(sample_traj)-1),latent_b[:-1].long()]
latent_b_logprobability = latent_b_temporal_logprobabilities.mean()
latent_loglikelihood += latent_b_logprobability
latent_loglikelihood += latent_z_logprobability
# DON'T CLAMP, JUST MULTIPLY BY SUITABLE RATIO! Probably use the same lat_z_wt and lat_b_wt ratios from the losses.
latent_temporal_loglikelihoods = self.args.lat_b_wt*latent_b_temporal_logprobabilities + self.args.lat_z_wt*latent_z_temporal_logprobabilities.squeeze(1)
##################################################
#### Manage merging likelihoods for REINFORCE ####
##################################################
if self.training_phase==1:
temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1)
elif self.training_phase==2 or self.training_phase==3:
# temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1) + self.args.temporal_latentpolicy_ratio*latent_temporal_loglikelihoods
temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1)
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in the Evaluate Likelihoods Function.")
embed()
return None, None, None, latent_loglikelihood, \
latent_b_logprobabilities, latent_z_logprobabilities, latent_b_probabilities, latent_z_probabilities, \
latent_z_logprobability, latent_b_logprobability, learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, temporal_loglikelihoods
def new_update_policies(self, i, sample_action_seq, subpolicy_loglikelihoods, subpolicy_entropy, latent_b, latent_z_indices,\
variational_z_logprobabilities, variational_b_logprobabilities, variational_z_probabilities, variational_b_probabilities, kl_divergence, \
latent_z_logprobabilities, latent_b_logprobabilities, latent_z_probabilities, latent_b_probabilities, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, loglikelihood, prior_loglikelihood, latent_loglikelihood, temporal_loglikelihoods):
# Set optimizer gradients to zero.
self.optimizer.zero_grad()
# Assemble prior and KL divergence losses.
# These are output by the variational network, and we don't really need the last z predicted by it.
prior_loglikelihood = prior_loglikelihood[:-1]
kl_divergence = kl_divergence[:-1]
######################################################
############## Update latent policy. #################
######################################################
# Remember, the NLL loss function takes <Log-probabilities, Sampled value> as arguments.
self.latent_b_loss = self.negative_log_likelihood_loss_function(latent_b_logprobabilities, latent_b.long())
if self.args.discrete_z:
self.latent_z_loss = self.negative_log_likelihood_loss_function(latent_z_logprobabilities, latent_z_indices.long())
# If continuous latent_z, just calculate loss as negative log likelihood of the latent_z's selected by variational network.
else:
self.latent_z_loss = -latent_z_logprobabilities.squeeze(1)
# Compute total latent loss as weighted sum of latent_b_loss and latent_z_loss.
self.total_latent_loss = (self.latent_b_loss_weight*self.latent_b_loss+self.latent_z_loss_weight*self.latent_z_loss)[:-1]
#######################################################
############# Compute Variational Losses ##############
#######################################################
# MUST ALWAYS COMPUTE: cross entropies of the variational b predictions against the sampled latent_b.
self.variational_b_loss = self.negative_log_likelihood_loss_function(variational_b_logprobabilities[:-1], latent_b[:-1].long())
# In case of reparameterization, the variational loss that goes to REINFORCE should just be variational_b_loss.
self.variational_loss = self.args.var_loss_weight*self.variational_b_loss
#######################################################
########## Compute Variational Reinforce Loss #########
#######################################################
# Compute reinforce target based on how we express the objective:
# The original implementation, i.e. the entropic implementation, uses:
# (1) \mathbb{E}_{x, z \sim q(z|x)} \Big[ \nabla_{\omega} \log q(z|x,\omega) \{ \log p(x|z) + \log p(z|x) - \log q(z|x) - 1 \} \Big]
# The KL divergence implementation uses:
# (2) \mathbb{E}_{x, z \sim q(z|x)} \Big[ \nabla_{\omega} \log q(z|x,\omega) \{ \log p(x|z) + \log p(z|x) - \log p(z) \} \Big] - \nabla_{\omega} D_{KL} \Big[ q(z|x) \| p(z) \Big]
# Compute baseline target according to NEW GRADIENT, and Equation (2) above.
baseline_target = (temporal_loglikelihoods - self.args.prior_weight*prior_loglikelihood).clone().detach()
if self.baseline is None:
self.baseline = torch.zeros_like(baseline_target.mean()).to(device).float()
else:
self.baseline = (self.beta_decay*self.baseline)+(1.-self.beta_decay)*baseline_target.mean()
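# A minimal sketch of this baseline (assuming beta_decay in [0,1)): an exponential moving average,
#   baseline <- beta * baseline + (1 - beta) * mean(target),
# subtracted from the REINFORCE target below purely for variance reduction; it introduces no bias.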
self.reinforce_variational_loss = self.variational_loss*(baseline_target-self.baseline)
# If reparam, the variational loss is a combination of three things.
# Losses from latent policy and subpolicy into variational network for the latent_z's, the reinforce loss on the latent_b's, and the KL divergence.
# But since we don't need to additionally compute the gradients from the latent and sub policies into the variational network, just set the variational loss to reinforce + KL.
# self.total_variational_loss = (self.reinforce_variational_loss.sum() + self.args.kl_weight*kl_divergence.squeeze(1).sum()).sum()
self.total_variational_loss = (self.reinforce_variational_loss + self.args.kl_weight*kl_divergence.squeeze(1)).mean()
######################################################
# Set other losses, subpolicy, latent, and prior.
######################################################
# Get subpolicy losses.
self.subpolicy_loss = (-learnt_subpolicy_loglikelihood).mean()
# Get prior losses.
self.prior_loss = (-self.args.prior_weight*prior_loglikelihood).mean()
# Reweight latent loss.
self.total_weighted_latent_loss = (self.args.latent_loss_weight*self.total_latent_loss).mean()
################################################
# Setting total loss based on phase of training.
################################################
# IF PHASE ONE:
if self.training_phase==1:
self.total_loss = self.subpolicy_loss + self.total_variational_loss + self.prior_loss
# IF DONE WITH PHASE ONE:
elif self.training_phase==2 or self.training_phase==3:
self.total_loss = self.subpolicy_loss + self.total_weighted_latent_loss + self.total_variational_loss + self.prior_loss
################################################
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Update Policies")
embed()
################################################
self.total_loss.sum().backward()
self.optimizer.step()
def set_env_conditional_info(self):
obs = self.environment._get_observation()
self.conditional_information = np.zeros((self.conditional_info_size))
cond_state = np.concatenate([obs['robot-state'],obs['object-state']])
self.conditional_information[:cond_state.shape[-1]] = cond_state
# Also set a particular index of the conditional information to 1, encoding the task ID.
self.conditional_information[-self.number_tasks+self.task_id_for_cond_info] = 1
def take_rollout_step(self, subpolicy_input, t, use_env=False):
# Feed subpolicy input into the policy.
actions = self.policy_network.get_actions(subpolicy_input,greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
if use_env==True:
# Take a step in the environment.
step_res = self.environment.step(action_to_execute.squeeze(0).detach().cpu().numpy())
# Get state.
observation = step_res[0]
# Now update conditional information...
# self.conditional_information = np.concatenate([new_state['robot-state'],new_state['object-state']])
gripper_open = np.array([0.0115, -0.0115])
gripper_closed = np.array([-0.020833, 0.020833])
# The state that we want is ... joint state?
gripper_finger_values = step_res[0]['gripper_qpos']
gripper_values = (gripper_finger_values - gripper_open)/(gripper_closed - gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = 2*finger_diff-1
# Concatenate joint and gripper state.
new_state_numpy = np.concatenate([observation['joint_pos'], np.array(gripper_value).reshape((1,))])
new_state = torch.tensor(new_state_numpy).to(device).float().view((1,-1))
# This should be true by default...
# if self.conditional_viz_env:
# self.set_env_conditional_info()
self.set_env_conditional_info()
else:
# Compute next state by adding action to state.
new_state = subpolicy_input[t,:self.state_dim]+action_to_execute
# return new_subpolicy_input
return action_to_execute, new_state
def create_RL_environment_for_rollout(self, environment_name, state=None, task_id=None):
self.environment = robosuite.make(environment_name)
self.task_id_for_cond_info = task_id
if state is not None:
self.environment.sim.set_state_from_flattened(state)
def rollout_variational_network(self, counter, i):
###########################################################
###########################################################
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if self.args.traj_length>0:
self.rollout_timesteps = self.args.traj_length
else:
self.rollout_timesteps = len(sample_traj)
############# (1) #############
# Sample latent variables from p(\zeta | \tau).
latent_z_indices, latent_b, variational_b_logprobabilities, variational_z_logprobabilities,\
variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood = self.variational_policy.forward(torch.tensor(old_concatenated_traj).to(device).float(), self.epsilon)
############# (1.5) ###########
# Doesn't really matter what the conditional information is here, because the latent policy isn't being rolled out.
# We still call it because these assembled inputs are passed to the latent policy rollout later.
if self.conditional_viz_env:
self.set_env_conditional_info()
# Get assembled inputs and subpolicy inputs for variational rollout.
orig_assembled_inputs, orig_subpolicy_inputs, padded_action_seq = self.assemble_inputs(concatenated_traj, latent_z_indices, latent_b, sample_action_seq, self.conditional_information)
###########################################################
############# (A) VARIATIONAL POLICY ROLLOUT. #############
###########################################################
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# For number of rollout timesteps:
for t in range(self.rollout_timesteps-1):
# Take a rollout step. Feed into policy, get action, step, return new input.
action_to_execute, new_state = self.take_rollout_step(subpolicy_inputs[:(t+1)].view((t+1,-1)), t)
state_action_tuple = torch.cat([new_state, action_to_execute],dim=1)
# Overwrite the subpolicy inputs with the new state action tuple.
subpolicy_inputs[t+1,:self.input_size] = state_action_tuple
# Get trajectory from this.
self.variational_trajectory_rollout = copy.deepcopy(subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy())
return orig_assembled_inputs, orig_subpolicy_inputs, latent_b
def alternate_rollout_latent_policy(self, counter, i, orig_assembled_inputs, orig_subpolicy_inputs):
assembled_inputs = orig_assembled_inputs.clone().detach()
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# This version of rollout uses the incremental reparam get actions function.
hidden = None
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# Set rollout length.
if self.args.traj_length>0:
self.rollout_timesteps = self.args.traj_length
else:
self.rollout_timesteps = len(sample_traj)
# For appropriate number of timesteps.
for t in range(self.rollout_timesteps-1):
# First get input row for latent policy.
# Feed into latent policy and get z.
# Feed z and b into subpolicy.
pass
def rollout_latent_policy(self, orig_assembled_inputs, orig_subpolicy_inputs):
assembled_inputs = orig_assembled_inputs.clone().detach()
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# delta_t counts timesteps since b was last 1; initialize it to 0.
delta_t = 0
# For number of rollout timesteps:
for t in range(self.rollout_timesteps-1):
##########################################
#### CODE FOR NEW Z SELECTION ROLLOUT ####
##########################################
# Pick latent_z and latent_b.
selected_b, new_selected_z = self.latent_policy.get_actions(assembled_inputs[:(t+1)].view((t+1,-1)), greedy=True, delta_t=delta_t)
if t==0:
selected_b = torch.ones_like(selected_b).to(device).float()
if selected_b[-1]==1:
# Copy over ALL z's. This is okay to do because we're greedily selecting, and the latent policy is hence deterministic.
selected_z = torch.tensor(new_selected_z).to(device).float()
# Since b was 1, reset the time-since-last-b counter to 0.
delta_t = 0
else:
# Increment counter since last time b was 1.
delta_t += 1
# Set z's to 0.
assembled_inputs[t+1, self.input_size:self.input_size+self.number_policies] = 0.
# Set z and b in assembled input for the future latent policy passes.
if self.args.discrete_z:
assembled_inputs[t+1, self.input_size+selected_z[-1]] = 1.
else:
assembled_inputs[t+1, self.input_size:self.input_size+self.latent_z_dimensionality] = selected_z[-1]
# Note: an earlier version used the wrong dimensions here; b goes in the column right after the z block.
assembled_inputs[t+1, self.input_size+self.latent_z_dimensionality] = selected_b[-1]
# Before copying over, set conditional_info from the environment at the current timestep.
if self.conditional_viz_env:
self.set_env_conditional_info()
if self.conditional_info_size>0:
assembled_inputs[t+1, -self.conditional_info_size:] = torch.tensor(self.conditional_information).to(device).float()
# Set z's to 0.
subpolicy_inputs[t, self.input_size:self.input_size+self.number_policies] = 0.
# Set z and b in subpolicy input for the future subpolicy passes.
if self.args.discrete_z:
subpolicy_inputs[t, self.input_size+selected_z[-1]] = 1.
else:
subpolicy_inputs[t, self.input_size:] = selected_z[-1]
# Now pass subpolicy net forward and get action and next state.
action_to_execute, new_state = self.take_rollout_step(subpolicy_inputs[:(t+1)].view((t+1,-1)), t, use_env=self.conditional_viz_env)
state_action_tuple = torch.cat([new_state, action_to_execute],dim=1)
# Now update assembled input.
assembled_inputs[t+1, :self.input_size] = state_action_tuple
subpolicy_inputs[t+1, :self.input_size] = state_action_tuple
self.latent_trajectory_rollout = copy.deepcopy(subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy())
concatenated_selected_b = np.concatenate([selected_b.detach().cpu().numpy(),np.zeros((1))],axis=-1)
if self.args.debug:
print("Embedding in Latent Policy Rollout.")
embed()
# Clear these variables from memory.
del subpolicy_inputs, assembled_inputs
return concatenated_selected_b
def rollout_visuals(self, counter, i, get_image=True):
# if self.args.data=='Roboturk':
if self.conditional_viz_env:
self.create_RL_environment_for_rollout(self.dataset[i]['environment-name'], self.dataset[i]['flat-state'][0], self.dataset[i]['task-id'],)
# Rollout policy with
# a) Latent variable samples from variational policy operating on dataset trajectories - Tests variational network and subpolicies.
# b) Latent variable samples from latent policy in a rolling fashion, initialized with states from the trajectory - Tests latent and subpolicies.
# c) Latent variables from the ground truth set (only valid for the toy dataset) - Just tests subpolicies.
###########################################################
############# (A) VARIATIONAL POLICY ROLLOUT. #############
###########################################################
orig_assembled_inputs, orig_subpolicy_inputs, variational_segmentation = self.rollout_variational_network(counter, i)
###########################################################
################ (B) LATENT POLICY ROLLOUT. ###############
###########################################################
latent_segmentation = self.rollout_latent_policy(orig_assembled_inputs, orig_subpolicy_inputs)
if get_image==True:
latent_rollout_image = self.visualize_trajectory(self.latent_trajectory_rollout, segmentations=latent_segmentation, i=i, suffix='Latent')
variational_rollout_image = self.visualize_trajectory(self.variational_trajectory_rollout, segmentations=variational_segmentation.detach().cpu().numpy(), i=i, suffix='Variational')
return variational_rollout_image, latent_rollout_image
else:
return None, None
def run_iteration(self, counter, i):
# With learnt discrete subpolicy:
# For all epochs:
# # For all trajectories:
# # Sample z from variational network.
# # Evaluate likelihood of latent policy, and subpolicy.
# # Update policies using likelihoods.
self.set_epoch(counter)
self.iter = counter
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
############# (1) #############
# Sample latent variables from p(\zeta | \tau).
latent_z_indices, latent_b, variational_b_logprobabilities, variational_z_logprobabilities,\
variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood = self.variational_policy.forward(torch.tensor(old_concatenated_traj).to(device).float(), self.epsilon)
########## (2) & (3) ##########
# Evaluate Log Likelihoods of actions and options as "Return" for Variational policy.
subpolicy_loglikelihoods, subpolicy_loglikelihood, subpolicy_entropy,\
latent_loglikelihood, latent_b_logprobabilities, latent_z_logprobabilities,\
latent_b_probabilities, latent_z_probabilities, latent_z_logprobability, latent_b_logprobability, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, temporal_loglikelihoods = self.evaluate_loglikelihoods(sample_traj, sample_action_seq, concatenated_traj, latent_z_indices, latent_b)
if self.args.train:
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Train Function.")
embed()
############# (3) #############
# Update latent policy Pi_z with Reinforce like update using LL as return.
self.new_update_policies(i, sample_action_seq, subpolicy_loglikelihoods, subpolicy_entropy, latent_b, latent_z_indices,\
variational_z_logprobabilities, variational_b_logprobabilities, variational_z_probabilities, variational_b_probabilities, kl_divergence, \
latent_z_logprobabilities, latent_b_logprobabilities, latent_z_probabilities, latent_b_probabilities, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, learnt_subpolicy_loglikelihood+latent_loglikelihood, \
prior_loglikelihood, latent_loglikelihood, temporal_loglikelihoods)
# Update Plots.
# self.update_plots(counter, sample_map, loglikelihood)
self.update_plots(counter, i, learnt_subpolicy_loglikelihood, latent_loglikelihood, subpolicy_entropy,
sample_traj, latent_z_logprobability, latent_b_logprobability, kl_divergence, prior_loglikelihood)
# print("Latent LogLikelihood: ", latent_loglikelihood)
# print("Subpolicy LogLikelihood: ", learnt_subpolicy_loglikelihood)
print("#########################################")
else:
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
pass
else:
print("#############################################")
print("Trajectory",i)
print("Predicted Z: \n", latent_z_indices.detach().cpu().numpy())
print("True Z : \n", np.array(self.dataset.Y_array[i][:self.args.traj_length]))
print("Latent B : \n", latent_b.detach().cpu().numpy())
# print("Variational Probs: \n", variational_z_probabilities.detach().cpu().numpy())
# print("Latent Probs : \n", latent_z_probabilities.detach().cpu().numpy())
print("Latent B Probs : \n", latent_b_probabilities.detach().cpu().numpy())
if self.args.subpolicy_model:
eval_encoded_logprobs = torch.zeros((latent_z_indices.shape[0]))
eval_orig_encoder_logprobs = torch.zeros((latent_z_indices.shape[0]))
torch_concat_traj = torch.tensor(concatenated_traj).to(device).float()
# For each timestep z in latent_z_indices, evaluate likelihood under pretrained encoder model.
for t in range(latent_z_indices.shape[0]):
eval_encoded_logprobs[t] = self.encoder_network.forward(torch_concat_traj, z_sample_to_evaluate=latent_z_indices[t])
_, eval_orig_encoder_logprobs[t], _, _ = self.encoder_network.forward(torch_concat_traj)
print("Encoder Loglikelihood:", eval_encoded_logprobs.detach().cpu().numpy())
print("Orig Encoder Loglikelihood:", eval_orig_encoder_logprobs.detach().cpu().numpy())
if self.args.debug:
embed()
def evaluate_metrics(self):
self.distances = -np.ones((self.test_set_size))
# Get test set elements as the last test_set_size elements of the dataset.
for i in range(self.test_set_size):
index = i + len(self.dataset)-self.test_set_size
print("Evaluating ", i, " in test set, or ", index, " in dataset.")
# Collect inputs.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# If valid
if sample_traj is not None:
# Create environment to get conditional info.
if self.conditional_viz_env:
self.create_RL_environment_for_rollout(self.dataset[i]['environment-name'], self.dataset[i]['flat-state'][0])
# Rollout variational.
_, _, _ = self.rollout_variational_network(0, i)
self.distances[i] = ((sample_traj-self.variational_trajectory_rollout)**2).mean()
self.mean_distance = self.distances[self.distances>0].mean()
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Trajectory_Distances_{0}.npy".format(self.args.name)),self.distances)
np.save(os.path.join(self.dir_name,"Mean_Trajectory_Distance_{0}.npy".format(self.args.name)),self.mean_distance)
def evaluate(self, model):
self.set_epoch(0)
if model:
self.load_all_models(model)
np.set_printoptions(suppress=True,precision=2)
print("Running Evaluation of State Distances on small test set.")
self.evaluate_metrics()
# Visualize space if the subpolicy has been trained...
if (self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap') and (self.args.fix_subpolicy==0):
print("Running Visualization on Robot Data.")
self.pretrain_policy_manager = PolicyManager_Pretrain(self.args.number_policies, self.dataset, self.args)
self.pretrain_policy_manager.setup()
self.pretrain_policy_manager.load_all_models(model, only_policy=True)
self.pretrain_policy_manager.visualize_robot_data()
if self.args.subpolicy_model:
print("Loading encoder.")
self.setup_eval_against_encoder()
# Evaluate NLL and (potentially Expected Value Difference) on Validation / Test Datasets.
self.epsilon = 0.
# np.set_printoptions(suppress=True,precision=2)
# for i in range(60):
# self.run_iteration(0, i)
if self.args.debug:
embed()
class PolicyManager_BaselineRL(PolicyManager_BaseClass):
def __init__(self, number_policies=4, dataset=None, args=None):
# super(PolicyManager_BaselineRL, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
super(PolicyManager_BaselineRL, self).__init__()
# Create environment, setup things, etc.
self.args = args
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_episodes = self.args.epsilon_over
self.baseline = None
self.learning_rate = self.args.learning_rate
self.max_timesteps = 100
self.gamma = 0.99
self.batch_size = 10
self.number_test_episodes = 100
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_episodes)
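# Illustrative numbers (not the defaults): with epsilon_from=0.5, epsilon_to=0.05, epsilon_over=1000,
# epsilon decays linearly by 4.5e-4 per episode and is then held at 0.05 (see set_parameters below).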
self.number_episodes = 5000000
# Ornstein-Uhlenbeck noise process parameters.
self.theta = 0.15
self.sigma = 0.2
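# A sketch of the process RLUtils.OUNoise presumably implements (discretized OU update with dt=1):
#   x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, I)
# i.e. mean-reverting Gaussian noise, which gives temporally correlated exploration for continuous control.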
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
self.reset_statistics()
def create_networks(self):
if self.args.MLP_policy:
self.policy_network = ContinuousMLP(self.input_size, self.args.hidden_size, self.output_size, self.args).to(device)
self.critic_network = CriticMLP(self.input_size, self.args.hidden_size, 1, self.args).to(device)
else:
# Create policy and critic.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.args.hidden_size, self.output_size, self.args, self.args.number_layers, small_init=True).to(device)
self.critic_network = CriticNetwork(self.input_size, self.args.hidden_size, 1, self.args, self.args.number_layers).to(device)
def create_training_ops(self):
self.NLL_Loss = torch.nn.NLLLoss(reduction='none')
self.MSE_Loss = torch.nn.MSELoss(reduction='none')
# parameter_list = list(self.policy_network.parameters()) + list(self.critic_network.parameters())
self.policy_optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=self.learning_rate)
self.critic_optimizer = torch.optim.Adam(self.critic_network.parameters(), lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Critic_Network'] = self.critic_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, critic=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if critic:
self.critic_network.load_state_dict(load_object['Critic_Network'])
def setup(self):
# Calling a special RL setup function. This is because downstream classes inherit (and may override setup), but will still inherit RL_setup intact.
self.RL_setup()
def RL_setup(self):
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
self.state_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
# self.input_size = self.state_size + self.output_size
self.input_size = self.state_size + self.output_size*2
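# Presumably state_size + 2*output_size works out because the per-timestep input row is
# [joint_pos + gripper scalar | previous action | robot-state, object-state], and for this arm
# joint_pos (7) + gripper (1) happens to match the 8-dim action space (an assumption, not verified here).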
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size, min_sigma=self.args.OU_min_sigma, max_sigma=self.args.OU_max_sigma)
def set_parameters(self, episode_counter, evaluate=False):
if self.args.train and not(evaluate):
if episode_counter<self.decay_episodes:
self.epsilon = self.initial_epsilon-self.decay_rate*episode_counter
else:
self.epsilon = self.final_epsilon
else:
self.epsilon = 0.
def reset_lists(self):
self.reward_trajectory = []
self.state_trajectory = []
self.action_trajectory = []
self.image_trajectory = []
self.terminal_trajectory = []
self.cummulative_rewards = None
self.episode = None
def get_action(self, hidden=None, random=True, counter=0, evaluate=False):
# Change this to proper epsilon-greedy eventually; for now, act greedily with probability 0.8 and randomly otherwise.
whether_greedy = np.random.binomial(n=1,p=0.8)
if random or not(whether_greedy):
action = 2*np.random.random((self.output_size))-1
return action, hidden
# The rest of this will only be evaluated or run when random is false and when whether_greedy is true.
# Assemble states of current input row.
current_input_row = self.get_current_input_row()
# Using the incremental get actions. Still get action greedily, then add noise.
predicted_action, hidden = self.policy_network.incremental_reparam_get_actions(torch.tensor(current_input_row).to(device).float(), greedy=True, hidden=hidden)
if evaluate:
noise = torch.zeros_like(predicted_action).to(device).float()
else:
# Get noise from noise process.
noise = torch.randn_like(predicted_action).to(device).float()*self.epsilon
# Perturb action with noise.
perturbed_action = predicted_action + noise
if self.args.MLP_policy:
action = perturbed_action[-1].detach().cpu().numpy()
else:
action = perturbed_action[-1].squeeze(0).detach().cpu().numpy()
return action, hidden
def get_OU_action(self, hidden=None, random=False, counter=0, evaluate=False):
if random==True:
action = 2*np.random.random((self.output_size))-1
return action, hidden
# Assemble states of current input row.
current_input_row = self.get_current_input_row()
# Using the incremental get actions. Still get action greedily, then add noise.
predicted_action, hidden = self.policy_network.incremental_reparam_get_actions(torch.tensor(current_input_row).to(device).float(), greedy=True, hidden=hidden)
# Numpy action
if self.args.MLP_policy:
action = predicted_action[-1].detach().cpu().numpy()
else:
action = predicted_action[-1].squeeze(0).detach().cpu().numpy()
if evaluate:
perturbed_action = action
else:
# Perturb action with noise.
perturbed_action = self.NoiseProcess.get_action(action, counter)
return perturbed_action, hidden
def rollout(self, random=False, test=False, visualize=False):
counter = 0
eps_reward = 0.
state = self.environment.reset()
terminal = False
self.reset_lists()
# Reset the noise process! We forgot to do this! :(
self.NoiseProcess.reset()
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
self.state_trajectory.append(state)
# self.terminal_trajectory.append(terminal)
# self.reward_trajectory.append(0.)
hidden = None
while not(terminal) and counter<self.max_timesteps:
if self.args.OU:
action, hidden = self.get_OU_action(hidden=hidden,random=random,counter=counter, evaluate=test)
else:
action, hidden = self.get_action(hidden=hidden,random=random,counter=counter, evaluate=test)
# Take a step in the environment.
next_state, onestep_reward, terminal, success = self.environment.step(action)
self.state_trajectory.append(next_state)
self.action_trajectory.append(action)
self.reward_trajectory.append(onestep_reward)
self.terminal_trajectory.append(terminal)
# Copy next state into state.
state = copy.deepcopy(next_state)
# Counter
counter += 1
# Append image.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
print("Rolled out an episode for ",counter," timesteps.")
# Now that the episode is done, compute cumulative rewards...
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
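# Reversed cumsum computes reward-to-go: e.g. rewards [1, 2, 3] yield [6, 5, 3].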
self.episode_reward_statistics = copy.deepcopy(self.cummulative_rewards[0])
print("Achieved reward: ", self.episode_reward_statistics)
# print("########################################################")
# NOW construct an episode out of this..
self.episode = RLUtils.Episode(self.state_trajectory, self.action_trajectory, self.reward_trajectory, self.terminal_trajectory)
# Since we're doing TD updates, we DON'T want to use the cumulative reward, but rather the reward trajectory itself.
def get_transformed_gripper_value(self, gripper_finger_values):
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_current_input_row(self):
# Adding joint states, gripper, and actions, plus the conditional info, rather than just conditional info and actions.
gripper_finger_values = self.state_trajectory[-1]['gripper_qpos']
conditional = np.concatenate([self.state_trajectory[-1]['robot-state'].reshape((1,-1)),self.state_trajectory[-1]['object-state'].reshape((1,-1))],axis=1)
if len(self.action_trajectory)>0:
state_action = np.concatenate([self.state_trajectory[-1]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[-1].reshape((1,-1))],axis=1)
else:
# state_action = np.concatenate([self.state_trajectory[-1]['robot-state'].reshape((1,-1)),self.state_trajectory[-1]['object-state'].reshape((1,-1)),np.zeros((1,self.output_size))],axis=1)
state_action = np.concatenate([self.state_trajectory[-1]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), np.zeros((1,self.output_size))],axis=1)
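# The returned row layout is then (a sketch): [joint_pos | gripper scalar | previous (or zero) action | robot-state, object-state].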
return np.concatenate([state_action, conditional],axis=1)
def assemble_inputs(self):
conditional_sequence = np.concatenate([np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1) for t in range(len(self.state_trajectory))],axis=0)
state_action_sequence = np.concatenate([np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(self.state_trajectory[t]['gripper_qpos']), self.action_trajectory[t-1].reshape((1,-1))],axis=1) for t in range(1,len(self.state_trajectory))],axis=0)
initial_state_action = np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(self.state_trajectory[0]['gripper_qpos']), np.zeros((1, self.output_size))],axis=1)
# Copy initial state to the front of the state_action sequence (it must come first to preserve temporal order).
state_action_sequence = np.concatenate([initial_state_action, state_action_sequence],axis=0)
inputs = np.concatenate([state_action_sequence, conditional_sequence],axis=1)
return inputs
def process_episode(self, episode):
# Assemble states, actions, targets.
# First reset all the lists from the rollout now that they've been written to memory.
self.reset_lists()
# Now set the lists.
self.state_trajectory = episode.state_list
self.action_trajectory = episode.action_list
self.reward_trajectory = episode.reward_list
self.terminal_trajectory = episode.terminal_list
assembled_inputs = self.assemble_inputs()
# Input to the policy should be states and actions.
self.state_action_inputs = torch.tensor(assembled_inputs).to(device).float()
# Get summed reward for statistics.
self.batch_reward_statistics += sum(self.reward_trajectory)
def set_differentiable_critic_inputs(self):
# Get the policy's predicted actions greedily; the actor update uses the deterministic actions directly, so no exploration noise is added here.
predicted_action = self.policy_network.reparameterized_get_actions(self.state_action_inputs, greedy=True).squeeze(1)
# Concatenate the states from policy inputs and the predicted actions.
self.critic_inputs = torch.cat([self.state_action_inputs[:,:self.output_size], predicted_action, self.state_action_inputs[:,2*self.output_size:]],dim=1).to(device).float()
def update_policies(self):
######################################
# Compute losses for actor.
self.set_differentiable_critic_inputs()
self.policy_optimizer.zero_grad()
self.policy_loss = - self.critic_network.forward(self.critic_inputs[:-1]).mean()
self.policy_loss_statistics += self.policy_loss.clone().detach().cpu().numpy().mean()
self.policy_loss.backward()
self.policy_optimizer.step()
def set_targets(self):
if self.args.TD:
# Construct TD Targets.
self.TD_targets = self.critic_predictions.clone().detach().cpu().numpy()
# Select till last time step, because we don't care what critic says after last timestep.
self.TD_targets = np.roll(self.TD_targets,-1,axis=0)[:-1]
# Mask with (1 - terminal): zero out the bootstrapped value at terminal steps, i.e. target_t = r_t + gamma*(1 - done_t)*V(s_{t+1}).
self.TD_targets = self.gamma*(1.-np.array(self.terminal_trajectory).astype(float))*self.TD_targets
self.TD_targets += np.array(self.reward_trajectory)
self.critic_targets = torch.tensor(self.TD_targets).to(device).float()
else:
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
self.critic_targets = torch.tensor(self.cummulative_rewards).to(device).float()
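# Illustrative 3-step episode (terminal at the last step):
#   TD targets:          [r0 + g*V(s1), r1 + g*V(s2), r2]   (bootstrapped)
#   Monte-Carlo targets: [r0 + r1 + r2, r1 + r2, r2]        (reward-to-go)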
def update_critic(self):
######################################
# Zero gradients, then backprop into critic.
self.critic_optimizer.zero_grad()
# Get critic predictions first.
if self.args.MLP_policy:
self.critic_predictions = self.critic_network.forward(self.state_action_inputs).squeeze(1)
else:
self.critic_predictions = self.critic_network.forward(self.state_action_inputs).squeeze(1).squeeze(1)
# Before we actually compute loss, compute targets.
self.set_targets()
# We predicted critic values from states S_1 to S_{T+1} because we needed all for bootstrapping.
# For loss, we don't actually need S_{T+1}, so throw it out.
self.critic_loss = self.MSE_Loss(self.critic_predictions[:-1], self.critic_targets).mean()
self.critic_loss_statistics += self.critic_loss.clone().detach().cpu().numpy().mean()
self.critic_loss.backward()
self.critic_optimizer.step()
######################################
def update_networks(self):
# Update policy network.
self.update_policies()
# Now update critic network.
self.update_critic()
def reset_statistics(self):
# Can also reset the policy and critic loss statistics here.
self.policy_loss_statistics = 0.
self.critic_loss_statistics = 0.
self.batch_reward_statistics = 0.
self.episode_reward_statistics = 0.
def update_batch(self):
# Get set of indices of episodes in the memory.
batch_indices = self.memory.sample_batch(self.batch_size)
for ind in batch_indices:
# Retrieve appropriate episode from memory.
episode = self.memory.memory[ind]
# Set quantities from episode.
self.process_episode(episode)
# Now compute gradients to both networks from batch.
self.update_networks()
def update_plots(self, counter):
self.tf_logger.scalar_summary('Total Episode Reward', copy.deepcopy(self.episode_reward_statistics), counter)
self.tf_logger.scalar_summary('Batch Rewards', self.batch_reward_statistics/self.batch_size, counter)
self.tf_logger.scalar_summary('Policy Loss', self.policy_loss_statistics/self.batch_size, counter)
self.tf_logger.scalar_summary('Critic Loss', self.critic_loss_statistics/self.batch_size, counter)
if counter%self.args.display_freq==0:
# print("Embedding in Update Plots.")
# Rollout policy.
self.rollout(random=False, test=True, visualize=True)
self.tf_logger.gif_summary("Rollout Trajectory", [np.array(self.image_trajectory)], counter)
# Now that we've updated these into TB, reset stats.
self.reset_statistics()
def run_iteration(self, counter, evaluate=False):
# This is really a run episode function. Ignore the index, just use the counter.
# 1) Rollout trajectory.
# 2) Collect stats / append to memory and stuff.
# 3) Update policies.
self.set_parameters(counter, evaluate=evaluate)
# Maintain counter to keep track of updating the policy regularly.
# cProfile.runctx('self.rollout()',globals(), locals(),sort='cumtime')
self.rollout(random=False, test=evaluate)
if self.args.train and not(evaluate):
# If training, append to memory.
self.memory.append_to_memory(self.episode)
# Update on batch.
self.update_batch()
# Update plots.
self.update_plots(counter)
def initialize_memory(self):
# Create memory object.
self.memory = RLUtils.ReplayMemory(memory_size=self.args.memory_size)
# Number of initial episodes needs to be less than memory size.
self.initial_episodes = self.args.burn_in_eps
# While the number of episodes is less than initial_episodes:
episode_counter = 0
while episode_counter<self.initial_episodes:
# Reset the noise process! We forgot to do this! :(
self.NoiseProcess.reset()
print("Initializing Memory Episode: ", episode_counter)
# Rollout an episode.
self.rollout(random=self.args.random_memory_burn_in)
# Add episode to memory.
self.memory.append_to_memory(self.episode)
episode_counter += 1
def evaluate(self, epoch=None, model=None):
if model is not None:
print("Loading model in training.")
self.load_all_models(model)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
else:
model_epoch = epoch
self.total_rewards = np.zeros((self.number_test_episodes))
# For number of test episodes.
for eps in range(self.number_test_episodes):
# Run an iteration (and rollout)...
self.run_iteration(eps, evaluate=True)
self.total_rewards[eps] = np.array(self.reward_trajectory).sum()
# Create save directory to save these results.
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Total_Rewards_{0}.npy".format(self.args.name)),self.total_rewards)
np.save(os.path.join(self.dir_name,"Mean_Reward_{0}.npy".format(self.args.name)),self.total_rewards.mean())
def train(self, model=None):
# 1) Initialize memory maybe.
# 2) For number of iterations, RUN ITERATION:
# 3) Rollout trajectory.
# 4) Collect stats.
# 5) Update policies.
if model:
print("Loading model in training.")
self.load_all_models(model)
print("Starting Main Training Procedure.")
self.set_parameters(0)
np.set_printoptions(suppress=True,precision=2)
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
print("Initializing Memory.")
self.initialize_memory()
for e in range(self.number_episodes):
# Reset the noise process! We forgot to do this! :(
self.NoiseProcess.reset()
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
self.run_iteration(e)
print("#############################")
print("Running Episode: ",e)
if e%self.args.eval_freq==0:
self.evaluate(epoch=e, model=None)
class PolicyManager_DownstreamRL(PolicyManager_BaselineRL):
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_DownstreamRL, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
def setup(self):
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
# Here "state" means joint_pos + gripper, which has the same dimensionality as the action space.
self.state_size = self.environment.action_spec[0].shape[0]
self.conditional_info_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
# If we are loading policies....
if self.args.model:
# Padded conditional info.
self.conditional_info_size = 53
self.input_size = 2*self.state_size
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size)
def create_networks(self):
# Copying over the create networks from Joint Policy training.
# Not sure if there's a better way to inherit - unless we inherit from both classes.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.args.hidden_size, self.output_size, self.args, self.args.number_layers).to(device)
self.critic_network = CriticNetwork(self.input_size+self.conditional_info_size, self.args.hidden_size, 1, self.args, self.args.number_layers).to(device)
if self.args.constrained_b_prior:
self.latent_policy = ContinuousLatentPolicyNetwork_ConstrainedBPrior(self.input_size+self.conditional_info_size, self.args.hidden_size, self.args, self.args.number_layers).to(device)
else:
self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size+self.conditional_info_size, self.args.hidden_size, self.args, self.args.number_layers).to(device)
def create_training_ops(self):
self.NLL_Loss = torch.nn.NLLLoss(reduction='none')
self.MSE_Loss = torch.nn.MSELoss(reduction='none')
# If we are using reparameterization, use a global optimizer for both policies, and a global loss function.
parameter_list = list(self.latent_policy.parameters())
if not(self.args.fix_subpolicy):
parameter_list = parameter_list + list(self.policy_network.parameters())
# The policy optimizer handles both the low and high level policies, as long as the z's being passed from the latent to sub policy are differentiable.
self.policy_optimizer = torch.optim.Adam(parameter_list, lr=self.learning_rate)
self.critic_optimizer = torch.optim.Adam(self.critic_network.parameters(), lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Latent_Policy'] = self.latent_policy.state_dict()
save_object['Critic_Network'] = self.critic_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, critic=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if self.args.load_latent:
self.latent_policy.load_state_dict(load_object['Latent_Policy'])
if critic:
self.critic_network.load_state_dict(load_object['Critic_Network'])
def reset_lists(self):
self.reward_trajectory = []
self.state_trajectory = []
self.action_trajectory = []
self.image_trajectory = []
self.terminal_trajectory = []
self.latent_z_trajectory = []
self.latent_b_trajectory = []
self.cummulative_rewards = None
self.episode = None
def get_conditional_information_row(self, t=-1):
# Get robot and object state.
conditional_info_row = np.zeros((1,self.conditional_info_size))
info_value = np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1)
conditional_info_row[0,:info_value.shape[1]] = info_value
return conditional_info_row
def get_transformed_gripper_value(self, gripper_finger_values):
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_current_input_row(self, t=-1):
# The state that we want is ... joint state?
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
if len(self.action_trajectory)==0 or t==0:
return np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), np.zeros((1,1)), np.zeros((1,self.output_size))],axis=1)
elif t==-1:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t].reshape((1,-1))],axis=1)
else:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t-1].reshape((1,-1))],axis=1)
def get_latent_input_row(self, t=-1):
# If first timestep, z's are 0 and b is 1.
if len(self.latent_z_trajectory)==0 or t==0:
return np.concatenate([np.zeros((1, self.args.z_dimensions)),np.ones((1,1))],axis=1)
if t==-1:
return np.concatenate([self.latent_z_trajectory[t].reshape((1,-1)),self.latent_b_trajectory[t].reshape((1,1))],axis=1)
elif t>0:
t-=1
return np.concatenate([self.latent_z_trajectory[t].reshape((1,-1)),self.latent_b_trajectory[t].reshape((1,1))],axis=1)
def assemble_latent_input_row(self, t=-1):
# Function to assemble ONE ROW of latent policy input.
# Remember, the latent policy takes joint states, actions, z's, b's, and then conditional information (robot-state and object-state).
# Assemble these three pieces:
return np.concatenate([self.get_current_input_row(t), self.get_latent_input_row(t), self.get_conditional_information_row(t)],axis=1)
def assemble_latent_inputs(self):
# Assemble latent policy inputs over time.
return np.concatenate([self.assemble_latent_input_row(t) for t in range(len(self.state_trajectory))],axis=0)
def assemble_subpolicy_input_row(self, latent_z=None, t=-1):
# Remember, the subpolicy takes joint states, actions, and z's.
# Assemble (remember, without b, and without conditional info).
if latent_z is not None:
# return np.concatenate([self.get_current_input_row(t), latent_z.reshape((1,-1))],axis=1)
# Instead of numpy, use torch.
return torch.cat([torch.tensor(self.get_current_input_row(t)).to(device).float(), latent_z.reshape((1,-1))],dim=1)
else:
# Remember, get_latent_input_row isn't operating on something that needs to be differentiable, so just use numpy and then wrap with torch tensor.
# return torch.tensor(np.concatenate([self.get_current_input_row(t), self.get_latent_input_row(t)[:,:-1]],axis=1)).to(device).float()
return torch.tensor(np.concatenate([self.get_current_input_row(t), self.latent_z_trajectory[t].reshape((1,-1))],axis=1)).to(device).float()
def assemble_subpolicy_inputs(self, latent_z_list=None):
# Assemble sub policy inputs over time.
if latent_z_list is None:
# return np.concatenate([self.assemble_subpolicy_input_row(t) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy, use torch...
return torch.cat([self.assemble_subpolicy_input_row(t=t) for t in range(len(self.state_trajectory))],dim=0)
else:
# return np.concatenate([self.assemble_subpolicy_input_row(t, latent_z=latent_z_list[t]) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy, use torch...
return torch.cat([self.assemble_subpolicy_input_row(t=t, latent_z=latent_z_list[t]) for t in range(len(self.state_trajectory))],dim=0)
def assemble_state_action_row(self, action=None, t=-1):
# Get state action input row for critic.
if action is not None:
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
# Don't create a torch tensor out of actions.
return torch.cat([torch.tensor(self.state_trajectory[t]['joint_pos']).to(device).float().reshape((1,-1)), torch.tensor(gripper_value).to(device).float(), action.reshape((1,-1)), torch.tensor(self.get_conditional_information_row(t)).to(device).float()],dim=1)
else:
# Just use actions that were used in the trajectory. This doesn't need to be differentiable, because it's going to be used for the critic targets, so just make a torch tensor from numpy.
return torch.tensor(np.concatenate([self.get_current_input_row(t), self.get_conditional_information_row(t)],axis=1)).to(device).float()
def assemble_state_action_inputs(self, action_list=None):
# return np.concatenate([self.assemble_state_action_row(t) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy use torch.
if action_list is not None:
return torch.cat([self.assemble_state_action_row(t=t, action=action_list[t]) for t in range(len(self.state_trajectory))],dim=0)
else:
return torch.cat([self.assemble_state_action_row(t=t) for t in range(len(self.state_trajectory))],dim=0)
def get_OU_action_latents(self, policy_hidden=None, latent_hidden=None, random=False, counter=0, previous_z=None, test=False, delta_t=0):
# if random==True:
# action = 2*np.random.random((self.output_size))-1
# return action,
# Get latent policy inputs.
latent_policy_inputs = self.assemble_latent_input_row()
# Feed in latent policy inputs and get the latent policy outputs (z, b, and hidden)
latent_z, latent_b, latent_hidden = self.latent_policy.incremental_reparam_get_actions(torch.tensor(latent_policy_inputs).to(device).float(), greedy=True, hidden=latent_hidden, previous_z=previous_z, delta_t=delta_t)
# Perturb latent_z with some noise.
z_noise = self.epsilon*torch.randn_like(latent_z)
# Add noise to z.
latent_z = latent_z + z_noise
if latent_b[-1]==1:
delta_t = 0
else:
delta_t += 1
# Now get subpolicy inputs.
# subpolicy_inputs = self.assemble_subpolicy_input_row(latent_z.detach().cpu().numpy())
subpolicy_inputs = self.assemble_subpolicy_input_row(latent_z=latent_z)
# Feed in subpolicy inputs (already a torch tensor) and get the subpolicy outputs (a, hidden);
# overwrite policy_hidden so the recurrent state carries over to the next timestep.
predicted_action, policy_hidden = self.policy_network.incremental_reparam_get_actions(subpolicy_inputs, greedy=True, hidden=policy_hidden)
# Numpy action
action = predicted_action[-1].squeeze(0).detach().cpu().numpy()
if test:
perturbed_action = action
else:
# Perturb action with noise.
if self.args.OU:
perturbed_action = self.NoiseProcess.get_action(action, counter)
else:
# Just regular epsilon
perturbed_action = action + self.epsilon*np.random.randn(action.shape[-1])
return perturbed_action, latent_z, latent_b, policy_hidden, latent_hidden, delta_t
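# For reference, a typical Ornstein-Uhlenbeck update has the form (RLUtils.OUNoise may differ in details):
#   x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, I)
# i.e. temporally correlated exploration noise that decays toward mu, as commonly used in continuous control (e.g. DDPG).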
def rollout(self, random=False, test=False, visualize=False):
# Reset the noise process at the start of every rollout, so exploration noise doesn't carry over across episodes.
self.NoiseProcess.reset()
# Reset some data for the rollout.
counter = 0
eps_reward = 0.
terminal = False
self.reset_lists()
# Reset environment and add state to the list.
state = self.environment.reset()
self.state_trajectory.append(state)
# If we are going to visualize, get an initial image.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
# Instead of maintaining just one LSTM hidden state... now have one for each policy level.
policy_hidden = None
latent_hidden = None
latent_z = None
delta_t = 0
# For number of steps / while we don't terminate:
while not(terminal) and counter<self.max_timesteps:
# Get the action to execute, b, z, and hidden states.
action, latent_z, latent_b, policy_hidden, latent_hidden, delta_t = self.get_OU_action_latents(policy_hidden=policy_hidden, latent_hidden=latent_hidden, random=random, counter=counter, previous_z=latent_z, test=test, delta_t=delta_t)
if self.args.debug:
print("Embed in Trajectory Rollout.")
embed()
# Take a step in the environment.
next_state, onestep_reward, terminal, success = self.environment.step(action)
# Append everything to lists.
self.state_trajectory.append(next_state)
self.action_trajectory.append(action)
self.reward_trajectory.append(onestep_reward)
self.terminal_trajectory.append(terminal)
self.latent_z_trajectory.append(latent_z.detach().cpu().numpy())
self.latent_b_trajectory.append(latent_b.detach().cpu().numpy())
# Copy next state into state.
state = copy.deepcopy(next_state)
# Counter
counter += 1
# Append image to image list if we are visualizing.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
# Now that the episode is done, compute cumulative rewards...
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
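# Worked example: rewards [1., 2., 3.] -> reversed [3., 2., 1.] -> cumsum [3., 5., 6.] -> reversed [6., 5., 3.],
# so element t of cummulative_rewards holds the (undiscounted) reward-to-go from timestep t onwards.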
self.episode_reward_statistics = copy.deepcopy(self.cummulative_rewards[0])
print("Rolled out an episode for ",counter," timesteps.")
print("Achieved reward: ", self.episode_reward_statistics)
# NOW construct an episode out of this..
self.episode = RLUtils.HierarchicalEpisode(self.state_trajectory, self.action_trajectory, self.reward_trajectory, self.terminal_trajectory, self.latent_z_trajectory, self.latent_b_trajectory)
def process_episode(self, episode):
# Assemble states, actions, targets.
# First reset all the lists from the rollout now that they've been written to memory.
self.reset_lists()
# Now set the lists.
self.state_trajectory = episode.state_list
self.action_trajectory = episode.action_list
self.reward_trajectory = episode.reward_list
self.terminal_trajectory = episode.terminal_list
self.latent_z_trajectory = episode.latent_z_list
self.latent_b_trajectory = episode.latent_b_list
# Get summed reward for statistics.
self.batch_reward_statistics += sum(self.reward_trajectory)
# Assembling state_action inputs to feed to the Critic network for TARGETS. (These don't need to, and in fact shouldn't, be differentiable).
# Note: assemble_state_action_inputs already returns a torch tensor; detach it, since critic targets shouldn't be differentiable.
self.state_action_inputs = self.assemble_state_action_inputs().detach().to(device).float()
def update_policies(self):
# There are a few steps that need to be taken.
# 1) Assemble latent policy inputs.
# 2) Get differentiable latent z's from latent policy.
# 3) Assemble subpolicy inputs with these differentiable latent z's.
# 4) Get differentiable actions from subpolicy.
# 5) Assemble critic inputs with these differentiable actions.
# 6) Now compute critic predictions that are differentiable w.r.t. sub and latent policies.
# 7) Backprop.
# 1) Assemble latent policy inputs. # Remember, these are the only things that don't need to be differentiable.
self.latent_policy_inputs = torch.tensor(self.assemble_latent_inputs()).to(device).float()
# 2) Feed this into latent policy.
# latent_policy_inputs is already a torch tensor on device; pass it in directly.
latent_z, latent_b, _ = self.latent_policy.incremental_reparam_get_actions(self.latent_policy_inputs, greedy=True)
# 3) Assemble subpolicy inputs with diff latent z's. Remember, this needs to be differentiable. Modify the assembling to torch, WITHOUT creating new torch tensors of z.
self.subpolicy_inputs = self.assemble_subpolicy_inputs(latent_z_list=latent_z)
# 4) Feed into subpolicy.
diff_actions, _ = self.policy_network.incremental_reparam_get_actions(self.subpolicy_inputs, greedy=True)
# 5) Now assemble critic inputs.
self.differentiable_critic_inputs = self.assemble_state_action_inputs(action_list=diff_actions)
# 6) Compute critic predictions.
self.policy_loss = - self.critic_network.forward(self.differentiable_critic_inputs[:-1]).mean()
# Also log statistics.
self.policy_loss_statistics += self.policy_loss.clone().detach().cpu().numpy().mean()
# 7) Now backprop into policy.
self.policy_optimizer.zero_grad()
self.policy_loss.backward()
self.policy_optimizer.step()
class PolicyManager_DMPBaselines(PolicyManager_Joint):
# Make it inherit joint policy manager init.
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_DMPBaselines, self).__init__(number_policies, dataset, args)
def setup_DMP_parameters(self):
# DMP hyperparameters, taken from the command-line arguments.
self.number_kernels = self.args.baseline_kernels
self.window = self.args.baseline_window
self.kernel_bandwidth = self.args.baseline_kernel_bandwidth
def get_MSE(self, sample_traj, trajectory_rollout):
# Evaluate MSE between reconstruction and sample trajectory.
return ((sample_traj-trajectory_rollout)**2).mean()
def get_FlatDMP_rollout(self, sample_traj, velocities=None):
# Reinitialize DMP Class.
self.dmp = DMP.DMP(time_steps=len(sample_traj), num_ker=self.number_kernels, dimensions=self.state_size, kernel_bandwidth=self.kernel_bandwidth, alphaz=5., time_basis=True)
# Learn DMP for particular trajectory.
self.dmp.learn_DMP(sample_traj)
# Get rollout.
if velocities is not None:
trajectory_rollout = self.dmp.rollout(sample_traj[0],sample_traj[-1],velocities)
else:
trajectory_rollout = self.dmp.rollout(sample_traj[0],sample_traj[-1],np.zeros((self.state_size)))
return trajectory_rollout
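# For reference, the standard DMP transformation system being fit here looks like (the DMP class may differ in details):
#   tau * dv/dt = alpha_z * (beta_z * (g - y) - v) + f(s),    tau * dy/dt = v
# where y starts at sample_traj[0], g is the goal sample_traj[-1], and the forcing term f(s) is a weighted sum of
# self.number_kernels basis functions with bandwidth self.kernel_bandwidth, learned from the demonstration.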
def evaluate_FlatDMPBaseline_iteration(self, index, sample_traj):
trajectory_rollout = self.get_FlatDMP_rollout(sample_traj)
self.FlatDMP_distances[index] = self.get_MSE(sample_traj, trajectory_rollout)
def get_AccelerationChangepoint_rollout(self, sample_traj):
# Get magnitudes of acceleration across time.
acceleration_norm = np.linalg.norm(np.diff(sample_traj,n=2,axis=0),axis=1)
# Get velocities.
velocities = np.diff(sample_traj,n=1,axis=0,prepend=sample_traj[0].reshape((1,-1)))
# Find acceleration peaks that are separated by at least `window` timesteps.
window = self.window
segmentation = find_peaks(acceleration_norm, distance=window)[0]
if len(segmentation)==0:
segmentation = np.array([0,len(sample_traj)])
else:
# Add start and end to peaks.
if segmentation[0]<window:
segmentation[0] = 0
else:
segmentation = np.insert(segmentation, 0, 0)
# If end segmentation is within WINDOW of end, change segment to end.
if (len(sample_traj) - segmentation[-1])<window:
segmentation[-1] = len(sample_traj)
else:
segmentation = np.insert(segmentation, len(segmentation), sample_traj.shape[0])
trajectory_rollout = np.zeros_like(sample_traj)
# For every segment.
for i in range(len(segmentation)-1):
# Get trajectory segment.
trajectory_segment = sample_traj[segmentation[i]:segmentation[i+1]]
# Get rollout. # Feed velocities into rollout. # First velocity is 0.
segment_rollout = self.get_FlatDMP_rollout(trajectory_segment, velocities[segmentation[i]])
# Copy segment rollout into full rollout.
trajectory_rollout[segmentation[i]:segmentation[i+1]] = segment_rollout
return trajectory_rollout
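# Worked example of the segmentation above: for a 100-step trajectory with peaks at [5, 40, 95] and window 15,
# the leading peak (5 < 15) snaps to 0 and the trailing peak (100 - 95 < 15) snaps to 100, giving
# segments [0:40] and [40:100], each fit and rolled out with its own DMP.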
def evaluate_AccelerationChangepoint_iteration(self, index, sample_traj):
trajectory_rollout = self.get_AccelerationChangepoint_rollout(sample_traj)
self.AccChangepointDMP_distances[index] = self.get_MSE(sample_traj, trajectory_rollout)
def evaluate_MeanRegression_iteration(self, index, sample_traj):
mean = sample_traj.mean(axis=0)
self.MeanRegression_distances[index] = ((sample_traj-mean)**2).mean()
def get_GreedyDMP_rollout(self, sample_traj):
pass
def evaluate_across_testset(self):
self.setup_DMP_parameters()
# Create array for distances.
self.FlatDMP_distances = -np.ones((self.test_set_size))
self.AccChangepointDMP_distances = -np.ones((self.test_set_size))
self.MeanRegression_distances = -np.ones((self.test_set_size))
self.lengths = -np.ones((self.test_set_size))
for i in range(self.test_set_size):
# Set actual index.
index = i + len(self.dataset) - self.test_set_size
if i%100==0:
print("Evaluating Datapoint ", i)
# Get trajectory.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
# Set sample trajectory to ignore gripper.
if self.args.data=='MIME':
sample_traj = sample_traj[:,:-2]
self.state_size = 14
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
sample_traj = sample_traj[:,:-1]
self.state_size = 7
# sample_traj = gaussian_filter1d(sample_traj,3.5,axis=0,mode='nearest')
# elif self.args.data=='Mocap':
# sample_traj = sample_traj
self.lengths[i] = len(sample_traj)
# Eval Flat DMP.
self.evaluate_FlatDMPBaseline_iteration(i, sample_traj)
# Eval AccChange DMP Baseline.
self.evaluate_AccelerationChangepoint_iteration(i, sample_traj)
# Evaluate Mean Regression Baseline.
self.evaluate_MeanRegression_iteration(i, sample_traj)
# self.mean_distance = self.distances[self.distances>0].mean()
print("Average Distance of Flat DMP Baseline: ", self.FlatDMP_distances[self.FlatDMP_distances>0].mean())
print("Average Distance of Acceleration Changepoint Baseline: ", self.AccChangepointDMP_distances[self.AccChangepointDMP_distances>0].mean())
print("Average Distance of Mean Regression Baseline: ", self.MeanRegression_distances[self.MeanRegression_distances>0].mean())
embed()
class PolicyManager_Imitation(PolicyManager_Pretrain, PolicyManager_BaselineRL):
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_Imitation, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
# Explicitly run inits to make sure inheritance is good.
# PolicyManager_Pretrain.__init__(self, number_policies, dataset, args)
# PolicyManager_BaselineRL.__init__(self, args)
# Set train only policy to true.
self.args.train_only_policy = 1
# Get task index from task name.
self.demo_task_index = np.where(np.array(self.dataset.environment_names)==self.args.environment)[0][0]
def setup(self):
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
np.set_printoptions(suppress=True,precision=2)
# Create index list.
extent = self.dataset.get_number_task_demos(self.demo_task_index)
self.index_list = np.arange(0,extent)
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
self.state_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
self.conditional_info_size = self.state_size
# Input size: the state-action row (joint positions + gripper value + previous action, i.e. 2*output_size) plus conditional information (state_size).
self.input_size = self.state_size + self.output_size*2
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
self.total_rewards = 0.
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size)
def create_networks(self):
# We don't need a decoder.
# Policy Network is the only thing we need.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, only_policy=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
def update_policies(self, logprobabilities):
# Set gradients to 0.
self.optimizer.zero_grad()
# Set policy loss.
self.policy_loss = -logprobabilities[:-1].mean()
# Backward.
self.policy_loss.backward()
# Take a step.
self.optimizer.step()
def update_plots(self, counter, logprobabilities):
self.tf_logger.scalar_summary('Policy LogLikelihood', torch.mean(logprobabilities), counter)
if counter%self.args.display_freq==0:
# print("Embedding in Update Plots.")
# Rollout policy.
self.rollout(random=False, test=True, visualize=True)
self.tf_logger.gif_summary("Rollout Trajectory", [np.array(self.image_trajectory)], counter)
def run_iteration(self, counter, i):
self.set_epoch(counter)
self.iter = counter
############# (0) #############
# Get sample we're going to train on.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
# Now concatenate info with... conditional_information
policy_inputs = np.concatenate([concatenated_traj, self.conditional_information], axis=1)
# We would normally pad the action sequence with a zero final action so that likelihoods line up; since we're using demo actions here, that padding isn't needed.
# padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
# Feed concatenated trajectory into the policy.
logprobabilities, _ = self.policy_network.forward(torch.tensor(policy_inputs).to(device).float(), sample_action_seq)
if self.args.train:
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Train Function.")
embed()
# Update policy.
self.update_policies(logprobabilities)
# Update plots.
self.update_plots(counter, logprobabilities)
def get_transformed_gripper_value(self, gripper_finger_values):
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_state_action_row(self, t=-1):
# Use the joint state (plus gripper value and previous action) as the state-action row.
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
if len(self.action_trajectory)==0 or t==0:
return np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), np.zeros((1,1)), np.zeros((1,self.output_size))],axis=1)
elif t==-1:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t].reshape((1,-1))],axis=1)
else:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t-1].reshape((1,-1))],axis=1)
def get_current_input_row(self, t=-1):
# Rewrite this function so that the BaselineRL rollout code can still use it here...
# First get conditional information.
# Get robot and object state.
conditional_info = np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1)
# Get state actions..
state_action = self.get_state_action_row()
# Concatenate.
input_row = np.concatenate([state_action, conditional_info],axis=1)
return input_row
def evaluate(self, epoch=None, model=None):
if model is not None:
self.load_all_models(model)
# str.lstrip strips a character set rather than a prefix, so remove the literal "Model_epoch" prefix explicitly.
model_epoch = int(os.path.split(self.args.model)[1].replace("Model_epoch",""))
else:
model_epoch = epoch
self.total_rewards = np.zeros((self.number_test_episodes))
# Set parameters like epsilon.
self.set_parameters(0, evaluate=True)
# For number of test episodes.
for eps in range(self.number_test_episodes):
# Now run a rollout.
self.rollout(random=False, test=True)
self.total_rewards[eps] = np.array(self.reward_trajectory).sum()
# Create save directory to save these results.
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Total_Rewards_{0}.npy".format(self.args.name)),self.total_rewards)
np.save(os.path.join(self.dir_name,"Mean_Reward_{0}.npy".format(self.args.name)),self.total_rewards.mean())
# Add average reward to tensorboard.
self.tf_logger.scalar_summary('Average Reward', self.total_rewards.mean(), model_epoch)
def train(self, model=None):
if model:
print("Loading model in training.")
self.load_all_models(model)
counter = 0
# For number of training epochs.
for e in range(self.number_epochs):
print("Starting Epoch: ",e)
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
# self.automatic_evaluation(e)
np.random.shuffle(self.index_list)
if self.args.debug:
print("Embedding in Outer Train Function.")
embed()
# For every item in the epoch:
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
else:
extent = len(self.dataset)-self.test_set_size
for i in range(extent):
print("Epoch: ",e," Trajectory:",i, "Datapoint: ", self.index_list[i])
self.run_iteration(counter, self.index_list[i])
counter = counter+1
if e%self.args.eval_freq==0:
self.evaluate(e)
self.write_and_close()
class PolicyManager_Transfer(PolicyManager_BaseClass):
def __init__(self, args=None, source_dataset=None, target_dataset=None):
super(PolicyManager_Transfer, self).__init__()
# The inherited functions refer to self.args. Also making this to make inheritance go smooth.
self.args = args
# Before instantiating policy managers of source or target domains; create copies of args with data attribute changed.
self.source_args = copy.deepcopy(args)
self.source_args.data = self.source_args.source_domain
self.source_dataset = source_dataset
self.target_args = copy.deepcopy(args)
self.target_args.data = self.target_args.target_domain
self.target_dataset = target_dataset
# Now create two instances of policy managers for each domain. Call them source and target domain policy managers.
self.source_manager = PolicyManager_Pretrain(dataset=self.source_dataset, args=self.source_args)
self.target_manager = PolicyManager_Pretrain(dataset=self.target_dataset, args=self.target_args)
self.source_dataset_size = len(self.source_manager.dataset) - self.source_manager.test_set_size
self.target_dataset_size = len(self.target_manager.dataset) - self.target_manager.test_set_size
# Now create variables that we need.
self.number_epochs = 200
self.extent = max(self.source_dataset_size, self.target_dataset_size)
# Now setup networks for these PolicyManagers.
self.source_manager.setup()
self.target_manager.setup()
# Now define other parameters that will be required for the discriminator, etc.
self.input_size = self.args.z_dimensions
self.hidden_size = self.args.hidden_size
self.output_size = 2
self.learning_rate = self.args.learning_rate
def set_iteration(self, counter):
# Based on what phase of training we are in, set discriminability loss weight, etc.
# Phase 1 of training: Don't train discriminator at all, set discriminability loss weight to 0.
if counter<self.args.training_phase_size:
self.discriminability_loss_weight = 0.
self.vae_loss_weight = 1.
self.training_phase = 1
self.skip_vae = False
self.skip_discriminator = True
# Phase 2 of training: Train the discriminator, and set discriminability loss weight to original.
else:
self.discriminability_loss_weight = self.args.discriminability_weight
self.vae_loss_weight = self.args.vae_loss_weight
# Now make discriminator and vae train in alternating fashion.
# Set number of iterations of alteration.
# self.alternating_phase_size = self.args.alternating_phase_size*self.extent
# # If odd epoch, train discriminator. (Just so that we start training discriminator first).
# if (counter/self.alternating_phase_size)%2==1:
# self.skip_discriminator = False
# self.skip_vae = True
# # Otherwise train VAE.
# else:
# self.skip_discriminator = True
# self.skip_vae = False
# Train discriminator for k times as many steps as VAE. Set args.alternating_phase_size as 1 for this.
if (counter/self.args.alternating_phase_size)%(self.args.discriminator_phase_size+1)>=1:
print("Training Discriminator.")
self.skip_discriminator = False
self.skip_vae = True
# Otherwise train VAE.
else:
print("Training VAE.")
self.skip_discriminator = True
self.skip_vae = False
self.training_phase = 2
self.source_manager.set_epoch(counter)
self.target_manager.set_epoch(counter)
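# Worked example of the phase-2 schedule above: with args.alternating_phase_size = 1 and
# args.discriminator_phase_size = k, counter % (k+1) == 0 trains the VAE while counter % (k+1) in {1,...,k}
# trains the discriminator, i.e. k discriminator steps per VAE step.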
def create_networks(self):
# Call create networks from each of the policy managers.
self.source_manager.create_networks()
self.target_manager.create_networks()
# Now must also create discriminator.
self.discriminator_network = DiscreteMLP(self.input_size, self.hidden_size, self.output_size).to(device)
def create_training_ops(self):
# # Call create training ops from each of the policy managers. Need these optimizers, because the encoder-decoders get a different loss than the discriminator.
self.source_manager.create_training_ops()
self.target_manager.create_training_ops()
# Create BCE loss object.
# self.BCE_loss = torch.nn.BCELoss(reduction='None')
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
# Create common optimizer for source, target, and discriminator networks.
self.discriminator_optimizer = torch.optim.Adam(self.discriminator_network.parameters(),lr=self.learning_rate)
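# Usage sketch for the loss above (shapes inferred from the squeeze(1) calls in update_networks below),
# assuming the discriminator outputs log-probabilities over the two domains:
#   logprob, prob = self.discriminator_network(latent_z)                            # logprob: (1, 1, 2)
#   loss = self.negative_log_likelihood_loss_function(logprob.squeeze(1), label)    # picks -log p(label)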
def save_all_models(self, suffix):
self.logdir = os.path.join(self.args.logdir, self.args.name)
self.savedir = os.path.join(self.logdir,"saved_models")
if not(os.path.isdir(self.savedir)):
os.mkdir(self.savedir)
self.save_object = {}
# Source
self.save_object['Source_Policy_Network'] = self.source_manager.policy_network.state_dict()
self.save_object['Source_Encoder_Network'] = self.source_manager.encoder_network.state_dict()
# Target
self.save_object['Target_Policy_Network'] = self.target_manager.policy_network.state_dict()
self.save_object['Target_Encoder_Network'] = self.target_manager.encoder_network.state_dict()
# Discriminator
self.save_object['Discriminator_Network'] = self.discriminator_network.state_dict()
torch.save(self.save_object,os.path.join(self.savedir,"Model_"+suffix))
def load_all_models(self, path):
self.load_object = torch.load(path)
# Source
self.source_manager.policy_network.load_state_dict(self.load_object['Source_Policy_Network'])
self.source_manager.encoder_network.load_state_dict(self.load_object['Source_Encoder_Network'])
# Target
self.target_manager.policy_network.load_state_dict(self.load_object['Target_Policy_Network'])
self.target_manager.encoder_network.load_state_dict(self.load_object['Target_Encoder_Network'])
# Discriminator
self.discriminator_network.load_state_dict(self.load_object['Discriminator_Network'])
def get_domain_manager(self, domain):
# Create a list, and just index into this list.
domain_manager_list = [self.source_manager, self.target_manager]
return domain_manager_list[domain]
def get_trajectory_segment_tuple(self, source_manager, target_manager):
# Sample indices.
source_index = np.random.randint(0, high=self.source_dataset_size)
target_index = np.random.randint(0, high=self.target_dataset_size)
# Get trajectory segments.
source_trajectory_segment, source_action_seq, _ = source_manager.get_trajectory_segment(source_manager.index_list[source_index])
target_trajectory_segment, target_action_seq, _ = target_manager.get_trajectory_segment(target_manager.index_list[target_index])
return source_trajectory_segment, source_action_seq, target_trajectory_segment, target_action_seq
def encode_decode_trajectory(self, policy_manager, i, return_trajectory=False, trajectory_input=None):
# This should basically replicate the encode-decode steps in run_iteration of the Pretrain_PolicyManager.
############# (0) #############
# Sample trajectory segment from dataset.
# Check if the index is too big. If yes, just sample randomly.
if i >= len(policy_manager.dataset):
i = np.random.randint(0, len(policy_manager.dataset))
if trajectory_input is not None:
# Grab trajectory segment from tuple; keep a reference in trajectory_segment for the validity check below.
torch_traj_seg = trajectory_input['target_trajectory_rollout']
trajectory_segment = torch_traj_seg
else:
trajectory_segment, sample_action_seq, sample_traj = policy_manager.get_trajectory_segment(i)
# Torchify trajectory segment.
torch_traj_seg = torch.tensor(trajectory_segment).to(device).float()
if trajectory_segment is not None:
############# (1) #############
# Encode trajectory segment into latent z.
latent_z, encoder_loglikelihood, encoder_entropy, kl_divergence = policy_manager.encoder_network.forward(torch_traj_seg, policy_manager.epsilon)
########## (2) & (3) ##########
# Feed latent z and trajectory segment into policy network and evaluate likelihood.
latent_z_seq, latent_b = policy_manager.construct_dummy_latents(latent_z)
# If we are using the pre-computed trajectory input, (in second encode_decode call, from target trajectory to target latent z.)
# Don't assemble trajectory in numpy, just take the previous subpolicy_inputs, and then clone it and replace the latent z in it.
if trajectory_input is not None:
# Now assign trajectory_input['target_subpolicy_inputs'].clone() to subpolicy_inputs, and then replace the latent z's.
subpolicy_inputs = trajectory_input['target_subpolicy_inputs'].clone()
subpolicy_inputs[:,2*self.state_dim:-1] = latent_z_seq
# Now get "sample_action_seq" for forward function.
sample_action_seq = subpolicy_inputs[:,self.state_dim:2*self.state_dim].clone()
else:
_, subpolicy_inputs, sample_action_seq = policy_manager.assemble_inputs(trajectory_segment, latent_z_seq, latent_b, sample_action_seq)
# Policy net doesn't use the decayed epsilon. (Because we never sample from it during training, only during rollouts.)
loglikelihoods, _ = policy_manager.policy_network.forward(subpolicy_inputs, sample_action_seq)
loglikelihood = loglikelihoods[:-1].mean()
if return_trajectory:
return sample_traj, latent_z
else:
return subpolicy_inputs, latent_z, loglikelihood, kl_divergence
if return_trajectory:
return None, None
else:
return None, None, None, None
def update_plots(self, counter, viz_dict):
# VAE Losses.
self.tf_logger.scalar_summary('Policy LogLikelihood', self.likelihood_loss, counter)
self.tf_logger.scalar_summary('Discriminability Loss', self.discriminability_loss, counter)
self.tf_logger.scalar_summary('Encoder KL', self.encoder_KL, counter)
self.tf_logger.scalar_summary('VAE Loss', self.VAE_loss, counter)
self.tf_logger.scalar_summary('Total VAE Loss', self.total_VAE_loss, counter)
self.tf_logger.scalar_summary('Domain', viz_dict['domain'], counter)
# Plot discriminator values after we've started training it.
if self.training_phase>1:
# Discriminator Loss.
self.tf_logger.scalar_summary('Discriminator Loss', self.discriminator_loss, counter)
# Compute discriminator prob of right action for logging.
self.tf_logger.scalar_summary('Discriminator Probability', viz_dict['discriminator_probs'], counter)
# If we are displaying things:
if counter%self.args.display_freq==0:
self.gt_gif_list = []
self.rollout_gif_list = []
# Now using both TSNE and PCA.
# Plot source, target, and shared embeddings via TSNE.
tsne_source_embedding, tsne_target_embedding, tsne_combined_embeddings, tsne_combined_traj_embeddings = self.get_embeddings(projection='tsne')
# Now actually plot the images.
self.tf_logger.image_summary("TSNE Source Embedding", [tsne_source_embedding], counter)
self.tf_logger.image_summary("TSNE Target Embedding", [tsne_target_embedding], counter)
self.tf_logger.image_summary("TSNE Combined Embeddings", [tsne_combined_embeddings], counter)
# Plot source, target, and shared embeddings via PCA.
pca_source_embedding, pca_target_embedding, pca_combined_embeddings, pca_combined_traj_embeddings = self.get_embeddings(projection='pca')
# Now actually plot the images.
self.tf_logger.image_summary("PCA Source Embedding", [pca_source_embedding], counter)
self.tf_logger.image_summary("PCA Target Embedding", [pca_target_embedding], counter)
self.tf_logger.image_summary("PCA Combined Embeddings", [pca_combined_embeddings], counter)
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
self.tf_logger.image_summary("PCA Combined Trajectory Embeddings", [pca_combined_traj_embeddings], counter)
self.tf_logger.image_summary("TSNE Combined Trajectory Embeddings", [tsne_combined_traj_embeddings], counter)
# We are also going to log Ground Truth trajectories and their reconstructions in each of the domains, to make sure our networks are learning.
# Should be able to use the policy manager's functions to do this.
source_trajectory, source_reconstruction, target_trajectory, target_reconstruction = self.get_trajectory_visuals()
if source_trajectory is not None:
# Now actually plot the images.
if self.args.source_domain=='ContinuousNonZero':
self.tf_logger.image_summary("Source Trajectory", [source_trajectory], counter)
self.tf_logger.image_summary("Source Reconstruction", [source_reconstruction], counter)
else:
self.tf_logger.gif_summary("Source Trajectory", [source_trajectory], counter)
self.tf_logger.gif_summary("Source Reconstruction", [source_reconstruction], counter)
if self.args.target_domain=='ContinuousNonZero':
self.tf_logger.image_summary("Target Trajectory", [target_trajectory], counter)
self.tf_logger.image_summary("Target Reconstruction", [target_reconstruction], counter)
else:
self.tf_logger.gif_summary("Target Trajectory", [target_trajectory], counter)
self.tf_logger.gif_summary("Target Reconstruction", [target_reconstruction], counter)
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
# Evaluate metrics and plot them.
# self.evaluate_correspondence_metrics(computed_sets=False)
# Actually, we've probably computed trajectory and latent sets.
self.evaluate_correspondence_metrics()
self.tf_logger.scalar_summary('Source To Target Trajectory Distance', self.source_target_trajectory_distance, counter)
self.tf_logger.scalar_summary('Target To Source Trajectory Distance', self.target_source_trajectory_distance, counter)
def get_transform(self, latent_z_set, projection='tsne', shared=False):
if shared:
# If this set of z's contains z's from both source and target domains, mean-std normalize them independently.
normed_z = np.zeros_like(latent_z_set)
# Normalize source.
source_mean = latent_z_set[:self.N].mean(axis=0)
source_std = latent_z_set[:self.N].std(axis=0)
normed_z[:self.N] = (latent_z_set[:self.N]-source_mean)/source_std
# Normalize target.
target_mean = latent_z_set[self.N:].mean(axis=0)
target_std = latent_z_set[self.N:].std(axis=0)
normed_z[self.N:] = (latent_z_set[self.N:]-target_mean)/target_std
else:
# Just normalize z's.
mean = latent_z_set.mean(axis=0)
std = latent_z_set.std(axis=0)
normed_z = (latent_z_set-mean)/std
if projection=='tsne':
# Use TSNE to project the data:
tsne = skl_manifold.TSNE(n_components=2,random_state=0)
embedded_zs = tsne.fit_transform(normed_z)
scale_factor = 1
scaled_embedded_zs = scale_factor*embedded_zs
return scaled_embedded_zs, tsne
elif projection=='pca':
# Use PCA to project the data:
pca_object = PCA(n_components=2)
embedded_zs = pca_object.fit_transform(normed_z)
return embedded_zs, pca_object
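# Usage sketch (mirroring get_embeddings below): fit once on source z's, then reuse the fitted object on target z's:
#   source_embedded, pca = self.get_transform(self.source_latent_zs, projection='pca')
#   target_embedded = self.transform_zs(self.target_latent_zs, pca)
# Note sklearn's TSNE has no transform() for new points, which is why the t-SNE branch embeds each set separately.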
def transform_zs(self, latent_z_set, transforming_object):
# Simply just transform according to a fit transforming_object.
return transforming_object.transform(latent_z_set)
# @profile
def get_embeddings(self, projection='tsne'):
# Function to visualize source, target, and combined embeddings:
self.N = 100
self.source_latent_zs = np.zeros((self.N,self.args.z_dimensions))
self.target_latent_zs = np.zeros((self.N,self.args.z_dimensions))
self.shared_latent_zs = np.zeros((2*self.N,self.args.z_dimensions))
# For N data points:
for i in range(self.N):
# Get corresponding latent z's of source and target domains.
_, source_z, _, _ = self.encode_decode_trajectory(self.source_manager, i)
_, target_z, _, _ = self.encode_decode_trajectory(self.target_manager, i)
if source_z is not None:
self.source_latent_zs[i] = source_z.detach().cpu().numpy()
self.shared_latent_zs[i] = source_z.detach().cpu().numpy()
if target_z is not None:
self.target_latent_zs[i] = target_z.detach().cpu().numpy()
self.shared_latent_zs[self.N+i] = target_z.detach().cpu().numpy()
if projection=='tsne':
# Use TSNE to transform data.
source_embedded_zs, _ = self.get_transform(self.source_latent_zs, projection)
target_embedded_zs, _ = self.get_transform(self.target_latent_zs, projection)
shared_embedded_zs, _ = self.get_transform(self.shared_latent_zs, projection, shared=True)
elif projection=='pca':
# Now fit PCA to source.
source_embedded_zs, pca = self.get_transform(self.source_latent_zs, projection)
target_embedded_zs = self.transform_zs(self.target_latent_zs, pca)
shared_embedded_zs = np.concatenate([source_embedded_zs, target_embedded_zs],axis=0)
source_image = self.plot_embedding(source_embedded_zs, "Source_Embedding")
target_image = self.plot_embedding(target_embedded_zs, "Target_Embedding")
shared_image = self.plot_embedding(shared_embedded_zs, "Shared_Embedding", shared=True)
toy_shared_embedding_image = None
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
toy_shared_embedding_image = self.plot_embedding(shared_embedded_zs, "Toy_Shared_Traj_Embedding", shared=True, trajectory=True)
return source_image, target_image, shared_image, toy_shared_embedding_image
# @profile
def plot_embedding(self, embedded_zs, title, shared=False, trajectory=False):
fig = plt.figure()
ax = fig.gca()
if shared:
colors = 0.2*np.ones((2*self.N))
colors[self.N:] = 0.8
else:
colors = 0.2*np.ones((self.N))
if trajectory:
# Create a scatter plot of the embedding.
self.source_manager.get_trajectory_and_latent_sets()
self.target_manager.get_trajectory_and_latent_sets()
ratio = 0.4
color_scaling = 15
# Assemble shared trajectory set.
traj_length = len(self.source_manager.trajectory_set[0,:,0])
self.shared_trajectory_set = np.zeros((2*self.N, traj_length, 2))
self.shared_trajectory_set[:self.N] = self.source_manager.trajectory_set
self.shared_trajectory_set[self.N:] = self.target_manager.trajectory_set
color_range_min = 0.2*color_scaling
color_range_max = 0.8*color_scaling+traj_length-1
for i in range(2*self.N):
ax.scatter(embedded_zs[i,0]+ratio*self.shared_trajectory_set[i,:,0],embedded_zs[i,1]+ratio*self.shared_trajectory_set[i,:,1],c=colors[i]*color_scaling+range(traj_length),cmap='jet',vmin=color_range_min,vmax=color_range_max)
else:
# Create a scatter plot of the embedding.
ax.scatter(embedded_zs[:,0],embedded_zs[:,1],c=colors,vmin=0,vmax=1,cmap='jet')
# Title.
ax.set_title("{0}".format(title),fontdict={'fontsize':40})
fig.canvas.draw()
# Grab image.
width, height = fig.get_size_inches() * fig.get_dpi()
# np.fromstring is deprecated for this use; np.frombuffer is the drop-in replacement.
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
def get_trajectory_visuals(self):
i = np.random.randint(0,high=self.extent)
# First get a trajectory, starting point, and latent z.
source_trajectory, source_latent_z = self.encode_decode_trajectory(self.source_manager, i, return_trajectory=True)
if source_trajectory is not None:
# Reconstruct using the source domain manager.
_, source_trajectory_image, source_reconstruction_image = self.source_manager.get_robot_visuals(0, source_latent_z, source_trajectory, return_image=True)
# Now repeat the same for target domain - First get a trajectory, starting point, and latent z.
target_trajectory, target_latent_z = self.encode_decode_trajectory(self.target_manager, i, return_trajectory=True)
# Reconstruct using the target domain manager.
_, target_trajectory_image, target_reconstruction_image = self.target_manager.get_robot_visuals(0, target_latent_z, target_trajectory, return_image=True)
return np.array(source_trajectory_image), np.array(source_reconstruction_image), np.array(target_trajectory_image), np.array(target_reconstruction_image)
else:
return None, None, None, None
def update_networks(self, domain, policy_manager, policy_loglikelihood, encoder_KL, discriminator_loglikelihood, latent_z):
#######################
# Update VAE portion.
#######################
# Zero out gradients of encoder and decoder (policy).
policy_manager.optimizer.zero_grad()
# Compute VAE loss on the current domain as likelihood plus weighted KL.
self.likelihood_loss = -policy_loglikelihood.mean()
self.encoder_KL = encoder_KL.mean()
self.VAE_loss = self.likelihood_loss + self.args.kl_weight*self.encoder_KL
# Compute discriminability loss for encoder (implicitly ignores decoder).
# Pretend the label was the opposite of what it is, and train the encoder to make the discriminator think this was what was true.
# I.e. train encoder to make discriminator maximize likelihood of wrong label.
self.discriminability_loss = self.negative_log_likelihood_loss_function(discriminator_loglikelihood.squeeze(1), torch.tensor(1-domain).to(device).long().view(1,))
# Total encoder loss:
self.total_VAE_loss = self.vae_loss_weight*self.VAE_loss + self.discriminability_loss_weight*self.discriminability_loss
if not(self.skip_vae):
# Go backward through the generator (encoder / decoder), and take a step.
self.total_VAE_loss.backward()
policy_manager.optimizer.step()
#######################
# Update Discriminator.
#######################
# Zero gradients of discriminator.
self.discriminator_optimizer.zero_grad()
# If we zeroed the discriminator's gradients and then reused the same NLL loss on it, PyTorch would complain about
# going backward through a part of the graph that we have already gone backward through. Instead, just pass things through the discriminator again, but this time detaching latent_z.
discriminator_logprob, discriminator_prob = self.discriminator_network(latent_z.detach())
# Compute discriminator loss for discriminator.
self.discriminator_loss = self.negative_log_likelihood_loss_function(discriminator_logprob.squeeze(1), torch.tensor(domain).to(device).long().view(1,))
if not(self.skip_discriminator):
# Now go backward and take a step.
self.discriminator_loss.backward()
self.discriminator_optimizer.step()
# @profile
def run_iteration(self, counter, i):
# Phases:
# Phase 1: Train encoder-decoder for both domains initially, so that discriminator is not fed garbage.
# Phase 2: Train encoder, decoder for each domain, and discriminator concurrently.
# Algorithm:
# For every epoch:
# # For every datapoint:
# # 1) Select which domain to use (source or target, i.e. with 50% chance, select either domain).
# # 2) Get trajectory segments from desired domain.
# # 3) Encode trajectory segments into latent z's and compute likelihood of trajectory actions under the decoder.
# # 4) Feed into discriminator, get likelihood of each domain.
# # 5) Compute and apply gradient updates.
# Remember to make domain agnostic function calls to encode, feed into discriminator, get likelihoods, etc.
# (0) Setup things like training phases, epsilon values, etc.
self.set_iteration(counter)
# (1) Select which domain to run on. This is supervision of discriminator.
domain = np.random.binomial(1,0.5)
# (1.5) Get domain policy manager.
policy_manager = self.get_domain_manager(domain)
# (2) & (3) Get trajectory segment and encode and decode.
subpolicy_inputs, latent_z, loglikelihood, kl_divergence = self.encode_decode_trajectory(policy_manager, i)
if latent_z is not None:
# (4) Feed latent z's to discriminator, and get discriminator likelihoods.
discriminator_logprob, discriminator_prob = self.discriminator_network(latent_z)
# (5) Compute and apply gradient updates.
self.update_networks(domain, policy_manager, loglikelihood, kl_divergence, discriminator_logprob, latent_z)
# Now update Plots.
viz_dict = {'domain': domain, 'discriminator_probs': discriminator_prob.squeeze(0).squeeze(0)[domain].detach().cpu().numpy()}
self.update_plots(counter, viz_dict)
# Run memory profiling.
# @profile
def set_neighbor_objects(self, computed_sets=False):
if not(computed_sets):
self.source_manager.get_trajectory_and_latent_sets()
self.target_manager.get_trajectory_and_latent_sets()
# Compute nearest neighbors for each set. First build KD-Trees / Ball-Trees.
self.source_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.source_manager.latent_z_set)
self.target_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.target_manager.latent_z_set)
self.neighbor_obj_set = True
def evaluate_correspondence_metrics(self, computed_sets=True):
print("Evaluating correspondence metrics.")
# Evaluate the correspondence and alignment metrics.
# Whether latent_z_sets and trajectory_sets are already computed for each manager.
self.set_neighbor_objects(computed_sets)
# if not(computed_sets):
# self.source_manager.get_trajectory_and_latent_sets()
# self.target_manager.get_trajectory_and_latent_sets()
# # Compute nearest neighbors for each set. First build KD-Trees / Ball-Trees.
# self.source_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.source_manager.latent_z_set)
# self.target_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.target_manager.latent_z_set)
# Compute neighbors.
_, source_target_neighbors = self.source_neighbors_object.kneighbors(self.target_manager.latent_z_set)
_, target_source_neighbors = self.target_neighbors_object.kneighbors(self.source_manager.latent_z_set)
# # Now compute trajectory distances for neighbors.
# source_target_trajectory_diffs = (self.source_manager.trajectory_set - self.target_manager.trajectory_set[source_target_neighbors.squeeze(1)])
# self.source_target_trajectory_distance = copy.deepcopy(np.linalg.norm(source_target_trajectory_diffs,axis=(1,2)).mean())
# target_source_trajectory_diffs = (self.target_manager.trajectory_set - self.source_manager.trajectory_set[target_source_neighbors.squeeze(1)])
# self.target_source_trajectory_distance = copy.deepcopy(np.linalg.norm(target_source_trajectory_diffs,axis=(1,2)).mean())
# Remember, absolute trajectory differences are meaningless, since the data is randomly initialized across the state space.
# Instead, compare actions, i.e. first compute differences along the time dimension.
source_traj_actions = np.diff(self.source_manager.trajectory_set,axis=1)
target_traj_actions = np.diff(self.target_manager.trajectory_set,axis=1)
source_target_trajectory_diffs = (source_traj_actions - target_traj_actions[source_target_neighbors.squeeze(1)])
self.source_target_trajectory_distance = copy.deepcopy(np.linalg.norm(source_target_trajectory_diffs,axis=(1,2)).mean())
target_source_trajectory_diffs = (target_traj_actions - source_traj_actions[target_source_neighbors.squeeze(1)])
self.target_source_trajectory_distance = copy.deepcopy(np.linalg.norm(target_source_trajectory_diffs,axis=(1,2)).mean())
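# In short, the metric above matches nearest neighbors across domains in latent z space, then compares the
# matched action sequences (per-timestep first differences of the trajectories) with a Frobenius norm over the
# (time, state) dimensions, averaged over the set; lower values indicate better-aligned latent spaces.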
# Reset variables to prevent memory leaks.
# source_neighbors_object = None
# target_neighbors_object = None
del self.source_neighbors_object
del self.target_neighbors_object
def evaluate(self, model=None):
# Evaluating Transfer - we just want embeddings of both source and target; so run evaluate of both source and target policy managers.
# Instead of parsing and passing model to individual source and target policy managers, just load using the transfer policy manager, and then run eval.
if model is not None:
self.load_all_models(model)
# Run source policy manager evaluate.
self.source_manager.evaluate(suffix="Source")
# Run target policy manager evaluate.
self.target_manager.evaluate(suffix="Target")
# Evaluate metrics.
self.evaluate_correspondence_metrics()
def automatic_evaluation(self, e):
pass
# Writing a cycle consistency transfer PM class.
class PolicyManager_CycleConsistencyTransfer(PolicyManager_Transfer):
# Inherit from transfer.
def __init__(self, args=None, source_dataset=None, target_dataset=None):
super(PolicyManager_CycleConsistencyTransfer, self).__init__(args, source_dataset, target_dataset)
self.neighbor_obj_set = False
# Don't actually need to define these functions since they perform same steps as super functions.
# def create_networks(self):
# super().create_networks()
# # Must also create two discriminator networks; one for source --> target --> source, one for target --> source --> target.
# # Remember, since these discriminator networks are operating on the trajectory space, we have to
# # make them LSTM networks, rather than MLPs.
# # # We have the encoder network class that's perfect for this. Output size is 2.
# # self.source_discriminator = EncoderNetwork(self.source_manager.input_size, self.hidden_size, self.output_size).to(device)
# # self.target_discriminator = EncoderNetwork(self.source_manager.input_size, self.hidden_size, self.output_size).to(device)
def create_training_ops(self):
# Call super training ops.
super().create_training_ops()
# # Now create discriminator optimizers.
# self.source_discriminator_optimizer = torch.optim.Adam(self.source_discriminator_network.parameters(),lr=self.learning_rate)
# self.target_discriminator_optimizer = torch.optim.Adam(self.target_discriminator_network.parameters(),lr=self.learning_rate)
# Instead of using the individuals policy manager optimizers, use one single optimizer.
self.parameter_list = self.source_manager.parameter_list + self.target_manager.parameter_list
self.optimizer = torch.optim.Adam(self.parameter_list, lr=self.learning_rate)
# def save_all_models(self, suffix):
# # Call super save model.
# super().save_all_models(suffix)
# # Now save the individual source / target discriminators.
# self.save_object['Source_Discriminator_Network'] = self.source_discriminator_network.state_dict()
# self.save_object['Target_Discriminator_Network'] = self.target_discriminator_network.state_dict()
# # Overwrite the save from super.
# torch.save(self.save_object,os.path.join(self.savedir,"Model_"+suffix))
# def load_all_models(self, path):
# # Call super load.
# super().load_all_models(path)
# # Now load the individual source and target discriminators.
# self.source_discriminator.load_state_dict(self.load_object['Source_Discriminator_Network'])
# self.target_discriminator.load_state_dict(self.load_object['Target_Discriminator_Network'])
# A bunch of functions should just be directly usable:
# get_domain_manager, get_trajectory_segment_tuple, encode_decode_trajectory, update_plots, get_transform,
# transform_zs, get_embeddings, plot_embeddings, get_trajectory_visuals, evaluate_correspondence_metrics,
# evaluate, automatic_evaluation
def get_start_state(self, domain, source_latent_z):
# Function to retrieve the start state for differentiable decoding from target domain.
# How we do this is first to retrieve the target domain latent z closest to the source_latent_z.
# We then select the trajectory corresponding to this target_domain latent_z.
# We then copy the start state of this trajectory.
if not(self.neighbor_obj_set):
self.set_neighbor_objects()
# First get neighbor object and trajectory sets.
neighbor_object_list = [self.source_neighbors_object, self.target_neighbors_object]
trajectory_set_list = [self.source_manager.trajectory_set, self.target_manager.trajectory_set]
# Remember, we need _target_ domain. So use 1-domain instead of domain.
neighbor_object = neighbor_object_list[1-domain]
trajectory_set = trajectory_set_list[1-domain]
# Next get closest target z.
_ , target_latent_z_index = neighbor_object.kneighbors(source_latent_z.squeeze(0).detach().cpu().numpy())
# Don't actually need the target_latent_z, unless we're doing differentiable nearest neighbor transfer.
# Now get the corresponding trajectory.
trajectory = trajectory_set[target_latent_z_index]
# Finally, pick up first state.
start_state = trajectory[0]
return start_state
def differentiable_rollout(self, policy_manager, trajectory_start, latent_z, rollout_length=None):
# Now implementing a differentiable_rollout function that takes in a policy manager.
# Copying over from rollout_robot_trajectory. This function should provide rollout template, but may need modifications for differentiability.
# Remember, the differentiable rollout is required because the backtranslation / cycle-consistency loss needs to be propagated through multiple sets of translations.
# Therefore it must pass through the decoder network(s) and through the latent z's. (Since each next state is computed as state + action here, gradients can also flow through the rolled-out states.)
subpolicy_inputs = torch.zeros((1,2*policy_manager.state_dim+policy_manager.latent_z_dimensionality)).to(device).float()
subpolicy_inputs[0,:policy_manager.state_dim] = torch.tensor(trajectory_start).to(device).float()
subpolicy_inputs[:,2*policy_manager.state_dim:] = torch.tensor(latent_z).to(device).float()
if rollout_length is not None:
length = rollout_length-1
else:
length = policy_manager.rollout_timesteps-1
for t in range(length):
# Get actions from the policy.
actions = policy_manager.policy_network.reparameterized_get_actions(subpolicy_inputs, greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:policy_manager.state_dim]+action_to_execute
# New input row.
input_row = torch.zeros((1,2*policy_manager.state_dim+policy_manager.latent_z_dimensionality)).to(device).float()
input_row[0,:policy_manager.state_dim] = new_state
# Feed in the ORIGINAL prediction from the network as input. Not the downscaled thing.
input_row[0,policy_manager.state_dim:2*policy_manager.state_dim] = actions[-1].squeeze(1)
input_row[0,2*policy_manager.state_dim:] = latent_z
# Now that we have assembled the new input row, concatenate it along temporal dimension with previous inputs.
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
trajectory = subpolicy_inputs[:,:policy_manager.state_dim].detach().cpu().numpy()
differentiable_trajectory = subpolicy_inputs[:,:policy_manager.state_dim]
differentiable_action_seq = subpolicy_inputs[:,policy_manager.state_dim:2*policy_manager.state_dim]
differentiable_state_action_seq = subpolicy_inputs[:,:2*policy_manager.state_dim]
# For differentiabiity, return tuple of trajectory, actions, state actions, and subpolicy_inputs.
return [differentiable_trajectory, differentiable_action_seq, differentiable_state_action_seq, subpolicy_inputs]
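# Usage sketch (mirroring cross_domain_decoding below):
#   traj, acts, state_acts, inputs = self.differentiable_rollout(target_manager, start_state, latent_z)
# The returned tensors stay attached to the autograd graph, so a cycle-consistency loss on the re-encoded
# rollout can backpropagate through the decoder and the latent z.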
def get_source_target_domain_managers(self):
domain = np.random.binomial(1,0.5)
# Also Get domain policy manager.
source_policy_manager = self.get_domain_manager(domain)
target_policy_manager = self.get_domain_manager(1-domain)
return domain, source_policy_manager, target_policy_manager
def cross_domain_decoding(self, domain, domain_manager, latent_z, start_state=None):
# If start state is none, first get start state, else use the argument.
if start_state is None:
start_state = self.get_start_state(domain, latent_z)
# Now rollout in target domain.
differentiable_trajectory, differentiable_action_seq, differentiable_state_action_seq, subpolicy_inputs = self.differentiable_rollout(domain_manager, start_state, latent_z)
return differentiable_trajectory, subpolicy_inputs
def update_networks(self, dictionary, source_policy_manager):
# Here are the objectives we have to be considering.
# 1) Reconstruction of inputs under single domain encoding / decoding.
# In this implementation, we just have to use the source_loglikelihood for this.
# 2) Discriminability of Z space. This is taken care of from the compute_discriminator_losses function.
# 3) Cycle-consistency. This may be implemented as regression (L2), loglikelihood of cycle-reconstructed traj, or discriminability of trajectories.
# In this implementation, we just have to use the cross domain decoded loglikelihood.
####################################
# First update encoder decoder networks. Don't train discriminator.
####################################
# Zero gradients.
self.optimizer.zero_grad()
####################################
# (1) Compute single-domain reconstruction loss.
####################################
# Compute VAE loss on the current domain as negative log likelihood plus weighted KL.
self.source_likelihood_loss = -dictionary['source_loglikelihood'].mean()
self.source_encoder_KL = dictionary['source_kl_divergence'].mean()
self.source_reconstruction_loss = self.source_likelihood_loss + self.args.kl_weight*self.source_encoder_KL
####################################
# (2) Compute discriminability losses.
####################################
# This block first computes discriminability losses:
# # a) First, feeds the latent_z into the z_discriminator, that is being trained to discriminate between z's of source and target domains.
# # Gets and returns the loglikelihood of the discriminator predicting the true domain.
# # Also returns discriminability loss, that is used to train the _encoders_ of both domains.
# #
# # b) ####### DON'T NEED TO DO THIS YET: ####### Also feeds either the cycle reconstructed trajectory, or the original trajectory from the source domain, into a separate discriminator.
# # This second discriminator is specific to the domain we are operating in. This discriminator is discriminating between the reconstructed and original trajectories.
# # Basically standard GAN adversarial training, except the generative model here is the entire cycle-consistency translation model.
#
# In addition to this, must also compute discriminator losses to train discriminators themselves.
# # a) For the z discriminator (and if we're using trajectory discriminators, those too), clone and detach the inputs of the discriminator and compute a discriminator loss with the right domain used in targets / supervision.
# # This discriminator loss is what is used to actually train the discriminators.
# Get z discriminator logprobabilities.
z_discriminator_logprob, z_discriminator_prob = self.discriminator_network(dictionary['source_latent_z'])
# Compute discriminability loss. Remember, this is not used for training the discriminator, but rather the encoders.
self.z_discriminability_loss = self.negative_log_likelihood_loss_function(z_discriminator_logprob.squeeze(1), torch.tensor(1-domain).to(device).long().view(1,))
###### Block that computes discriminability losses assuming we are using trajectory discriminators. ######
# # Get the right trajectory discriminator network.
# discriminator_list = [self.source_discriminator, self.target_discriminator]
# source_discriminator = discriminator_list[domain]
# # Now feed trajectory to the trajectory discriminator, based on whether it is the source or target discriminator.
# traj_discriminator_logprob, traj_discriminator_prob = source_discriminator(trajectory)
# # Compute trajectory discriminability loss, based on whether the trajectory was original or reconstructed.
# self.traj_discriminability_loss = self.negative_log_likelihood_loss_function(traj_discriminator_logprob.squeeze(1), torch.tensor(1-original_or_reconstructed).to(device).long().view(1,))
####################################
# (3) Compute cycle-consistency losses.
####################################
# Must compute likelihoods of original actions under the cycle reconstructed trajectory states.
# I.e. evaluate likelihood of original actions under source_decoder (i.e. source subpolicy), with the subpolicy inputs constructed from cycle-reconstruction.
# Get the original action sequence.
original_action_sequence = dictionary['source_subpolicy_inputs_original'][:,self.state_dim:2*self.state_dim]
# Now evaluate likelihood of actions under the source decoder.
cycle_reconstructed_loglikelihood, _ = source_policy_manager.forward(dictionary['source_subpolicy_inputs_crossdomain'], original_action_sequence)
# Reweight the cycle reconstructed likelihood to construct the loss.
self.cycle_reconstruction_loss = -self.args.cycle_reconstruction_loss_weight*cycle_reconstructed_loglikelihood.mean()
####################################
# Now that individual losses are computed, compute total loss, compute gradients, and then step.
####################################
# First combine losses.
self.total_VAE_loss = self.source_reconstruction_loss + self.z_discriminability_loss + self.cycle_reconstruction_loss
# If we are in a encoder / decoder training phase, compute gradients and step.
if not(self.skip_vae):
self.total_VAE_loss.backward()
self.optimizer.step()
####################################
# Now compute discriminator losses and update discriminator network(s).
####################################
# First zero out the discriminator gradients.
self.discriminator_optimizer.zero_grad()
# Detach the latent z that is fed to the discriminator, and then compute discriminator loss.
# If we reused the discriminator outputs from the encoder update here, PyTorch would complain about going
# backward through a part of the graph that we already went backward through. Instead, just pass things through the discriminator again, but this time detaching latent_z.
z_discriminator_detach_logprob, z_discriminator_detach_prob = self.discriminator_network(dictionary['source_latent_z'].detach())
# Compute discriminator loss for discriminator.
self.z_discriminator_loss = self.negative_log_likelihood_loss_function(z_discriminator_detach_logprob.squeeze(1), torch.tensor(domain).to(device).long().view(1,))
if not(self.skip_discriminator):
# Now go backward and take a step.
self.z_discriminator_loss.backward()
self.discriminator_optimizer.step()
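# Hedged sketch of the two-phase adversarial update used above, with generic names:
# # Encoder phase: gradients flow through z into the encoder, trying to fool D.
# encoder_loss = nll(D(z), 1 - domain); encoder_loss.backward(); optimizer.step()
# # Discriminator phase: z is detached, so only D's parameters receive gradients.
# discriminator_loss = nll(D(z.detach()), domain); discriminator_loss.backward(); discriminator_optimizer.step()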
def run_iteration(self, counter, i):
# Phases:
# Phase 1: Train encoder-decoder for both domains initially, so that discriminator is not fed garbage.
# Phase 2: Train encoder, decoder for each domain, and Z discriminator concurrently.
# Phase 3: Train encoder, decoder for each domain, and the individual source and target discriminators, concurrently.
# Algorithm (joint training):
# For every epoch:
# # For every datapoint:
# # 1) Select which domain to use as source (i.e. with 50% chance, select either domain).
# # 2) Get trajectory segments from desired domain.
# # 3) Transfer Steps:
# # a) Encode trajectory as latent z (domain 1).
# # b) Use domain 2 decoder to decode latent z into trajectory (domain 2).
# # c) Use domain 2 encoder to encode trajectory into latent z (domain 2).
# # d) Use domain 1 decoder to decode latent z (domain 2) into trajectory (domain 1).
# # 4) Feed cycle-reconstructed trajectory and original trajectory (both domain 1) into discriminator.
# # 5) Train discriminators to predict whether original or cycle reconstructed trajectory.
# # Alternate: Remember, don't actually need to use trajectory level discriminator networks, can just use loglikelihood cycle-reconstruction loss. Try this first.
# # Train z discriminator to predict which domain the latent z sample came from.
# # Train encoder / decoder architectures with mix of reconstruction loss and discriminator confusing objective.
# # Compute and apply gradient updates.
# Remember to make domain agnostic function calls to encode, feed into discriminator, get likelihoods, etc.
# (A hedged minimal sketch of transfer steps 3a-3d appears at the end of this file.)
####################################
# (0) Setup things like training phases, epsilon values, etc.
####################################
self.set_iteration(counter)
dictionary = {}
target_dict = {}
####################################
# (1) Select which domain to use as source domain (also supervision of z discriminator for this iteration).
####################################
domain, source_policy_manager, target_policy_manager = self.get_source_target_domain_managers()
####################################
# (2) & (3 a) Get source trajectory (segment) and encode into latent z. Decode using source decoder, to get loglikelihood for reconstruction objective.
####################################
dictionary['source_subpolicy_inputs_original'], dictionary['source_latent_z'], dictionary['source_loglikelihood'], dictionary['source_kl_divergence'] = self.encode_decode_trajectory(source_policy_manager, i)
####################################
# (3 b) Cross domain decoding.
####################################
target_dict['target_trajectory_rollout'], target_dict['target_subpolicy_inputs'] = self.cross_domain_decoding(domain, target_policy_manager, dictionary['source_latent_z'])
####################################
# (3 c) Cross domain encoding of target_trajectory_rollout into target latent_z.
####################################
dictionary['target_subpolicy_inputs'], dictionary['target_latent_z'], dictionary['target_loglikelihood'], dictionary['target_kl_divergence'] = self.encode_decode_trajectory(target_policy_manager, i, trajectory_input=target_dict)
####################################
# (3 d) Cross domain decoding of target_latent_z into source trajectory.
# Can use the original start state, or also use the reverse trick for start state. Try both maybe.
####################################
source_trajectory_rollout, dictionary['source_subpolicy_inputs_crossdomain'] = self.cross_domain_decoding(domain, source_policy_manager, dictionary['target_latent_z'], start_state=dictionary['source_subpolicy_inputs_original'][0,:self.state_dim].detach().cpu().numpy())
####################################
# (4) Feed source and target latent z's to z_discriminator.
####################################
self.compute_discriminator_losses(domain, dictionary['source_latent_z'])
####################################
# (5) Compute all losses, reweight, and take gradient steps.
####################################
self.update_networks(dictionary, source_policy_manager, domain)
# viz_dict = {'domain': domain, 'discriminator_probs': discriminator_prob.squeeze(0).squeeze(0)[domain].detach().cpu().numpy()}
# self.update_plots(counter, viz_dict)
# Encode decode function: First encodes, takes trajectory segment, and outputs latent z. The latent z is then provided to decoder (along with initial state), and then we get SOURCE domain subpolicy inputs.
# Cross domain decoding function: Takes encoded latent z (and start state), and then rolls out with target decoder. Function returns, target trajectory, action sequence, and TARGET domain subpolicy inputs.
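# Hedged, minimal sketch of the back-translation / cycle-consistency step described in
# run_iteration above. The encoder/decode attribute names here are illustrative
# assumptions, not the actual API of the policy managers in this repository.
def _cycle_consistency_sketch(source_manager, target_manager, source_trajectory):
    # (a) Encode the source trajectory into a latent z.
    z_source = source_manager.encoder(source_trajectory)
    # (b) Decode z with the target-domain decoder (differentiable rollout).
    target_trajectory = target_manager.decode(z_source)
    # (c) Re-encode the decoded trajectory in the target domain.
    z_target = target_manager.encoder(target_trajectory)
    # (d) Decode back into the source domain.
    cycle_trajectory = source_manager.decode(z_target)
    # L2 variant of the cycle-consistency loss; the loglikelihood variant instead
    # scores the original actions under the source decoder.
    return ((cycle_trajectory - source_trajectory) ** 2).mean()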
| CausalSkillLearning-main | Experiments/PolicyManagers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
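# Hedged usage sketch: downsample a 100-step, 2-D trajectory to 20 evenly spaced steps.
# traj = np.random.randn(100, 2)
# short_traj = resample(traj, 20) # -> shape (20, 2); first and last points are preserved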
class Transition():
def __init__(self, state, action, next_state, onestep_reward, terminal, success):
# Now that we're doing 1-step TD with actor-critic architectures rather than Monte Carlo,
# we don't need to store an explicit return value.
self.state = state
self.action = action
self.next_state = next_state
self.onestep_reward = onestep_reward
self.terminal = terminal
self.success = success
class Episode_TransitionList():
def __init__(self, transition_list):
self.episode = transition_list
def length(self):
return len(self.episode)
# Alternate way of implementing an episode...
# Make it a class that has state_list, action_list, etc. over the episode..
class Episode():
def __init__(self, state_list=None, action_list=None, reward_list=None, terminal_list=None):
self.state_list = state_list
self.action_list = action_list
self.reward_list = reward_list
self.terminal_list = terminal_list
self.episode_length = len(self.state_list)
def length(self):
return self.episode_length
class HierarchicalEpisode(Episode):
def __init__(self, state_list=None, action_list=None, reward_list=None, terminal_list=None, latent_z_list=None, latent_b_list=None):
super(HierarchicalEpisode, self).__init__(state_list, action_list, reward_list, terminal_list)
self.latent_z_list = latent_z_list
self.latent_b_list = latent_b_list
class ReplayMemory():
def __init__(self, memory_size=10000):
# Implementing the memory as a list of EPISODES.
# This acts as a queue.
self.memory = []
# Accessing the memory with indices should be constant time, so it's okay to use a list.
# Not using a priority either.
self.memory_len = 0
self.memory_size = memory_size
print("Setup Memory.")
def append_to_memory(self, episode):
if self.check_full():
# Remove first episode in the memory (queue).
self.memory.pop(0)
# Now push the episode to the end of the queue.
self.memory.append(episode)
else:
self.memory.append(episode)
self.memory_len+=1
def sample_batch(self, batch_size=25):
self.memory_len = len(self.memory)
indices = np.random.randint(0,high=self.memory_len,size=(batch_size))
return indices
def retrieve_batch(self, batch_size=25):
# self.memory_len = len(self.memory)
return np.arange(0,batch_size)
def check_full(self):
self.memory_len = len(self.memory)
if self.memory_len<self.memory_size:
return 0
else:
return 1
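# Hedged usage sketch of the episodic replay memory above (hypothetical transitions).
def _replay_memory_sketch():
    memory = ReplayMemory(memory_size=3)
    for _ in range(5):
        episode = Episode_TransitionList([Transition(0, 0, 1, 0.0, False, False)])
        # Once the queue holds 3 episodes, the oldest one is evicted.
        memory.append_to_memory(episode)
    indices = memory.sample_batch(batch_size=2)
    return [memory.memory[i] for i in indices]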
# Refer: https://towardsdatascience.com/deep-deterministic-policy-gradients-explained-2d94655a9b7b
"""
Taken from https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py
"""
class OUNoise(object):
def __init__(self, action_space_size, mu=0.0, theta=0.15, max_sigma=0.2, min_sigma=0.2, decay_period=100000):
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
self.action_dim = action_space_size
self.low = -np.ones((self.action_dim))
self.high = np.ones((self.action_dim))
self.reset()
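# Hedged usage sketch: perturb a deterministic policy action with OU exploration noise.
# noise = OUNoise(action_space_size=4)
# noisy_action = noise.get_action(policy_action, t=step) # clipped to [-1, 1]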
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)
self.state = x + dx
return self.state
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
return np.clip(action + ou_state, self.low, self.high) | CausalSkillLearning-main | Experiments/RLUtils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import tensorflow as tf
import numpy as np
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
import tempfile
import moviepy.editor as mpy
import os
import os.path as osp
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import summary_op_util
def py_encode_gif(im_thwc, tag, fps=4):
"""
Given a 4D numpy tensor of images, encodes as a gif.
"""
with tempfile.NamedTemporaryFile() as f: fname = f.name + '.gif'
clip = mpy.ImageSequenceClip(list(im_thwc), fps=fps)
clip.write_gif(fname, verbose=False, logger=None)
with open(fname, 'rb') as f: enc_gif = f.read()
os.remove(fname)
# create a tensorflow image summary protobuf:
thwc = im_thwc.shape
im_summ = tf.Summary.Image()
im_summ.height = thwc[1]
im_summ.width = thwc[2]
im_summ.colorspace = 3 # fix to 3 == RGB
im_summ.encoded_image_string = enc_gif
return im_summ
# create a summary obj:
#summ = tf.Summary()
#summ.value.add(tag=tag, image=im_summ)
#summ_str = summ.SerializeToString()
#return summ_str
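# Hedged usage sketch (assumes a TF1-style tf.Summary proto is available):
# frames = np.random.randint(0, 255, size=(8, 64, 64, 3), dtype=np.uint8) # T x H x W x C
# gif_image_proto = py_encode_gif(frames, tag='rollout', fps=4)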
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
# Note: the summary methods below still build TF1-style protos (tf.Summary, tf.HistogramProto)
# and call add_summary, so we keep the compat.v1 FileWriter; a full TF2 port would switch
# these methods to tf.summary.scalar / tf.summary.image with writer.as_default().
self.writer = tf.compat.v1.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def gif_summary(self, tag, images, step):
"""Log a list of TXHXWX3 images."""
# from https://github.com/tensorflow/tensorboard/issues/39
img_summaries = []
for i, img in enumerate(images):
# Create a Summary value
img_sum = py_encode_gif(img, '%s/%d' % (tag, i), fps=4)
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
# Note: scipy.misc.toimage was removed in SciPy >= 1.2; this path requires an older SciPy.
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush() | CausalSkillLearning-main | Experiments/TFLogger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class TestLoaderWithKwargs(unittest.TestLoader):
"""A test loader which allows to parse keyword arguments to the
test case class."""
# def loadTestsFromTestCase(self, testCaseClass, **kwargs):
def loadTestsFromTestCase(self, testCaseClass, policy_manager):
"""Return a suite of all tests cases contained in
testCaseClass."""
if issubclass(testCaseClass, unittest.suite.TestSuite):
raise TypeError("Test cases should not be derived from "\
"TestSuite. Maybe you meant to derive from"\
" TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
# Modification here: parse keyword arguments to testCaseClass.
test_cases = []
# embed()
for test_case_name in testCaseNames:
# test_cases.append(testCaseClass(policy_manager))
test_cases.append(testCaseClass(test_case_name, policy_manager))
loaded_suite = self.suiteClass(test_cases)
return loaded_suite
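# Hedged usage sketch: build a suite of MetaTestClass (below) tests that all share a
# policy_manager instance, then run them.
# loader = TestLoaderWithKwargs()
# suite = loader.loadTestsFromTestCase(MetaTestClass, policy_manager)
# unittest.TextTestRunner().run(suite)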
class MetaTestClass(unittest.TestCase):
def __init__(self, test_name, policy_manager):
super(MetaTestClass, self).__init__(test_name)
self.policy_manager = policy_manager
self.args = self.policy_manager.args
self.dataset = self.policy_manager.dataset
def test_dataloader(self):
if self.args.data=='Roboturk':
self.check_Roboturkdataloader()
if self.args.data=='MIME':
self.check_MIMEdataloader()
def check_MIMEdataloader(self):
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']==1
check_demo_data = (data_element['demo']==np.load("Test_Data/MIME_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
def check_Roboturkdataloader(self):
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']
check_demo_data = (data_element['demo']==np.load("Test_Data/Roboturk_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
def test_variational_policy(self):
if self.args.setting=='learntsub':
# Assume the variational policy is an instance of ContinuousVariationalPolicyNetwork_BPrior class.
inputs = torch.ones((40,self.policy_manager.variational_policy.input_size)).cuda().float()
expected_outputs = np.load("Test_Data/{0}_Varpolicy_Res.npy".format(self.args.data),allow_pickle=True)
pred_outputs = self.policy_manager.variational_policy.forward(inputs, epsilon=0.)
error = (((expected_outputs[0]-pred_outputs[0])**2).mean()).detach().cpu().numpy()
threshold = 0.01
self.assertTrue(error < threshold)
else:
pass
def test_subpolicy(self):
# Assume the subpolicy is an instance of ContinuousPolicyNetwork class.
inputs = torch.ones((15,self.policy_manager.policy_network.input_size)).cuda().float()
actions = np.ones((15,self.policy_manager.policy_network.output_size))
expected_outputs = np.load("Test_Data/{0}_Subpolicy_Res.npy".format(self.args.data),allow_pickle=True)
pred_outputs = self.policy_manager.policy_network.forward(inputs, actions)
error = (((expected_outputs[0]-pred_outputs[0])**2).mean()).detach().cpu().numpy()
threshold = 0.01
self.assertTrue(error < threshold)
def test_latent_policy(self):
# Assume the latent policy is a ContinuousLatentPolicyNetwork class.
pass
def test_encoder_policy(self):
# Assume is instance of ContinuousEncoderNetwork class.
pass | CausalSkillLearning-main | Experiments/TestClass.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
import os.path as osp
def select_baxter_angles(trajectory, joint_names, arm='right'):
# joint names in order as used via mujoco visualizer
baxter_joint_names = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2']
if arm == 'right':
select_joints = baxter_joint_names[:7]
elif arm == 'left':
select_joints = baxter_joint_names[7:]
elif arm == 'both':
select_joints = baxter_joint_names
inds = [joint_names.index(j) for j in select_joints]
return trajectory[:, inds]
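# Hedged usage sketch: pull the 7 right-arm angles out of a full joint-angle trajectory.
# right_arm_traj = select_baxter_angles(trajectory, joint_names, arm='right') # -> (T, 7)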
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/MIME/'
self.ds_freq = 20
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
self.filelist = glob.glob(self.fulltext)
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())))
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', self.dataset_directory) for f in self.filelist]
# print(len(self.filelist))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
elem['ra_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='right')
elem['la_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='left')
# If max norm of differences is <1.0, valid.
# if elem['joint_angle_trajectory'].shape[0]>1:
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
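# Hedged usage sketch: rebuild a {joint_name: angle} dict for one arm from its 7-dim
# slice of the joint-angle vector (offsets follow the sorted joint-name ordering above).
# left_arm_dict = dataset.recreate_dictionary('left', left_arm_joint_angles)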
class MIME_NewDataset(Dataset):
def __init__(self, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/MIME/'
# Load the entire set of trajectories.
self.data_list = np.load(os.path.join(self.dataset_directory, "Data_List.npy"),allow_pickle=True)
self.dataset_length = len(self.data_list)
def __len__(self):
# Return length of file list.
return self.dataset_length
def __getitem__(self, index):
# Return n'th item of dataset.
# This has already processed everything.
return self.data_list[index]
def compute_statistics(self):
self.state_size = 16
self.total_length = self.__len__()
mean = np.zeros((self.state_size))
variance = np.zeros((self.state_size))
mins = np.zeros((self.total_length, self.state_size))
maxs = np.zeros((self.total_length, self.state_size))
lens = np.zeros((self.total_length))
# And velocity statistics.
vel_mean = np.zeros((self.state_size))
vel_variance = np.zeros((self.state_size))
vel_mins = np.zeros((self.total_length, self.state_size))
vel_maxs = np.zeros((self.total_length, self.state_size))
for i in range(self.total_length):
print("Phase 1: DP: ",i)
data_element = self.__getitem__(i)
if data_element['is_valid']:
demo = data_element['demo']
vel = np.diff(demo,axis=0)
mins[i] = demo.min(axis=0)
maxs[i] = demo.max(axis=0)
mean += demo.sum(axis=0)
lens[i] = demo.shape[0]
vel_mins[i] = abs(vel).min(axis=0)
vel_maxs[i] = abs(vel).max(axis=0)
vel_mean += vel.sum(axis=0)
mean /= lens.sum()
vel_mean /= lens.sum()
for i in range(self.total_length):
print("Phase 2: DP: ",i)
data_element = self.__getitem__(i)
# Just need to normalize the demonstration. Not the rest.
if data_element['is_valid']:
demo = data_element['demo']
vel = np.diff(demo,axis=0)
variance += ((demo-mean)**2).sum(axis=0)
vel_variance += ((vel-vel_mean)**2).sum(axis=0)
variance /= lens.sum()
variance = np.sqrt(variance)
vel_variance /= lens.sum()
vel_variance = np.sqrt(vel_variance)
max_value = maxs.max(axis=0)
min_value = mins.min(axis=0)
vel_max_value = vel_maxs.max(axis=0)
vel_min_value = vel_mins.min(axis=0)
np.save("MIME_Orig_Mean.npy", mean)
np.save("MIME_Orig_Var.npy", variance)
np.save("MIME_Orig_Min.npy", min_value)
np.save("MIME_Orig_Max.npy", max_value)
np.save("MIME_Orig_Vel_Mean.npy", vel_mean)
np.save("MIME_Orig_Vel_Var.npy", vel_variance)
np.save("MIME_Orig_Vel_Min.npy", vel_min_value)
np.save("MIME_Orig_Vel_Max.npy", vel_max_value)
class MIME_Dataloader_Tester(unittest.TestCase):
def test_MIMEdataloader(self):
self.dataset = MIME_NewDataset()
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']==1
check_demo_data = (data_element['demo']==np.load("Test_Data/MIME_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
if __name__ == '__main__':
# Run all tests defined for the dataloader.
unittest.main() | CausalSkillLearning-main | Experiments/MIME_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags, app
import copy, os, imageio, scipy.misc, pdb, math, time, numpy as np
import robosuite, threading
from robosuite.wrappers import IKWrapper
import matplotlib.pyplot as plt
from IPython import embed
# # Mocap viz.
# import MocapVisualizationUtils
# from mocap_processing.motion.pfnn import Animation, BVH
class SawyerVisualizer():
def __init__(self, has_display=False):
# Create environment.
print("Do I have a display?", has_display)
# self.base_env = robosuite.make('BaxterLift', has_renderer=has_display)
self.base_env = robosuite.make("SawyerViz",has_renderer=has_display)
# Create kinematics object.
self.sawyer_IK_object = IKWrapper(self.base_env)
self.environment = self.sawyer_IK_object.env
def update_state(self):
# Updates all joint states
self.full_state = self.environment._get_observation()
def set_joint_pose_return_image(self, joint_angles, arm='both', gripper=False):
# In the roboturk dataset, we've the following joint angles:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint')
# Set usual joint angles through set joint positions API.
self.environment.reset()
self.environment.set_robot_joint_positions(joint_angles[:7])
# For gripper, use "step".
# Mujoco requires actions that are -1 for Open and 1 for Close.
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
# In mujoco, -1 is open, and 1 is closed.
actions = np.zeros((8))
actions[-1] = joint_angles[-1]
# Move gripper positions.
self.environment.step(actions)
image = np.flipud(self.environment.sim.render(600, 600, camera_name='vizview1'))
return image
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
image_list = []
for t in range(trajectory.shape[0]):
new_image = self.set_joint_pose_return_image(trajectory[t])
image_list.append(new_image)
# Insert a white separator frame at segmentation boundaries.
if segmentations is not None:
if t>0 and segmentations[t]==1:
# Adding the image to 255 would overflow uint8; a plain white frame is intended.
image_list.append(255*np.ones_like(new_image))
if return_and_save:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
return image_list
elif return_gif:
return image_list
else:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
class BaxterVisualizer():
def __init__(self, has_display=False):
# Create environment.
print("Do I have a display?", has_display)
# self.base_env = robosuite.make('BaxterLift', has_renderer=has_display)
self.base_env = robosuite.make("BaxterViz",has_renderer=has_display)
# Create kinematics object.
self.baxter_IK_object = IKWrapper(self.base_env)
self.environment = self.baxter_IK_object.env
def update_state(self):
# Updates all joint states
self.full_state = self.environment._get_observation()
def set_ee_pose_return_image(self, ee_pose, arm='right', seed=None):
# Assumes EE pose is Position in the first three elements, and quaternion in last 4 elements.
self.update_state()
if seed is None:
# Set seed to current state.
seed = self.full_state['joint_pos']
if arm == 'right':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=ee_pose[:3],
target_orientation_right=ee_pose[3:],
target_position_left=self.full_state['left_eef_pos'],
target_orientation_left=self.full_state['left_eef_quat'],
rest_poses=seed
)
elif arm == 'left':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=self.full_state['right_eef_pos'],
target_orientation_right=self.full_state['right_eef_quat'],
target_position_left=ee_pose[:3],
target_orientation_left=ee_pose[3:],
rest_poses=seed
)
elif arm == 'both':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=ee_pose[:3],
target_orientation_right=ee_pose[3:7],
target_position_left=ee_pose[7:10],
target_orientation_left=ee_pose[10:],
rest_poses=seed
)
image = self.set_joint_pose_return_image(joint_positions, arm=arm, gripper=False)
return image
def set_joint_pose_return_image(self, joint_pose, arm='both', gripper=False):
# FOR FULL 16 DOF STATE: ASSUMES JOINT_POSE IS <LEFT_JA, RIGHT_JA, LEFT_GRIPPER, RIGHT_GRIPPER>.
self.update_state()
self.state = copy.deepcopy(self.full_state['joint_pos'])
# THE FIRST 7 JOINT ANGLES IN MUJOCO ARE THE RIGHT HAND.
# THE LAST 7 JOINT ANGLES IN MUJOCO ARE THE LEFT HAND.
if arm=='right':
# Assume joint_pose is 8 DoF - 7 for the arm, and 1 for the gripper.
self.state[:7] = copy.deepcopy(joint_pose[:7])
elif arm=='left':
# Assume joint_pose is 8 DoF - 7 for the arm, and 1 for the gripper.
self.state[7:] = copy.deepcopy(joint_pose[:7])
elif arm=='both':
# The Plans were generated as: Left arm, Right arm, left gripper, right gripper.
# Assume joint_pose is 16 DoF. 7 DoF for left arm, 7 DoF for right arm. (These need to be flipped)., 1 for left gripper. 1 for right gripper.
# First right hand.
self.state[:7] = joint_pose[7:14]
# Now left hand.
self.state[7:] = joint_pose[:7]
# Set the joint angles magically.
self.environment.set_robot_joint_positions(self.state)
action = np.zeros((16))
if gripper:
# Left gripper is 15. Right gripper is 14.
# MIME Gripper values are from 0 to 100 (Close to Open), but we treat the inputs to this function as 0 to 1 (Close to Open), and then rescale to (-1 Open to 1 Close) for Mujoco.
if arm=='right':
action[14] = -joint_pose[-1]*2+1
elif arm=='left':
action[15] = -joint_pose[-1]*2+1
elif arm=='both':
action[14] = -joint_pose[15]*2+1
action[15] = -joint_pose[14]*2+1
# Move gripper positions.
self.environment.step(action)
image = np.flipud(self.environment.sim.render(600, 600, camera_name='vizview1'))
return image
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
image_list = []
for t in range(trajectory.shape[0]):
new_image = self.set_joint_pose_return_image(trajectory[t])
image_list.append(new_image)
# Insert a white separator frame at segmentation boundaries.
if segmentations is not None:
if t>0 and segmentations[t]==1:
# Adding the image to 255 would overflow uint8; a plain white frame is intended.
image_list.append(255*np.ones_like(new_image))
if return_and_save:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
return image_list
elif return_gif:
return image_list
else:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
# class MocapVisualizer():
# def __init__(self, has_display=False, args=None):
# # Load some things from the MocapVisualizationUtils and set things up so that they're ready to go.
# # self.cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([6.0, 0.0, 2.0]),
# # origin=np.array([0.0, 0.0, 0.0]),
# # vup=np.array([0.0, 0.0, 1.0]),
# # fov=45.0)
# self.args = args
# # Default is local data.
# self.global_data = False
# self.cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([4.5, 0.0, 2.0]),
# origin=np.array([0.0, 0.0, 0.0]),
# vup=np.array([0.0, 0.0, 1.0]),
# fov=45.0)
# # Path to dummy file that is going to populate joint_parents, initial global positions, etc.
# bvh_filename = "/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
# # Run init before loading animation.
# MocapVisualizationUtils.init()
# MocapVisualizationUtils.global_positions, MocapVisualizationUtils.joint_parents, MocapVisualizationUtils.time_per_frame = MocapVisualizationUtils.load_animation(bvh_filename)
# # State sizes.
# self.number_joints = 22
# self.number_dimensions = 3
# self.total_dimensions = self.number_joints*self.number_dimensions
# # Run thread of viewer, so that callbacks start running.
# thread = threading.Thread(target=self.run_thread)
# thread.start()
# # Also create dummy animation object.
# self.animation_object, _, _ = BVH.load(bvh_filename)
# def run_thread(self):
# MocapVisualizationUtils.viewer.run(
# title='BVH viewer',
# cam=self.cam_cur,
# size=(1280, 720),
# keyboard_callback=None,
# render_callback=MocapVisualizationUtils.render_callback_time_independent,
# idle_callback=MocapVisualizationUtils.idle_callback_return,
# )
# def get_global_positions(self, positions, animation_object=None):
# # Function to get global positions corresponding to predicted or actual local positions.
# traj_len = positions.shape[0]
# def resample(original_trajectory, desired_number_timepoints):
# original_traj_len = len(original_trajectory)
# new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
# return original_trajectory[new_timepoints]
# if animation_object is not None:
# # Now copy over from animation_object instead of just dummy animation object.
# new_animation_object = Animation.Animation(resample(animation_object.rotations, traj_len), positions, animation_object.orients, animation_object.offsets, animation_object.parents)
# else:
# # Create a dummy animation object.
# new_animation_object = Animation.Animation(self.animation_object.rotations[:traj_len], positions, self.animation_object.orients, self.animation_object.offsets, self.animation_object.parents)
# # Then transform them.
# transformed_global_positions = Animation.positions_global(new_animation_object)
# # Now return coordinates.
# return transformed_global_positions
# def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
# image_list = []
# if self.global_data:
# # If we predicted in the global setting, just reshape.
# predicted_global_positions = np.reshape(trajectory, (-1,self.number_joints,self.number_dimensions))
# else:
# # If it's local data, then transform to global.
# # Assume trajectory is number of timesteps x number_dimensions.
# # Convert to number_of_timesteps x number_of_joints x 3.
# predicted_local_positions = np.reshape(trajectory, (-1,self.number_joints,self.number_dimensions))
# # Assume trajectory was predicted in local coordinates. Transform to global for visualization.
# predicted_global_positions = self.get_global_positions(predicted_local_positions, animation_object=additional_info)
# # Copy into the global variable.
# MocapVisualizationUtils.global_positions = predicted_global_positions
# # Reset Image List.
# MocapVisualizationUtils.image_list = []
# # Set save_path and prefix.
# MocapVisualizationUtils.save_path = gif_path
# MocapVisualizationUtils.name_prefix = gif_name.rstrip('.gif')
# # Now set the whether_to_render as true.
# MocapVisualizationUtils.whether_to_render = True
# # Wait till rendering is complete.
# x_count = 0
# while MocapVisualizationUtils.done_with_render==False and MocapVisualizationUtils.whether_to_render==True:
# x_count += 1
# time.sleep(1)
# # Now that rendering is complete, load images.
# image_list = MocapVisualizationUtils.image_list
# # Now actually save the GIF or return.
# if return_and_save:
# imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
# return image_list
# elif return_gif:
# return image_list
# else:
# imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
class ToyDataVisualizer():
def __init__(self):
pass
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
fig = plt.figure()
ax = fig.gca()
ax.scatter(trajectory[:,0],trajectory[:,1],c=range(len(trajectory)),cmap='jet')
plt.xlim(-10,10)
plt.ylim(-10,10)
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
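# Hedged usage sketch: render a toy 2-D trajectory to a (3, H, W) uint8 image array.
# viz = ToyDataVisualizer()
# img = viz.visualize_joint_trajectory(np.random.randn(50, 2))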
if __name__ == '__main__':
# end_eff_pose = [0.3, -0.3, 0.09798524029948213, 0.38044099037703677, 0.9228975092885654, -0.021717379118030174, 0.05525572942370394]
# end_eff_pose = [0.53303758, -0.59997265, 0.09359371, 0.77337391, 0.34998901, 0.46797516, -0.24576358]
# end_eff_pose = np.array([0.64, -0.83, 0.09798524029948213, 0.38044099037703677, 0.9228975092885654, -0.021717379118030174, 0.05525572942370394])
# MujocoVisualizer is not defined in this file; BaxterVisualizer provides set_ee_pose_return_image.
visualizer = BaxterVisualizer()
# img = visualizer.set_ee_pose_return_image(end_eff_pose, arm='right')
# scipy.misc.imsave('mj_vis.png', img)
| CausalSkillLearning-main | Experiments/Visualizers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class Roboturk_Dataset(Dataset):
# LINK TO DATASET and INFO: http://roboturk.stanford.edu/dataset.html
# Class implementing instance of Roboturk dataset.
def __init__(self, args):
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
self.args = args
# Require a task list.
# The task name is needed for setting up the environment and rendering;
# the environment itself shouldn't be needed for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-full", "bins-Milk", "pegs-full", "pegs-RoundNut", "pegs-SquareNut"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1069, 1145, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Indexing note: np.searchsorted with side='right' handles boundary indices consistently,
# so no sentinel value needs to be prepended to cummulative_num_demos.
self.total_length = self.num_demos.sum()
# Seems to follow joint angles order:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint', 'Milk0', 'Bread0', 'Cereal0', 'Can0').
# Extract the arm joint angles and gripper values using the index lists below.
self.joint_angle_indices = [1,3,4,5,6,7,8]
self.gripper_indices = [9,10]
self.ds_freq = 20
# self.r_gripper_r_finger_joint = np.array([-0.0116, 0.020833])
# self.r_gripper_l_finger_joint = np.array([-0.020833, 0.0135])
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
# Set files.
self.setup()
def setup(self):
# Load data from all tasks.
self.files = []
for i in range(len(self.task_list)):
self.files.append(h5py.File("{0}/{1}/demo.hdf5".format(self.dataset_directory,self.task_list[i]),'r'))
def __len__(self):
return self.total_length
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
if index==self.total_length-1:
task_index-=1
# Decide task ID, and the within-task demo index.
# Subtract the cumulative number of demonstrations up to this task; hdf5 demos are 1-indexed, hence the +1.
new_index = index-self.cummulative_num_demos[max(task_index,0)]+1
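# Worked example (hypothetical sizes): with num_demos=[3,2], cummulative_num_demos=[0,3,5],
# index 3 -> searchsorted(..., side='right')-1 = 1 (second task), new_index = 3-3+1 = 1 -> 'demo_1'.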
try:
# Get raw state sequence.
# Note: dataset .value was removed in h5py >= 3.0; use [()] on newer versions.
state_sequence = self.files[task_index]['data/demo_{0}/states'.format(new_index)].value
except:
# If this failed, return invalid.
data_element = {}
data_element['is_valid'] = False
return data_element
# Performing another check that makes sure data element actually has states.
if state_sequence.shape[0]==0:
data_element = {}
data_element['is_valid'] = False
return data_element
# If we are here, the data element is presumably valid till now.
# Get joint angles from this state sequence.
joint_values = state_sequence[:,self.joint_angle_indices]
# Get gripper values from state sequence.
gripper_finger_values = state_sequence[:,self.gripper_indices]
# Normalize gripper values.
# 1 is right finger. 0 is left finger.
# 1-0 is right-left.
gripper_values = gripper_finger_values[:,1]-gripper_finger_values[:,0]
gripper_values = (gripper_values-gripper_values.min()) / (gripper_values.max()-gripper_values.min())
gripper_values = 2*gripper_values-1
concatenated_demonstration = np.concatenate([joint_values,gripper_values.reshape((-1,1))],axis=1)
downsampled_demonstration = resample(concatenated_demonstration, concatenated_demonstration.shape[0]//self.ds_freq)
# Performing another check that makes sure data element actually has states.
if downsampled_demonstration.shape[0]==0:
data_element = {}
data_element['is_valid'] = False
return data_element
data_element = {}
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(downsampled_demonstration,self.args.smoothing_kernel_bandwidth,axis=0,mode='nearest')
else:
data_element['demo'] = downsampled_demonstration
# Trivially setting is_valid to true until we come up with a better strategy.
data_element['is_valid'] = True
return data_element
def close(self):
for file in self.files:
file.close()
def preprocess_dataset(self):
# for task_index in range(len(self.task_list)):
# for task_index in [3,5]:
for task_index in [0,1,2,4,6,7]:
print("#######################################")
print("Preprocessing task index: ", task_index)
print("#######################################")
# Get the name of environment.
environment_name = self.files[task_index]['data'].attrs['env']
# Create an actual robo-suite environment.
self.env = robosuite.make(environment_name)
# Get sizes.
obs = self.env._get_observation()
robot_state_size = obs['robot-state'].shape[0]
object_state_size = obs['object-state'].shape[0]
# Create list of files for this task.
task_demo_list = []
# For every element in the filelist of the element,
for i in range(1,self.num_demos[task_index]+1):
print("Preprocessing task index: ", task_index, " Demo Index: ", i, " of: ", self.num_demos[task_index])
# Create list of datapoints for this demonstrations.
datapoint = {}
# Get SEQUENCE of flattened states.
try:
flattened_state_sequence = self.files[task_index]['data/demo_{0}/states'.format(i)].value
joint_action_sequence = self.files[task_index]['data/demo_{0}/joint_velocities'.format(i)].value
gripper_action_sequence = self.files[task_index]['data/demo_{0}/gripper_actuations'.format(i)].value
flattened_state_sequence = resample(flattened_state_sequence, flattened_state_sequence.shape[0]//self.ds_freq)
number_timesteps = flattened_state_sequence.shape[0]
robot_state_array = np.zeros((number_timesteps, robot_state_size))
object_state_array = np.zeros((number_timesteps, object_state_size))
# Get joint angle values from
joint_values = flattened_state_sequence[:,self.joint_angle_indices]
# Get gripper values from state sequence.
gripper_finger_values = flattened_state_sequence[:,self.gripper_indices]
# Normalize gripper values.
# 1 is right finger. 0 is left finger.
# 1-0 is right-left.
gripper_values = gripper_finger_values[:,1]-gripper_finger_values[:,0]
gripper_values = (gripper_values-gripper_values.min()) / (gripper_values.max()-gripper_values.min())
gripper_values = 2*gripper_values-1
concatenated_demonstration = np.concatenate([joint_values,gripper_values.reshape((-1,1))],axis=1)
concatenated_actions = np.concatenate([joint_action_sequence,gripper_action_sequence.reshape((-1,1))],axis=1)
# For every element in sequence, set environment state.
for t in range(flattened_state_sequence.shape[0]):
self.env.sim.set_state_from_flattened(flattened_state_sequence[t])
# Now get observation.
observation = self.env._get_observation()
# Robot and Object state appended to datapoint dictionary.
robot_state_array[t] = observation['robot-state']
object_state_array[t] = observation['object-state']
except:
# If preprocessing this demo fails partway, some of the sequences above may be undefined,
# so store zero-filled placeholders and move on to the next demo.
datapoint['robot-state'] = np.zeros((1, robot_state_size))
datapoint['object-state'] = np.zeros((1, object_state_size))
task_demo_list.append(datapoint)
continue
# Put all processed sequences into the datapoint dictionary.
datapoint['flat-state'] = flattened_state_sequence
datapoint['robot-state'] = robot_state_array
datapoint['object-state'] = object_state_array
datapoint['demo'] = concatenated_demonstration
datapoint['demonstrated_actions'] = concatenated_actions
# Add this dictionary to the task demo list.
task_demo_list.append(datapoint)
# Create array.
task_demo_array = np.array(task_demo_list)
# Now save this file_demo_list.
np.save(os.path.join(self.dataset_directory,self.task_list[task_index],"New_Task_Demo_Array.npy"),task_demo_array)
class Roboturk_FullDataset(Roboturk_Dataset):
def __init__(self, args):
super(Roboturk_FullDataset, self).__init__(args)
self.environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlace","SawyerPickPlaceMilk","SawyerNutAssembly", "SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
def setup(self):
self.files = []
for i in range(len(self.task_list)):
if i==3 or i==5:
self.files.append(np.load("{0}/{1}/FullDataset_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
else:
self.files.append(np.load("{0}/{1}/New_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
# Decide task ID, and the within-task demo index by subtracting
# the cumulative number of demonstrations up to this task.
new_index = index-self.cummulative_num_demos[max(task_index,0)]
data_element = self.files[task_index][new_index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
if resample_length<=1 or data_element['robot-state'].shape[0]<=1:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
class Roboturk_SegmentedDataset(Roboturk_Dataset):
def __init__(self):
super(Roboturk_SegmentedDataset, self).__init__()
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
# Require a task list.
# The task name is needed for setting up the environment and rendering;
# the environment itself shouldn't be needed for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-Milk", "pegs-RoundNut", "pegs-SquareNut"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Indexing note: np.searchsorted with side='right' handles boundary indices consistently,
# so no sentinel value needs to be prepended to cummulative_num_demos.
self.total_length = self.num_demos.sum()
# Load data from all tasks.
self.files = []
for i in range(len(self.task_list)):
self.files.append(h5py.File("{0}/{1}/demo.hdf5".format(self.dataset_directory,self.task_list[i]),'r'))
# Seems to follow joint angles order:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint', 'Milk0', 'Bread0', 'Cereal0', 'Can0').
# Extract the arm joint angles and gripper values using the index lists below.
self.joint_angle_indices = [1,3,4,5,6,7,8]
self.gripper_indices = [9,10]
self.ds_freq = 20
# self.r_gripper_r_finger_joint = np.array([-0.0116, 0.020833])
# self.r_gripper_l_finger_joint = np.array([-0.020833, 0.0135])
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
class Roboturk_NewSegmentedDataset(Dataset):
def __init__(self, args):
super(Roboturk_NewSegmentedDataset, self).__init__()
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
self.args = args
# Require a task list.
# The task name is needed for setting up the environment and rendering;
# the environment itself shouldn't be needed for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-Milk", "pegs-RoundNut", "pegs-SquareNut"]
self.environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Indexing note: np.searchsorted with side='right' handles boundary indices consistently,
# so no sentinel value needs to be prepended to cummulative_num_demos.
self.total_length = self.num_demos.sum()
# Load data from all tasks.
self.files = []
for i in range(len(self.task_list)):
self.files.append( np.load("{0}/{1}/New_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
		# Joint-angle ordering, joint/gripper indices, and gripper open/closed constants are
		# documented in the Roboturk_Dataset class above; the preprocessed arrays loaded here
		# don't need them.
def __len__(self):
return self.total_length
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
		# Compute the within-task demo index by subtracting the cumulative number of
		# demonstrations from all preceding tasks.
new_index = index-self.cummulative_num_demos[max(task_index,0)]
data_element = self.files[task_index][new_index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
		# Flag degenerate trajectories as invalid; indices 4900 and 537 are hard-coded exclusions (presumably known-bad demos).
		if resample_length<=1 or index==4900 or index==537:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
data_element['task-id'] = task_index
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
def get_number_task_demos(self, task_index):
return self.num_demos[task_index]
def get_task_demo(self, task_index, index):
if index>=self.num_demos[task_index]:
print("Out of bounds of dataset.")
return None
data_element = self.files[task_index][index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
if resample_length<=1 or data_element['robot-state'].shape[0]==0:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
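# Minimal usage sketch (hypothetical args; only the fields read above are set):
# args = argparse.Namespace(ds_freq=1, smoothen=0, smoothing_kernel_bandwidth=3.5)
# dataset = Roboturk_NewSegmentedDataset(args)
# element = dataset[0] # dict with 'demo', 'robot-state', 'object-state', 'flat-state', 'task-id', etc.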
class Roboturk_Dataloader_Tester(unittest.TestCase):
def test_Roboturkdataloader(self):
self.dataset = Roboturk_Dataset()
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']
check_demo_data = (data_element['demo']==np.load("Test_Data/Roboturk_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
if __name__ == '__main__':
# Run all tests defined for the dataloader.
unittest.main() | CausalSkillLearning-main | Experiments/Roboturk_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
import DataLoaders, MIME_DataLoader, Roboturk_DataLoader, Mocap_DataLoader
from PolicyManagers import *
import TestClass
def return_dataset(args, data=None):
# The data parameter overrides the data in args.data.
# This is so that we can call return_dataset with source and target data for transfer setting.
if data is not None:
args.data = data
# Define Data Loader.
if args.data=='Continuous':
dataset = DataLoaders.ContinuousToyDataset(args.datadir)
elif args.data=='ContinuousNonZero':
dataset = DataLoaders.ContinuousNonZeroToyDataset(args.datadir)
elif args.data=='DeterGoal':
dataset = DataLoaders.DeterministicGoalDirectedDataset(args.datadir)
elif args.data=='MIME':
dataset = MIME_DataLoader.MIME_NewDataset()
elif args.data=='Roboturk':
dataset = Roboturk_DataLoader.Roboturk_NewSegmentedDataset(args)
elif args.data=='OrigRoboturk':
dataset = Roboturk_DataLoader.Roboturk_Dataset(args)
elif args.data=='FullRoboturk':
dataset = Roboturk_DataLoader.Roboturk_FullDataset(args)
	elif args.data=='Mocap':
		dataset = Mocap_DataLoader.Mocap_Dataset(args)
	else:
		# Fail loudly instead of hitting a NameError on an unrecognized dataset name.
		raise ValueError("Unknown dataset: {0}".format(args.data))
	return dataset
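# Example (hypothetical args object): with args.data='Roboturk' this returns a
# Roboturk_NewSegmentedDataset; passing data='MIME' explicitly overrides args.data,
# which is how the transfer settings below construct source and target datasets.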
class Master():
def __init__(self, arguments):
self.args = arguments
self.dataset = return_dataset(self.args)
# Now define policy manager.
if self.args.setting=='learntsub':
self.policy_manager = PolicyManager_Joint(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='pretrain_sub':
self.policy_manager = PolicyManager_Pretrain(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='baselineRL':
self.policy_manager = PolicyManager_BaselineRL(args=self.args)
elif self.args.setting=='downstreamRL':
self.policy_manager = PolicyManager_DownstreamRL(args=self.args)
elif self.args.setting=='DMP':
self.policy_manager = PolicyManager_DMPBaselines(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='imitation':
self.policy_manager = PolicyManager_Imitation(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
source_dataset = return_dataset(self.args, data=self.args.source_domain)
target_dataset = return_dataset(self.args, data=self.args.target_domain)
if self.args.setting=='transfer':
self.policy_manager = PolicyManager_Transfer(args=self.args, source_dataset=source_dataset, target_dataset=target_dataset)
elif self.args.setting=='cycle_transfer':
self.policy_manager = PolicyManager_CycleConsistencyTransfer(args=self.args, source_dataset=source_dataset, target_dataset=target_dataset)
if self.args.debug:
embed()
# Create networks and training operations.
self.policy_manager.setup()
def run(self):
if self.args.setting=='pretrain_sub' or self.args.setting=='pretrain_prior' or \
self.args.setting=='imitation' or self.args.setting=='baselineRL' or self.args.setting=='downstreamRL' or \
self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
if self.args.train:
if self.args.model:
self.policy_manager.train(self.args.model)
else:
self.policy_manager.train()
else:
if self.args.setting=='pretrain_prior':
self.policy_manager.train(self.args.model)
else:
self.policy_manager.evaluate(model=self.args.model)
elif self.args.setting=='learntsub':
if self.args.train:
if self.args.model:
self.policy_manager.train(self.args.model)
else:
if self.args.subpolicy_model:
print("Just loading subpolicies.")
self.policy_manager.load_all_models(self.args.subpolicy_model, just_subpolicy=True)
self.policy_manager.train()
else:
# self.policy_manager.train(self.args.model)
self.policy_manager.evaluate(self.args.model)
elif self.args.setting=='DMP':
self.policy_manager.evaluate_across_testset()
def test(self):
if self.args.test_code:
loader = TestClass.TestLoaderWithKwargs()
suite = loader.loadTestsFromTestCase(TestClass.MetaTestClass, policy_manager=self.policy_manager)
unittest.TextTestRunner().run(suite)
def parse_arguments():
parser = argparse.ArgumentParser(description='Learning Skills from Demonstrations')
# Setup training.
parser.add_argument('--datadir', dest='datadir',type=str,default='../Data/ContData/')
parser.add_argument('--train',dest='train',type=int,default=0)
parser.add_argument('--debug',dest='debug',type=int,default=0)
parser.add_argument('--notes',dest='notes',type=str)
parser.add_argument('--name',dest='name',type=str,default=None)
parser.add_argument('--fake_batch_size',dest='fake_batch_size',type=int,default=1)
parser.add_argument('--batch_size',dest='batch_size',type=int,default=1)
parser.add_argument('--training_phase_size',dest='training_phase_size',type=int,default=500000)
parser.add_argument('--initial_counter_value',dest='initial_counter_value',type=int,default=0)
parser.add_argument('--data',dest='data',type=str,default='Continuous')
parser.add_argument('--setting',dest='setting',type=str,default='gtsub')
parser.add_argument('--test_code',dest='test_code',type=int,default=0)
parser.add_argument('--model',dest='model',type=str)
parser.add_argument('--logdir',dest='logdir',type=str,default='Experiment_Logs/')
parser.add_argument('--epochs',dest='epochs',type=int,default=500) # Number of epochs to train for. Reduce for Mocap.
# Training setting.
parser.add_argument('--discrete_z',dest='discrete_z',type=int,default=0)
# parser.add_argument('--transformer',dest='transformer',type=int,default=0)
parser.add_argument('--z_dimensions',dest='z_dimensions',type=int,default=64)
parser.add_argument('--number_layers',dest='number_layers',type=int,default=5)
parser.add_argument('--hidden_size',dest='hidden_size',type=int,default=64)
parser.add_argument('--environment',dest='environment',type=str,default='SawyerLift') # Defines robosuite environment for RL.
# Data parameters.
parser.add_argument('--traj_segments',dest='traj_segments',type=int,default=1) # Defines whether to use trajectory segments for pretraining or entire trajectories. Useful for baseline implementation.
parser.add_argument('--gripper',dest='gripper',type=int,default=1) # Whether to use gripper training in roboturk.
parser.add_argument('--ds_freq',dest='ds_freq',type=int,default=1) # Additional downsample frequency.
parser.add_argument('--condition_size',dest='condition_size',type=int,default=4)
parser.add_argument('--smoothen', dest='smoothen',type=int,default=0) # Whether to smoothen the original dataset.
parser.add_argument('--smoothing_kernel_bandwidth', dest='smoothing_kernel_bandwidth',type=float,default=3.5) # The smoothing bandwidth that is applied to data loader trajectories.
parser.add_argument('--new_gradient',dest='new_gradient',type=int,default=1)
parser.add_argument('--b_prior',dest='b_prior',type=int,default=1)
parser.add_argument('--constrained_b_prior',dest='constrained_b_prior',type=int,default=1) # Whether to use constrained b prior var network or just normal b prior one.
parser.add_argument('--reparam',dest='reparam',type=int,default=1)
parser.add_argument('--number_policies',dest='number_policies',type=int,default=4)
parser.add_argument('--fix_subpolicy',dest='fix_subpolicy',type=int,default=1)
	parser.add_argument('--train_only_policy',dest='train_only_policy',type=int,default=0) # Train only the policy network, reusing a pretrained encoder.
parser.add_argument('--load_latent',dest='load_latent',type=int,default=1) # Whether to load latent policy from model or not.
parser.add_argument('--subpolicy_model',dest='subpolicy_model',type=str)
parser.add_argument('--traj_length',dest='traj_length',type=int,default=10)
parser.add_argument('--skill_length',dest='skill_length',type=int,default=5)
parser.add_argument('--var_skill_length',dest='var_skill_length',type=int,default=0)
parser.add_argument('--display_freq',dest='display_freq',type=int,default=10000)
parser.add_argument('--save_freq',dest='save_freq',type=int,default=1)
parser.add_argument('--eval_freq',dest='eval_freq',type=int,default=20)
parser.add_argument('--perplexity',dest='perplexity',type=float,default=30,help='Value of perplexity fed to TSNE.')
parser.add_argument('--entropy',dest='entropy',type=int,default=0)
parser.add_argument('--var_entropy',dest='var_entropy',type=int,default=0)
parser.add_argument('--ent_weight',dest='ent_weight',type=float,default=0.)
parser.add_argument('--var_ent_weight',dest='var_ent_weight',type=float,default=2.)
parser.add_argument('--pretrain_bias_sampling',type=float,default=0.) # Defines percentage of trajectory within which to sample trajectory segments for pretraining.
parser.add_argument('--pretrain_bias_sampling_prob',type=float,default=0.)
parser.add_argument('--action_scale_factor',type=float,default=1)
parser.add_argument('--z_exploration_bias',dest='z_exploration_bias',type=float,default=0.)
parser.add_argument('--b_exploration_bias',dest='b_exploration_bias',type=float,default=0.)
parser.add_argument('--lat_z_wt',dest='lat_z_wt',type=float,default=0.1)
parser.add_argument('--lat_b_wt',dest='lat_b_wt',type=float,default=1.)
parser.add_argument('--z_probability_factor',dest='z_probability_factor',type=float,default=0.1)
parser.add_argument('--b_probability_factor',dest='b_probability_factor',type=float,default=0.1)
parser.add_argument('--subpolicy_clamp_value',dest='subpolicy_clamp_value',type=float,default=-5)
parser.add_argument('--latent_clamp_value',dest='latent_clamp_value',type=float,default=-5)
parser.add_argument('--min_variance_bias',dest='min_variance_bias',type=float,default=0.01)
parser.add_argument('--normalization',dest='normalization',type=str,default='None')
parser.add_argument('--likelihood_penalty',dest='likelihood_penalty',type=int,default=10)
parser.add_argument('--subpolicy_ratio',dest='subpolicy_ratio',type=float,default=0.01)
parser.add_argument('--latentpolicy_ratio',dest='latentpolicy_ratio',type=float,default=0.1)
parser.add_argument('--temporal_latentpolicy_ratio',dest='temporal_latentpolicy_ratio',type=float,default=0.)
parser.add_argument('--latent_loss_weight',dest='latent_loss_weight',type=float,default=0.1)
parser.add_argument('--kl_weight',dest='kl_weight',type=float,default=0.01)
parser.add_argument('--var_loss_weight',dest='var_loss_weight',type=float,default=1.)
parser.add_argument('--prior_weight',dest='prior_weight',type=float,default=0.00001)
# Cross Domain Skill Transfer parameters.
parser.add_argument('--discriminability_weight',dest='discriminability_weight',type=float,default=1.,help='Weight of discriminability loss in cross domain skill transfer.')
parser.add_argument('--vae_loss_weight',dest='vae_loss_weight',type=float,default=1.,help='Weight of VAE loss in cross domain skill transfer.')
parser.add_argument('--alternating_phase_size',dest='alternating_phase_size',type=int,default=2000, help='Size of alternating training phases.')
parser.add_argument('--discriminator_phase_size',dest='discriminator_phase_size',type=int,default=2,help='Factor by which to train discriminator more than generator.')
parser.add_argument('--cycle_reconstruction_loss_weight',dest='cycle_reconstruction_loss_weight',type=float,default=1.,help='Weight of the cycle-consistency reconstruction loss term.')
# Exploration and learning rate parameters.
parser.add_argument('--epsilon_from',dest='epsilon_from',type=float,default=0.3)
parser.add_argument('--epsilon_to',dest='epsilon_to',type=float,default=0.05)
parser.add_argument('--epsilon_over',dest='epsilon_over',type=int,default=30)
parser.add_argument('--learning_rate',dest='learning_rate',type=float,default=1e-4)
# Baseline parameters.
parser.add_argument('--baseline_kernels',dest='baseline_kernels',type=int,default=15)
parser.add_argument('--baseline_window',dest='baseline_window',type=int,default=15)
parser.add_argument('--baseline_kernel_bandwidth',dest='baseline_kernel_bandwidth',type=float,default=3.5)
# Reinforcement Learning parameters.
parser.add_argument('--TD',dest='TD',type=int,default=0) # Whether or not to use Temporal difference while training the critic network.
parser.add_argument('--OU',dest='OU',type=int,default=1) # Whether or not to use the Ornstein Uhlenbeck noise process while training.
parser.add_argument('--OU_max_sigma',dest='OU_max_sigma',type=float,default=0.2) # Max Sigma value of the Ornstein Uhlenbeck noise process.
parser.add_argument('--OU_min_sigma',dest='OU_min_sigma',type=float,default=0.2) # Min Sigma value of the Ornstein Uhlenbeck noise process.
parser.add_argument('--MLP_policy',dest='MLP_policy',type=int,default=0) # Whether or not to use MLP policy.
parser.add_argument('--mean_nonlinearity',dest='mean_nonlinearity',type=int,default=0) # Whether or not to use Tanh activation.
	parser.add_argument('--burn_in_eps',dest='burn_in_eps',type=int,default=500) # How many episodes to burn in.
parser.add_argument('--random_memory_burn_in',dest='random_memory_burn_in',type=int,default=1) # Whether to burn in episodes into memory randomly or not.
parser.add_argument('--shaped_reward',dest='shaped_reward',type=int,default=0) # Whether or not to use shaped rewards.
	parser.add_argument('--memory_size',dest='memory_size',type=int,default=2000) # Size of replay memory. 2000 works, but may be too small for long-horizon tasks.
# Transfer learning domains, etc.
parser.add_argument('--source_domain',dest='source_domain',type=str,help='What the source domain is in transfer.')
parser.add_argument('--target_domain',dest='target_domain',type=str,help='What the target domain is in transfer.')
return parser.parse_args()
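# Example invocation (hypothetical run name and flag values; all flags are defined above):
# python Master.py --name=pretrain_roboturk --setting=pretrain_sub --data=Roboturk --train=1 --z_dimensions=64 --number_layers=5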
def main(args):
args = parse_arguments()
master = Master(args)
if args.test_code:
master.test()
else:
master.run()
if __name__=='__main__':
main(sys.argv)
| CausalSkillLearning-main | Experiments/Master.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
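# e.g., resample(np.arange(10), 5) picks indices np.linspace(0, 9, 5, dtype=int) = [0, 2, 4, 6, 9],
# so the first and last timepoints are always retained.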
class Mocap_Dataset(Dataset):
def __init__(self, args, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/Mocap/'
self.args = args
# Load the entire set of trajectories.
self.data_list = np.load(os.path.join(self.dataset_directory, "Demo_Array.npy"),allow_pickle=True)
self.dataset_length = len(self.data_list)
self.ds_freq = self.args.ds_freq
def __len__(self):
# Return length of file list.
return self.dataset_length
def process_item(self, item):
resample_length = len(item['global_positions']) // self.ds_freq
if resample_length<5:
item['is_valid'] = False
else:
item['is_valid'] = True
item['global_positions'] = resample(item['global_positions'], resample_length)
demo = resample(item['local_positions'], resample_length)
item['local_positions'] = demo
item['local_rotations'] = resample(item['local_rotations'], resample_length)
item['animation'] = resample(item['animation'], resample_length)
# Replicate as demo for downstream dataloading. # Reshape to TxNumber of dimensions.
item['demo'] = demo.reshape((demo.shape[0],-1))
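		# e.g., local positions of shape (T, J, 3) become a (T, 3*J) demo array
		# (J is the number of joints in the Mocap skeleton).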
return item
def __getitem__(self, index):
# Return n'th item of dataset.
# This has already processed everything.
		# Remember, the global and local positions are all stored as Number_Frames x Number_Joints x 3 arrays.
		# These could be flattened to Number_Frames x Number_Dimensions, though the per-joint dimensions are not independent.
return self.process_item(copy.deepcopy(self.data_list[index]))
def compute_statistics(self):
embed() | CausalSkillLearning-main | Experiments/Mocap_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class DMP():
# def __init__(self, time_steps=100, num_ker=25, dimensions=3, kernel_bandwidth=None, alphaz=None, time_basis=False):
def __init__(self, time_steps=40, num_ker=15, dimensions=7, kernel_bandwidth=3.5, alphaz=5., time_basis=True):
# DMP(dimensions=7,time_steps=40,num_ker=15,kernel_bandwidth=3.5,alphaz=5.,time_basis=True)
# self.alphaz = 25.0
if alphaz is not None:
self.alphaz = alphaz
else:
self.alphaz = 10.
self.betaz = self.alphaz/4
self.alpha = self.alphaz/3
self.time_steps = time_steps
self.tau = self.time_steps
# self.tau = 1.
self.use_time_basis = time_basis
self.dimensions = dimensions
# self.number_kernels = max(500,self.time_steps)
self.number_kernels = num_ker
if kernel_bandwidth is not None:
self.kernel_bandwidth = kernel_bandwidth
else:
self.kernel_bandwidth = self.calculate_good_sigma(self.time_steps, self.number_kernels)
self.epsilon = 0.001
self.setup()
def setup(self):
self.gaussian_kernels = np.zeros((self.number_kernels,2))
self.weights = np.zeros((self.number_kernels, self.dimensions))
self.demo_pos = np.zeros((self.time_steps, self.dimensions))
self.demo_vel = np.zeros((self.time_steps, self.dimensions))
self.demo_acc = np.zeros((self.time_steps, self.dimensions))
self.target_forces = np.zeros((self.time_steps, self.dimensions))
self.phi = np.zeros((self.number_kernels, self.time_steps, self.time_steps))
self.eta = np.zeros((self.time_steps, self.dimensions))
self.vector_phase = np.zeros(self.time_steps)
# Defining Rollout variables.
self.rollout_time = self.time_steps
self.dt = 1./self.rollout_time
self.pos_roll = np.zeros((self.rollout_time,self.dimensions))
self.vel_roll = np.zeros((self.rollout_time,self.dimensions))
self.acc_roll = np.zeros((self.rollout_time,self.dimensions))
self.force_roll = np.zeros((self.rollout_time,self.dimensions))
self.goal = np.zeros(self.dimensions)
self.start = np.zeros(self.dimensions)
def calculate_good_sigma(self, time, number_kernels, threshold=0.15):
return time/(2*(number_kernels-1)*(np.sqrt(-np.log(threshold))))
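	# Derivation: with K kernels spaced Delta = time/(K-1) apart, requiring adjacent
	# squared-exponential kernels to overlap at `threshold` at their midpoint gives
	# exp(-(Delta/2)^2 / sigma^2) = threshold  =>  sigma = Delta / (2*sqrt(-ln(threshold))).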
def load_trajectory(self,pos,vel=None,acc=None):
self.demo_pos = np.zeros((self.time_steps, self.dimensions))
self.demo_vel = np.zeros((self.time_steps, self.dimensions))
self.demo_acc = np.zeros((self.time_steps, self.dimensions))
if vel is not None and acc is not None:
self.demo_pos = copy.deepcopy(pos)
self.demo_vel = copy.deepcopy(vel)
self.demo_acc = copy.deepcopy(acc)
else:
self.smooth_interpolate(pos)
def smooth_interpolate(self, pos):
		# Filter the position input by Gaussian smoothing.
smooth_pos = gaussian_filter1d(pos,3.5,axis=0,mode='nearest')
time_range = np.linspace(0, pos.shape[0]-1, pos.shape[0])
new_time_range = np.linspace(0,pos.shape[0]-1,self.time_steps+2)
self.interpolated_pos = np.zeros((self.time_steps+2,self.dimensions))
interpolating_objects = []
for i in range(self.dimensions):
interpolating_objects.append(interp1d(time_range,pos[:,i],kind='linear'))
self.interpolated_pos[:,i] = interpolating_objects[i](new_time_range)
self.demo_vel = np.diff(self.interpolated_pos,axis=0)[:self.time_steps]
self.demo_acc = np.diff(self.interpolated_pos,axis=0,n=2)[:self.time_steps]
self.demo_pos = self.interpolated_pos[:self.time_steps]
def initialize_variables(self):
self.weights = np.zeros((self.number_kernels, self.dimensions))
self.target_forces = np.zeros((self.time_steps, self.dimensions))
self.phi = np.zeros((self.number_kernels, self.time_steps, self.time_steps))
self.eta = np.zeros((self.time_steps, self.dimensions))
self.kernel_centers = np.linspace(0,self.time_steps,self.number_kernels)
self.vector_phase = self.calc_vector_phase(self.kernel_centers)
self.gaussian_kernels[:,0] = self.vector_phase
# Different kernel parameters that have worked before, giving different behavior.
# # dummy = (np.diff(self.gaussian_kernels[:,0]*0.55))**2
# # dummy = (np.diff(self.gaussian_kernels[:,0]*2))**2
# # dummy = (np.diff(self.gaussian_kernels[:,0]))**2
dummy = (np.diff(self.gaussian_kernels[:,0]*self.kernel_bandwidth))**2
self.gaussian_kernels[:,1] = 1. / np.append(dummy,dummy[-1])
# self.gaussian_kernels[:,1] = self.number_kernels/self.gaussian_kernels[:,0]
def calc_phase(self,time):
return np.exp(-self.alpha*float(time)/self.tau)
def calc_vector_phase(self,time):
return np.exp(-self.alpha*time.astype(float)/self.tau)
def basis(self,index,time):
return np.exp(-(self.gaussian_kernels[index,1])*((self.calc_phase(time)-self.gaussian_kernels[index,0])**2))
def time_basis(self, index, time):
# return np.exp(-(self.gaussian_kernels[index,1])*((time-self.kernel_centers[index])**2))
# return np.exp(-(time-self.kernel_centers[index])**2)
return np.exp(-((time-self.kernel_centers[index])**2)/(self.kernel_bandwidth))
def vector_basis(self, index, time_range):
return np.exp(-(self.gaussian_kernels[index,1])*((self.calc_vector_phase(time_range)-self.gaussian_kernels[index,0])**2))
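	# The target forces below invert the DMP transformation system
	# tau^2 * acc = alphaz * (betaz * (goal - pos) - tau * vel) + f,
	# i.e. f = tau^2 * acc - alphaz * (betaz * (goal - pos) - tau * vel).
	# The "_itau" variant uses tau as written, "_dtau" divides through by tau instead,
	# and the plain variant corresponds to tau = 1.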
def update_target_force_itau(self):
self.target_forces = (self.tau**2)*self.demo_acc - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.tau*self.demo_vel)
def update_target_force_dtau(self):
self.target_forces = self.demo_acc/(self.tau**2) - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.demo_vel/self.tau)
def update_target_force(self):
self.target_forces = self.demo_acc - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.demo_vel)
def update_phi(self):
for i in range(self.number_kernels):
for t in range(self.time_steps):
if self.use_time_basis:
self.phi[i,t,t] = self.time_basis(i,t)
else:
self.phi[i,t,t] = self.basis(i,t)
def update_eta(self):
t_range = np.linspace(0,self.time_steps,self.time_steps)
vector_phase = self.calc_vector_phase(t_range)
for k in range(self.dimensions):
self.eta[:,k] = vector_phase*(self.demo_pos[self.time_steps-1,k]-self.demo_pos[0,k])
def learn_DMP(self, pos, forces="i"):
self.setup()
self.load_trajectory(pos)
self.initialize_variables()
self.learn_weights(forces=forces)
def learn_weights(self, forces="i"):
if forces=="i":
self.update_target_force_itau()
elif forces=="d":
self.update_target_force_dtau()
elif forces=="n":
self.update_target_force()
self.update_phi()
self.update_eta()
for j in range(self.dimensions):
for i in range(self.number_kernels):
self.weights[i,j] = np.dot(self.eta[:,j],np.dot(self.phi[i],self.target_forces[:,j]))
self.weights[i,j] /= np.dot(self.eta[:,j],np.dot(self.phi[i],self.eta[:,j])) + self.epsilon
def initialize_rollout(self,start,goal,init_vel):
self.pos_roll = np.zeros((self.rollout_time,self.dimensions))
self.vel_roll = np.zeros((self.rollout_time,self.dimensions))
self.acc_roll = np.zeros((self.rollout_time,self.dimensions))
self.tau = self.rollout_time
self.pos_roll[0] = copy.deepcopy(start)
self.vel_roll[0] = copy.deepcopy(init_vel)
self.goal = goal
self.start = start
self.dt = self.tau/self.rollout_time
# print(self.dt,self.tau,self.rollout_time)
def calc_rollout_force(self, roll_time):
den = 0
time = copy.deepcopy(roll_time)
for i in range(self.number_kernels):
if self.use_time_basis:
self.force_roll[roll_time] += self.time_basis(i,time)*self.weights[i]
den += self.time_basis(i,time)
else:
self.force_roll[roll_time] += self.basis(i,time)*self.weights[i]
den += self.basis(i,time)
self.force_roll[roll_time] *= (self.goal-self.start)*self.calc_phase(time)/den
def calc_rollout_acceleration(self,time):
self.acc_roll[time] = (1./self.tau**2)*(self.alphaz * (self.betaz * (self.goal - self.pos_roll[time]) - self.tau*self.vel_roll[time]) + self.force_roll[time])
def calc_rollout_vel(self,time):
self.vel_roll[time] = self.vel_roll[time-1] + self.acc_roll[time-1]*self.dt
def calc_rollout_pos(self,time):
self.pos_roll[time] = self.pos_roll[time-1] + self.vel_roll[time-1]*self.dt
def rollout(self,start,goal,init_vel):
self.initialize_rollout(start,goal,init_vel)
self.calc_rollout_force(0)
self.calc_rollout_acceleration(0)
for i in range(1,self.rollout_time):
self.calc_rollout_force(i)
self.calc_rollout_vel(i)
self.calc_rollout_pos(i)
self.calc_rollout_acceleration(i)
return self.pos_roll
def load_weights(self, weight):
self.weights = copy.deepcopy(weight)
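# Minimal usage sketch (assumes `pos` is a (T, 7) joint-angle trajectory):
# dmp = DMP(time_steps=40, num_ker=15, dimensions=7, kernel_bandwidth=3.5, alphaz=5., time_basis=True)
# dmp.learn_DMP(pos) # fits weights; calls setup/load_trajectory/initialize_variables internally
# rollout = dmp.rollout(start=pos[0], goal=pos[-1], init_vel=np.zeros(7)) # (40, 7) rolled-out positions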
def main(args):
	pos = np.load(str(sys.argv[1]))[:,:3]
	vel = np.load(str(sys.argv[2]))[:,:3]
	acc = np.load(str(sys.argv[3]))[:,:3]
	rolltime = 500
	# The loaded trajectories are 3-dimensional, so the DMP must be too.
	dmp = DMP(rolltime, dimensions=3)
	# learn_DMP handles setup, trajectory loading, and variable initialization internally.
	dmp.learn_DMP(pos)
	start = np.zeros(dmp.dimensions)
	goal = np.ones(dmp.dimensions)
	norm_vector = pos[-1]-pos[0]
	init_vel = np.divide(vel[0],norm_vector)
	rollout = dmp.rollout(start, goal, init_vel)
	# There is no save_rollout method on DMP; save the rolled-out positions directly
	# (the output filename here is arbitrary).
	np.save("DMP_Rollout.npy", rollout)
| CausalSkillLearning-main | Experiments/DMP.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import glob, os, sys, argparse
import torch, copy
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from IPython import embed
import matplotlib
matplotlib.use('Agg')
# matplotlib.rcParams['animation.ffmpeg_args'] = '-report'
matplotlib.rcParams['animation.bitrate'] = 2000
import matplotlib.pyplot as plt
import tensorboardX
from scipy import stats
from absl import flags
from memory_profiler import profile
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import pdb
import sklearn.manifold as skl_manifold
from sklearn.decomposition import PCA
from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage,
AnnotationBbox)
from matplotlib.animation import FuncAnimation
import tensorflow as tf
import tempfile
import moviepy.editor as mpy
import subprocess
import h5py
import time
import robosuite
import unittest
import cProfile
from scipy import stats, signal
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import find_peaks, argrelextrema
from sklearn.neighbors import NearestNeighbors
| CausalSkillLearning-main | Experiments/headers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, glob, os
from IPython import embed
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
# Evaluate baselineRL methods.
a = 130
b = 137
prefix = 'RL'
increment = 100
reward_list = []
for i in range(a,b+1):
model_template = "RL{0}/saved_models/Model_epoch*".format(i)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
	# NOTE: str.lstrip strips a *character set*, not a prefix, so epoch numbers whose digits
	# appear in the pattern string can be mis-parsed; remove_start (defined further below) is
	# the robust alternative. glob results are also not guaranteed to be sorted, so models[-1]
	# need not be the latest checkpoint.
	max_model = int(models[-1].lstrip("RL{0}/saved_models/Model_epoch".format(i)))
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("RL{0}/MEval/m{1}/Mean_Reward_RL{0}.npy".format(i,model_range[j]))
reward_list.append(rewards)
embed()
# x = np.arange(0,260,20)
# dists = np.zeros((6,len(x),100))
# a = 6
# b = 12
# for i in range(a,b):
# for j in range(len(x)):
# dists[i-a,j] = np.load("IL0{0}/MEval/m{1}/Total_Rewards_IL0{0}.npy".format(str(i).zfill(2),x[j]))
# IL
a = 18
b = 23
prefix = 'IL0'
increment = 20
reward_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(i,model_range[j],prefix))
reward_list.append(rewards)
# Get distances
a = 30
b = 37
prefix = 'RJ'
increment = 20
distance_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
distances = np.zeros((len(model_range)))
for j in range(len(model_range)):
distances[j] = np.load("{2}{0}/MEval/m{1}/Mean_Trajectory_Distance_{2}{0}.npy".format(i,model_range[j],prefix))
distance_list.append(distances)
################################################
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
# Evaluate baselineRL methods.
a = 5
b = 12
prefix = 'downRL'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
max_model = int(models[-1].lstrip("{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
##############################################
# MOcap distances
# Get distances
a = 1
b = 2
prefix = 'Mocap00'
increment = 20
distance_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
distances = np.zeros((len(model_range)))
for j in range(len(model_range)):
distances[j] = np.load("{2}{0}/MEval/m{1}/Mean_Trajectory_Distance_{2}{0}.npy".format(i,model_range[j],prefix))
distance_list.append(distances)
##############################################
################################################
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
def remove_start(inputstring, word_to_remove):
return inputstring[len(word_to_remove):] if inputstring.startswith(word_to_remove) else inputstring
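# e.g. "RL12/saved_models/Model_epoch120".lstrip("RL12/saved_models/Model_epoch") returns "0",
# not "120", because lstrip strips a character *set* ('1' and '2' are in the set);
# remove_start strips an exact prefix instead.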
# Evaluate baselineRL methods.
a = 23
b = 28
prefix = 'downRL_pi'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max())
def evalrl(a,b):
prefix = 'downRL_pi'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max())
# NOTE: this redefinition shadows the evalrl above; this version evaluates the 'RL' runs
# (two-digit zero padding) rather than the 'downRL_pi' runs.
def evalrl(a,b):
prefix = 'RL'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(2)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max()) | CausalSkillLearning-main | Experiments/Eval_RLRewards.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from mocap_processing.motion.pfnn import Animation, BVH
from basecode.render import glut_viewer as viewer
from basecode.render import gl_render, camera
from basecode.utils import basics
from basecode.math import mmMath
import numpy as np, imageio
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import time, threading
from IPython import embed
global whether_to_render
whether_to_render = False
def init():
global whether_to_render, global_positions, counter, joint_parents, done_with_render, save_path, name_prefix, image_list
whether_to_render = False
done_with_render = False
global_positions = None
joint_parents = None
save_path = "/private/home/tanmayshankar/Research/Code/"
name_prefix = "Viz_Image"
image_list = []
counter = 0
# Define function to load animation file.
def load_animation(bvh_filename):
animation, joint_names, time_per_frame = BVH.load(bvh_filename)
joint_parents = animation.parents
global_positions = Animation.positions_global(animation)
return global_positions, joint_parents, time_per_frame
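# global_positions has shape (Number_Frames, Number_Joints, 3); joint_parents[i] gives the
# parent joint index of joint i, with -1 marking the root.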
# Function that draws body of animated character from the global positions.
def render_pose_by_capsule(global_positions, frame_num, joint_parents, scale=1.0, color=[0.5, 0.5, 0.5, 1], radius=0.05):
glPushMatrix()
glScalef(scale, scale, scale)
for i in range(len(joint_parents)):
pos = global_positions[frame_num][i]
# gl_render.render_point(pos, radius=radius, color=color)
j = joint_parents[i]
if j!=-1:
pos_parent = global_positions[frame_num][j]
p = 0.5 * (pos_parent + pos)
l = np.linalg.norm(pos_parent-pos)
R = mmMath.getSO3FromVectors(np.array([0, 0, 1]), pos_parent-pos)
gl_render.render_capsule(mmMath.Rp2T(R,p), l, radius, color=color, slice=16)
glPopMatrix()
# Callback that renders one pose.
def render_callback_time_independent():
global global_positions, joint_parents, counter
if counter<global_positions.shape[0]:
gl_render.render_ground(size=[100, 100], color=[0.8, 0.8, 0.8], axis='z', origin=True, use_arrow=True)
# Render Shadow of Character
glEnable(GL_DEPTH_TEST)
glDisable(GL_LIGHTING)
glPushMatrix()
glTranslatef(0, 0, 0.001)
glScalef(1, 1, 0)
render_pose_by_capsule(global_positions, counter, joint_parents, color=[0.5,0.5,0.5,1.0])
glPopMatrix()
# Render Character
glEnable(GL_LIGHTING)
render_pose_by_capsule(global_positions, counter, joint_parents, color=np.array([85, 160, 173, 255])/255.0)
# Callback that runs rendering when the global variable is set to true.
def idle_callback():
	# Increment the frame counter, render that frame via the time-independent render callback
	# (invoked through viewer.drawGL), and save a screenshot.
	# Since this is an idle callback, drawGL won't call itself; it only calls the render callback.
	# NOTE: this debug version is capped at 10 frames; idle_callback_return below renders the full trajectory.
global whether_to_render, counter, global_positions, done_with_render, save_path, name_prefix
done_with_render = False
# if whether_to_render and counter<global_positions.shape[0]:
if whether_to_render and counter<10:
# print("Whether to render is actually true, with counter:",counter)
# render_callback_time_independent()
viewer.drawGL()
viewer.save_screen(save_path, "Image_{}_{}".format(name_prefix, counter))
# viewer.save_screen("/home/tanmayshankar/Research/Code/","Visualize_Image_{}".format(counter))
counter += 1
# Set whether to render to false if counter exceeded.
# if counter>=global_positions.shape[0]:
if counter>=10:
whether_to_render = False
done_with_render = True
# If whether to render is false, reset the counter.
else:
counter = 0
def idle_callback_return():
	# Same flow as idle_callback, but renders the full trajectory and additionally reads each
	# saved frame back into image_list so callers can assemble a video.
global whether_to_render, counter, global_positions, done_with_render, save_path, name_prefix, image_list
done_with_render = False
if whether_to_render and counter<global_positions.shape[0]:
# if whether_to_render and counter<10:
# print("Whether to render is actually true, with counter:",counter)
# render_callback_time_independent()
viewer.drawGL()
name = "Image_{}_{}".format(name_prefix, counter)
viewer.save_screen(save_path, name)
img = imageio.imread(os.path.join(save_path, name+".png"))
image_list.append(img)
counter += 1
# Set whether to render to false if counter exceeded.
if counter>=global_positions.shape[0]:
# if counter>=10:
whether_to_render = False
done_with_render = True
# If whether to render is false, reset the counter.
else:
counter = 0 | CausalSkillLearning-main | Experiments/MocapVisualizationUtils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import MocapVisualizationUtils
import threading, time, numpy as np
# bvh_filename = "/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
bvh_filename = "/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
filenames = [bvh_filename]
file_num = 0
print("About to run viewer.")
cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([6.0, 0.0, 2.0]),
origin=np.array([0.0, 0.0, 0.0]),
vup=np.array([0.0, 0.0, 1.0]),
fov=45.0)
def run_thread():
MocapVisualizationUtils.viewer.run(
title='BVH viewer',
cam=cam_cur,
size=(1280, 720),
keyboard_callback=None,
render_callback=MocapVisualizationUtils.render_callback_time_independent,
idle_callback=MocapVisualizationUtils.idle_callback,
)
# This second definition shadows the one above; the example therefore uses idle_callback_return.
def run_thread():
MocapVisualizationUtils.viewer.run(
title='BVH viewer',
cam=cam_cur,
size=(1280, 720),
keyboard_callback=None,
render_callback=MocapVisualizationUtils.render_callback_time_independent,
idle_callback=MocapVisualizationUtils.idle_callback_return,
)
# Run init before loading animation.
MocapVisualizationUtils.init()
MocapVisualizationUtils.global_positions, MocapVisualizationUtils.joint_parents, MocapVisualizationUtils.time_per_frame = MocapVisualizationUtils.load_animation(filenames[file_num])
thread = threading.Thread(target=run_thread)
thread.start()
print("Going to actually call callback now.")
MocapVisualizationUtils.whether_to_render = True
x_count = 0
while MocapVisualizationUtils.done_with_render==False and MocapVisualizationUtils.whether_to_render==True:
x_count += 1
time.sleep(1)
print("x_count is now: ",x_count)
print("We finished with the visualization!")
| CausalSkillLearning-main | Experiments/MocapVisualizationExample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Debugging cycle consistency transfer.
python Master.py --name=CTdebug --train=1 --setting=cycle_transfer --source_domain=ContinuousNonZero --target_domain=ContinuousNonZero --z_dimensions=64 --number_layers=5 --hidden_size=64 --data=ContinuousNonZero --training_phase_size=10000 --display_freq=1000 --eval_freq=4 --alternating_phase_size=200 --discriminator_phase_size=2 --vae_loss_weight=1. --discriminability_weight=2.0 --kl_weight=0.001
| CausalSkillLearning-main | Experiments/Code_Runs/CycleTransfer_Runs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
		# For now, use only the first 3200 images.
return 3200
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
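	# e.g., a step from (5, 5) to (4, 5) has diff [-1, 0], which matches action_map[0] -> action 0 (UP).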
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = cv2.imread(os.path.join(self.dataset_directory,"Image{0}.png".format(index)))
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Image{0}_Traj1.npy".format(index))).astype(float)
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence | CausalSkillLearning-main | DataLoaders/GridWorld_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
import pdb
import scipy.misc
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
flags.DEFINE_string('MIME_imgs_dir', '/checkpoint/shubhtuls/data/MIME/', 'Data Directory')
flags.DEFINE_integer('img_h', 64, 'Height')
flags.DEFINE_integer('img_w', 128, 'Width')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this fraction. Original recroding rate = 100Hz')
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Img_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.dataset_directory = opts.MIME_dir
self.imgs_dataset_directory = opts.MIME_imgs_dir
self.img_h = opts.img_h
self.img_w = opts.img_w
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
self.filelist = glob.glob(self.fulltext)
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())))
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', opts.MIME_dir) for f in self.filelist]
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
file_split = file.split('/')
frames_folder = osp.join(self.imgs_dataset_directory, file_split[-3], file_split[-2], 'frames')
n_frames = len(os.listdir(frames_folder))
imgs = []
frame_inds = [0, n_frames//2, n_frames-1]
for fi in frame_inds:
img = scipy.misc.imread(osp.join(frames_folder, 'im_{}.png'.format(fi+1)))
imgs.append(scipy.misc.imresize(img, (self.img_h, self.img_w)))
imgs = np.stack(imgs)
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(joint_angle_trajectory) // self.ds_freq
elem = {}
elem['imgs'] = imgs
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = MIME_Img_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
| CausalSkillLearning-main | DataLoaders/MIME_Img_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
from . import MIME_DataLoader
opts = flags.FLAGS
def main(_):
dataset = MIME_DataLoader.MIME_Dataset(opts)
print("Created DataLoader.")
embed()
if __name__ == '__main__':
app.run(main) | CausalSkillLearning-main | DataLoaders/InteractiveDataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
import pdb
# flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
# flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
# flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
flags.DEFINE_enum('arm', 'both', ['left', 'right', 'both'], 'Which arms data to load')
class Plan_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.opts = opts
self.split = split
self.dataset_directory = self.opts.MIME_dir
# # Must consider permutations of arm and split.
# Right Arm: New_Plans / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_RG_Traj
# Left Arm: New_Plans_Left / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_LG_traj
# Both Arms: Ambidextrous_Plans / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_Grip_Traj
		# Set the folder and gripper suffix per arm, following the mapping documented above.
		if self.opts.arm=='left':
			folder = 'New_Plans_Left'
			gripper_suffix = "_LG_Traj"
		elif self.opts.arm=='right':
			folder = 'New_Plans'
			gripper_suffix = "_RG_Traj"
		elif self.opts.arm=='both':
			folder = 'Ambidextrous_Plans'
			gripper_suffix = "_Grip_Traj"
# Default: /checkpoint/tanmayshankar/MIME/
if self.split=='all':
# Collect list of all EE Plans, we will select all Joint Angle Plans correspondingly.
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_EE_Plan.npy')
# Joint angle plans filelist is in same order thanks to glob.
self.jatext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_Joint_Plan.npy')
# Gripper plans filelist is in same order thanks to glob.
# self.rgtext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_RG_Traj.npy')
self.filelist = sorted(glob.glob(self.fulltext))
self.joint_filelist = sorted(glob.glob(self.jatext))
# self.gripper_filelist = sorted(glob.glob(self.rgtext))
elif self.split=='train':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanTrainList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointTrainList.npy"))
elif self.split=='val':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanValList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointValList.npy"))
elif self.split=='test':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanTestList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointTestList.npy"))
# the loaded np arrays give byte strings, and not strings, which breaks later code
if not isinstance(self.filelist[0], str):
self.filelist = [f.decode() for f in self.filelist]
self.joint_filelist = [f.decode() for f in self.joint_filelist]
# Now replace terms in filelists based on what arm it is.
# The EE file list only needs folder replaced.
self.filelist = [f.replace("New_Plans",folder).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.filelist]
# The Joint file list also only needs folder replaced.
self.joint_filelist = [f.replace("New_Plans",folder).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.joint_filelist]
# Since we didn't create split lists for Gripper, use the filelist and replace to Gripper.
self.gripper_filelist = [f.replace("New_Plans",folder).replace("_EE_Plan",gripper_suffix).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.filelist]
# Set joint names.
self.left_joint_names = ['left_s0','left_s1','left_e0','left_e1','left_w0','left_w1','left_w2']
self.right_joint_names = ['right_s0','right_s1','right_e0','right_e1','right_w0','right_w1','right_w2']
self.both_joint_names = self.left_joint_names+self.right_joint_names
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
file = self.filelist[index]
joint_file = self.joint_filelist[index]
gripper_file = self.gripper_filelist[index]
# Load items.
elem = {}
elem['EE_Plan'] = np.load(file)
elem['JA_Plan'] = np.load(joint_file)
elem['Grip_Plan'] = np.load(gripper_file)/100
return elem
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = Plan_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
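# Minimal usage sketch (not part of the original file): iterate a few plan
# triplets. Assumes absl flags have been parsed and that MIME_dir points at a
# local copy of the data; the path below is a hypothetical example.
#
# from absl import app
#
# def _demo(_):
# 	opts = flags.FLAGS
# 	opts.MIME_dir = '/path/to/MIME/'  # hypothetical location
# 	loader = data_loader(opts, split='train', shuffle=False)
# 	for i, batch in enumerate(loader):
# 		# Each batch holds EE, joint-angle, and gripper plans for one demo (bs=1).
# 		print(batch['EE_Plan'].shape, batch['JA_Plan'].shape, batch['Grip_Plan'].shape)
# 		if i == 2:
# 			break
#
# if __name__ == '__main__':
# 	app.run(_demo)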
| CausalSkillLearning-main | DataLoaders/Plan_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
	def __len__(self):
		# The dataset is assumed to hold 4000 maps; counting the stored images
		# via glob is kept below as the alternative.
		# filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
		# return len(filelist)
		return 4000
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
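	# Worked example (added for clarity): with the action_map above, a coordinate
	# trajectory [[0,0],[1,0],[1,1]] has state diffs [[1,0],[0,1]], which match
	# action_map rows 1 (DOWN) and 3 (RIGHT), so the returned sequence is [1., 3.].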
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = np.load(os.path.join(self.dataset_directory,"Map{0}.npy".format(index)))
time_limit = 20
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Map{0}_Traj1.npy".format(index))).astype(float)[:time_limit]
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence | CausalSkillLearning-main | DataLoaders/SmallMaps_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import random as stdlib_random, string
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from absl import flags, app
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from ..utils import plotting as plot_util
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_integer('n_segments_min', 4, 'Min Number of gt segments per trajectory')
flags.DEFINE_integer('n_segments_max', 4, 'Max number of gt segments per trajectory')
dirs_2d = np.array([
[1,0],
[0,1],
[-1,0],
[0,-1]
])
def vis_walk(walk):
'''
Args:
walk: (nT+1) X 2 array
Returns:
im: 200 X 200 X 4 numpy array
'''
t = walk.shape[0]
xs = walk[:,0]
ys = walk[:,1]
	color_inds = np.linspace(0, 255, t).astype(int).tolist()  # np.int was removed in newer NumPy; use the builtin int
cs = plot_util.colormap[color_inds, :]
fig = plt.figure(figsize=(4, 4), dpi=50)
ax = fig.subplots()
ax.scatter(xs, ys, c=cs)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal', 'box')
ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
ax.tick_params(
axis='y',
which='both',
left=False,
right=False,
labelleft=False)
fig.tight_layout()
fname = '/tmp/' + ''.join(stdlib_random.choices(string.ascii_letters, k=8)) + '.png'
fig.savefig(fname)
plt.close(fig)
im = plt.imread(fname)
os.remove(fname)
return im
def walk_segment(origin, direction, n_steps=10, step_size=0.1, noise=0.02, rng=None):
'''
Args:
origin: nd numpy array
direction: nd numpy array with unit norm
n_steps: length of time seq
step_size: size of each step
		noise: magnitude of max actuation noise
Returns:
segment: n_steps X nd array
note that the first position in segment is different from origin
'''
if rng is None:
rng = np.random
nd = origin.shape[0]
segment = np.zeros((n_steps, nd)) + origin
segment += np.arange(1, n_steps+1).reshape((-1,1))*direction*step_size
segment += rng.uniform(low=-1, high=1, size=(n_steps, nd)) * noise/nd
return segment
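# Quick shape check (added for clarity): a segment starting at the origin and
# heading along +x, e.g. walk_segment(np.zeros(2), dirs_2d[0]), is a 10 x 2
# array whose k-th row is roughly [0.1*(k+1), 0] plus the actuation noise.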
def random_walk2d(origin, num_segments=4, rng=None):
'''
Args:
origin: 2d numpy array
num_segments: length of time seq
Returns:
walk: (nT+1) X 2 array
'''
if rng is None:
rng = np.random
dir_ind = rng.randint(4)
walk = origin.reshape(1,2)
seg_lengths = []
for s in range(num_segments):
seg_length = rng.randint(6,10)
seg_lengths.append(seg_length)
step_size = 0.1 + (rng.uniform() - 0.5)*0.05
segment = walk_segment(origin, dirs_2d[dir_ind], n_steps=seg_length, step_size=step_size, rng=rng)
origin = segment[-1]
walk = np.concatenate((walk, segment), axis=0)
dir_ind += 2 * rng.randint(2) -1
dir_ind = dir_ind % 4
return walk, seg_lengths
class RandomWalksDataset(Dataset):
def __init__(self, opts):
self.opts = opts
self.n_segments_min = self.opts.n_segments_min
self.n_segments_max = self.opts.n_segments_max
def __len__(self):
return int(1e6)
def __getitem__(self, ix):
rng = np.random.RandomState(ix)
ns = rng.randint(self.n_segments_min, self.n_segments_max+1)
trajectory, self.seg_lengths_ix = random_walk2d(np.zeros(2), num_segments=ns, rng=rng)
return trajectory
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, shuffle=True):
dset = RandomWalksDataset(opts)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
if __name__ == '__main__':
	# random_walk2d returns (walk, seg_lengths); unpack so the walk prints cleanly.
	walk, seg_lengths = random_walk2d(np.zeros(2), num_segments=4)
	print(walk)
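	# The walk can also be rendered with the helper above, e.g.:
	# im = vis_walk(walk)  # 200 x 200 x 4 RGBA numpy array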
| CausalSkillLearning-main | DataLoaders/RandomWalks.py |
| CausalSkillLearning-main | DataLoaders/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
# flags.DEFINE_boolean('downsampling', True, 'Whether to downsample trajectories. ')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this fraction. Original recording rate = 100Hz')
flags.DEFINE_boolean('remote', False, 'Whether operating from a remote server or not.')
# opts = flags.FLAGS
def select_baxter_angles(trajectory, joint_names, arm='right'):
# joint names in order as used via mujoco visualizer
baxter_joint_names = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2']
if arm == 'right':
select_joints = baxter_joint_names[:7]
elif arm == 'left':
select_joints = baxter_joint_names[7:]
elif arm == 'both':
select_joints = baxter_joint_names
inds = [joint_names.index(j) for j in select_joints]
return trajectory[:, inds]
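# Example (added for clarity): given a trajectory whose columns follow
# self.joint_names (sorted alphabetically), select_baxter_angles(traj,
# joint_names, arm='right') returns the seven right-arm columns reordered to
# the s0, s1, e0, e1, w0, w1, w2 convention used by the mujoco visualizer.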
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
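# Example (added for clarity): resample(traj, 5) on a length-100 trajectory
# keeps the rows at indices [0, 24, 49, 74, 99], i.e. a roughly uniform
# subsampling that always includes the first and last timesteps.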
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.dataset_directory = opts.MIME_dir
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
if opts.remote:
self.suff_filelist = np.load(osp.join(self.dataset_directory,"Suffix_Filelist.npy"))
self.filelist = []
for j in range(len(self.suff_filelist)):
self.filelist.append(osp.join(self.dataset_directory,self.suff_filelist[j]))
else:
self.filelist = glob.glob(self.fulltext)
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())))
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', opts.MIME_dir) for f in self.filelist]
# print(len(self.filelist))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
elem['ra_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='right')
elem['la_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='left')
# If max norm of differences is <1.0, valid.
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = MIME_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
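# Minimal usage sketch (not part of the original file; assumes parsed absl
# flags and a local MIME_dir):
#
# opts = flags.FLAGS
# loader = data_loader(opts, split='val', shuffle=False)
# elem = next(iter(loader))
# print(elem['joint_angle_trajectory'].shape)  # (1, n_samples, n_joints)
# print(elem['is_valid'])                      # 1 if the max per-step joint jump is < 1.0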
| CausalSkillLearning-main | DataLoaders/MIME_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
A convenience script to playback random demonstrations from
a set of demonstrations stored in a hdf5 file.
Arguments:
--folder (str): Path to demonstrations
--use_actions (optional): If this flag is provided, the actions are played back
through the MuJoCo simulator, instead of loading the simulator states
one by one.
Example:
$ python playback_demonstrations_from_hdf5.py --folder ../models/assets/demonstrations/SawyerPickPlace/
"""
import os
import h5py
import argparse
import random
import numpy as np
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
from IPython import embed
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--folder",
type=str,
default=os.path.join(
robosuite.models.assets_root, "demonstrations/SawyerNutAssembly"
),
)
parser.add_argument(
"--use-actions",
action='store_true',
)
args = parser.parse_args()
demo_path = args.folder
hdf5_path = os.path.join(demo_path, "demo.hdf5")
f = h5py.File(hdf5_path, "r")
env_name = f["data"].attrs["env"]
env = robosuite.make(
env_name,
has_renderer=False,
# has_renderer=True,
ignore_done=True,
use_camera_obs=False,
gripper_visualization=True,
reward_shaping=True,
control_freq=100,
)
# list of all demonstrations episodes
demos = list(f["data"].keys())
while True:
print("Playing back random episode... (press ESC to quit)")
# # select an episode randomly
ep = random.choice(demos)
# read the model xml, using the metadata stored in the attribute for this episode
model_file = f["data/{}".format(ep)].attrs["model_file"]
model_path = os.path.join(demo_path, "models", model_file)
with open(model_path, "r") as model_f:
model_xml = model_f.read()
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.sim.reset()
# env.viewer.set_camera(0)
# load the flattened mujoco states
		states = f["data/{}/states".format(ep)][()]  # Dataset.value was removed in h5py 3.x
if args.use_actions:
# load the initial state
env.sim.set_state_from_flattened(states[0])
env.sim.forward()
# load the actions and play them back open-loop
			jvels = f["data/{}/joint_velocities".format(ep)][()]
			grip_acts = f["data/{}/gripper_actuations".format(ep)][()]
actions = np.concatenate([jvels, grip_acts], axis=1)
num_actions = actions.shape[0]
for j, action in enumerate(actions):
env.step(action)
# env.render()
if j < num_actions - 1:
# ensure that the actions deterministically lead to the same recorded states
state_playback = env.sim.get_state().flatten()
embed()
assert(np.all(np.equal(states[j + 1], state_playback)))
else:
print("Embedding in not use actions branch")
embed()
# force the sequence of internal mujoco states one by one
for state in states:
env.sim.set_state_from_flattened(state)
env.sim.forward()
# env.render()
f.close() | CausalSkillLearning-main | DataLoaders/RoboturkeExp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .headers import *
import os.path as osp
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('lang_dir', '/private/home/shubhtuls/code/sfd/cachedir/data/lang/', 'Data Directory')
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
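# Worked example (added for clarity):
# normalizeString("Je m'appelle!") -> "je m appelle !"
# (lowercased, accents stripped, punctuation spaced out, other symbols replaced by spaces)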
def readLangs(data_dir, lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open(osp.join(data_dir, '%s-%s.txt' % (lang1, lang2)), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s ",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
# and \
# p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(data_dir, lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(data_dir, lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
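# Example (added for clarity): if lang.word2index maps 'hello'->5 and 'world'->9,
# tensorFromSentence(lang, 'hello world') returns tensor([[5],[9],[1]]) on the
# chosen device, where the trailing 1 is the EOS token.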
class TranslationDataset(Dataset):
'''
	Class implementing instance of dataset class for English-French translation data.
'''
def __init__(self, opts):
self.dataset_directory = opts.lang_dir
self.l1, self.l2, self.pairs = prepareData(self.dataset_directory, 'eng', 'fra', reverse=False)
	def __len__(self):
		# Return the number of sentence pairs (Lang objects define no __len__).
		return len(self.pairs)
def tensorsFromPair(self, pair):
input_tensor = tensorFromSentence(self.l1, pair[0])
target_tensor = tensorFromSentence(self.l2, pair[1])
return (input_tensor, target_tensor)
def __getitem__(self, index):
elem = {}
elem['pair'] = self.pairs[index]
elem['l1'], elem['l2'] = self.tensorsFromPair(elem['pair'])
return elem | CausalSkillLearning-main | DataLoaders/Translation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import glob, cv2, os
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from absl import flags
from IPython import embed
from absl import flags, app | CausalSkillLearning-main | DataLoaders/headers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
# flags.DEFINE_boolean('downsampling', True, 'Whether to downsample trajectories. ')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this fraction. Original recording rate = 100Hz')
flags.DEFINE_boolean('remote', False, 'Whether operating from a remote server or not.')
# opts = flags.FLAGS
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts):
self.dataset_directory = opts.MIME_dir
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
if opts.remote:
self.suff_filelist = np.load(osp.join(self.dataset_directory,"Suffix_Filelist.npy"))
self.filelist = []
for j in range(len(self.suff_filelist)):
self.filelist.append(osp.join(self.dataset_directory,self.suff_filelist[j]))
else:
self.filelist = sorted(glob.glob(self.fulltext))
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
print(self.filelist[0])
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
self.train_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Train_Lists.npy"))
self.val_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Val_Lists.npy"))
self.test_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Test_Lists.npy"))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def setup_splits(self):
self.train_filelist = []
self.val_filelist = []
self.test_filelist = []
for i in range(20):
self.train_filelist.extend(self.train_lists[i])
self.val_filelist.extend(self.val_lists[i])
self.test_filelist.extend(self.test_lists[i])
def getit(self, index, split=None, return_plan_run=None):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
if split=="train":
file = self.train_filelist[index]
elif split=="val":
file = self.val_filelist[index]
elif split=="test":
file = self.test_filelist[index]
elif split is None:
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
folder = "New_Plans"
if return_plan_run is not None:
ee_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_EE_Plan.npy".format(folder,return_plan_run)))
ja_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_Joint_Plan.npy".format(folder,return_plan_run)))
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)
elem['right_gripper'] = resample(right_gripper, n_samples)
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
		# Only set when plans were requested; otherwise ja_plan/ee_plan are undefined.
		if return_plan_run is not None:
			elem['JA_Plan'] = ja_plan
			elem['EE_Plan'] = ee_plan
return elem
def __getitem__(self, index, split=None, return_plan_run=None):
# def __getitem__(self, inputs):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
if split=="train":
file = self.train_filelist[index]
elif split=="val":
file = self.val_filelist[index]
elif split=="test":
file = self.test_filelist[index]
elif split is None:
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
folder = "New_Plans"
if return_plan_run is not None:
ee_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_EE_Plan.npy".format(folder,return_plan_run)))
			ja_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_Joint_Plan.npy".format(folder,return_plan_run)))  # matches the Run*_Joint_Plan naming used in getit and Plan_DataLoader
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)
elem['right_gripper'] = resample(right_gripper, n_samples)
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
		# Only set when plans were requested; otherwise ja_plan/ee_plan are undefined.
		if return_plan_run is not None:
			elem['JA_Plan'] = ja_plan
			elem['EE_Plan'] = ee_plan
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, shuffle=True):
dset = MIME_Dataset(opts)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
| CausalSkillLearning-main | DataLoaders/MIMEandPlan_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
# For both arms and grippers.
python -m SkillsfromDemonstrations.Experiments.UseSkillsRL.TrainZPolicyRL --train --transformer --nz=64 --nh=64 --variable_nseg=False --network_dir=saved_models/T356_fnseg_vae_sl2pt0_kldwt0pt002_finetune --variable_ns=False --st_space=joint_both_gripper --vae_enc
"""
from __future__ import absolute_import
import os, sys, torch
import matplotlib.pyplot as plt
from ...DataLoaders import MIME_DataLoader
from ..abstraction import mime_eval
from ..abstraction.abstraction_utils import ScoreFunctionEstimator
from .PolicyNet import PolicyNetwork, PolicyNetworkSingleTimestep, AltPolicyNetworkSingleTimestep
from absl import app, flags
import imageio, numpy as np, copy, os, shutil
from IPython import embed
import robosuite
import tensorboard, tensorboardX
flags.DEFINE_boolean('train',False,'Whether to run train.')
flags.DEFINE_boolean('debug',False,'Whether to debug.')
# flags.DEFINE_float('sf_loss_wt', 0.1, 'Weight of pseudo loss for SF estimator')
# flags.DEFINE_float('kld_loss_wt', 0, 'Weight for KL Divergence loss if using VAE encoder.')
flags.DEFINE_float('reinforce_loss_wt', 1., 'Weight for primary reinforce loss.')
# flags.DEFINE_string('name',None,'Name to give run.')
class ZPolicyTrainer(object):
def __init__(self, opts):
self.opts = opts
self.input_size = self.opts.n_state
self.zpolicy_input_size = 85
self.hidden_size = 20
self.output_size = self.opts.nz
self.primitive_length = 10
self.learning_rate = 1e-4
self.number_epochs = 200
self.number_episodes = 500
self.save_every_epoch = 5
self.maximum_skills = 6
def initialize_plots(self):
self.log_dir = os.path.join("SkillsfromDemonstrations/cachedir/logs/RL",self.opts.name)
if not(os.path.isdir(self.log_dir)):
			os.makedirs(self.log_dir)  # makedirs also creates any missing parent directories
self.writer = tensorboardX.SummaryWriter(self.log_dir)
def setup_networks(self):
# Set up evaluator to load mime model and stuff.
self.evaluator = mime_eval.PrimitiveDiscoverEvaluator(self.opts)
self.evaluator.setup_testing(split='val')
# Also create a ZPolicy.
# self.z_policy = PolicyNetworkSingleTimestep(opts=self.opts, input_size=self.zpolicy_input_size, hidden_size=self.hidden_size, output_size=self.output_size).cuda()
self.z_policy = AltPolicyNetworkSingleTimestep(opts=self.opts, input_size=self.zpolicy_input_size, hidden_size=self.hidden_size, output_size=self.output_size).cuda()
if self.opts.variable_nseg:
self.sf_loss_fn = ScoreFunctionEstimator()
# Creating optimizer.
self.z_policy_optimizer = torch.optim.Adam(self.z_policy.parameters(), lr=self.learning_rate)
def load_network(self, network_dir):
# Load the evaluator networks (Abstraction network and skill network)
self.evaluator.load_network(self.evaluator.model, 'pred', 'latest', network_dir=network_dir)
# Freeze parameters of the IntendedTrajectoryPredictorModel.
for parameter in self.evaluator.model.parameters():
			parameter.requires_grad = False  # 'require_grad' would silently create a new attribute and freeze nothing
def save_zpolicy_model(self, path, suffix):
if not(os.path.isdir(path)):
			os.makedirs(path)
save_object = {}
save_object['ZPolicy'] = self.z_policy.state_dict()
torch.save(save_object,os.path.join(path,"ZPolicyModel"+suffix))
def load_all_models(self, path):
load_object = torch.load(path)
self.z_policy.load_state_dict(load_object['ZPolicy'])
# def update_plots(self, counter, sample_map, loglikelihood):
def update_plots(self, counter):
if self.opts.variable_nseg:
self.writer.add_scalar('Stop_Prob_Reinforce_Loss', torch.mean(self.stop_prob_reinforce_loss), counter)
self.writer.add_scalar('Predicted_Zs_Reinforce_Loss', torch.mean(self.reinforce_predicted_Zs), counter)
self.writer.add_scalar('KL_Divergence_Loss', torch.mean(self.kld_loss_seq), counter)
self.writer.add_scalar('Total_Loss', torch.mean(self.total_loss), counter)
def assemble_input(self, trajectory):
traj_start = trajectory[0]
traj_end = trajectory[-1]
return torch.cat([torch.tensor(traj_start).cuda(),torch.tensor(traj_end).cuda()],dim=0)
# def update_networks(self, state_traj, reward_traj, predicted_Zs):
def update_networks(self, state_traj_torch, reward_traj, latent_z_seq, log_prob_seq, stop_prob_seq, stop_seq, kld_loss_seq):
# embed()
		# Get cumulative rewards corresponding to actions executed after selecting a particular Z.
		# This is basically adding up the rewards from the end of the array.
# cumm_reward_to_go = torch.cumsum(torch.tensor(reward_traj[::-1]).cuda().float())[::-1]
cumm_reward_to_go_numpy = copy.deepcopy(np.cumsum(copy.deepcopy(reward_traj[::-1]))[::-1])
cumm_reward_to_go = torch.tensor(cumm_reward_to_go_numpy).cuda().float()
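		# Worked example (added for clarity): for per-segment rewards [1, 2, 3],
		# reversing, cumsum-ing, and reversing again yields [6, 5, 3] -- the total
		# reward collected from each segment onwards.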
self.total_loss = 0.
if self.opts.variable_nseg:
			# Remember, this stop probability loss is for stopping the prediction of Z's, NOT intermediate timesteps!
# So we still use cumm_reward_to_go rather than cumm_reward_to_go_array
self.stop_prob_reinforce_loss = self.sf_loss_fn.forward(cumm_reward_to_go, stop_prob_seq.unsqueeze(1), stop_seq.long())
# Add reinforce loss and loss value.
self.total_loss += self.opts.sf_loss_wt*self.stop_prob_reinforce_loss
# Now adding the reinforce loss associated with predicted Zs.
# (Remember, we want to maximize reward times log prob, so multiply by -1 to minimize.)
self.reinforce_predicted_Zs = (self.opts.reinforce_loss_wt * -1. * cumm_reward_to_go*log_prob_seq.view(-1)).sum()
self.total_loss += self.reinforce_predicted_Zs
# Add loss term with KL Divergence between 0 mean Gaussian and predicted Zs.
self.kld_loss_seq = kld_loss_seq
self.total_loss += self.opts.kld_loss_wt*self.kld_loss_seq[0]
# Zero gradients of optimizer, compute backward, then step optimizer.
self.z_policy_optimizer.zero_grad()
self.total_loss.sum().backward()
self.z_policy_optimizer.step()
def reorder_actions(self, actions):
# Assume that the actions are 16 dimensional, and are ordered as:
# 7 DoF for left arm, 7 DoF for right arm, 1 for left gripper, and 1 for right gripper.
# The original trajectory has gripper values from 0 (Close) to 1 (Open), but we've to rescale to -1 (Open) to 1 (Close) for Mujoco.
# And handle joint velocities.
# MIME Gripper values are from 0 to 100 (Close to Open), but we assume actions has values from 0 to 1 (Close to Open), and then rescale to (-1 Open to 1 Close) for Mujoco.
# Mujoco needs them flipped.
indices = np.array([ 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 15, 14])
reordered_actions = actions[:,indices]
reordered_actions[:,14:] = 1 - 2*reordered_actions[:,14:]
return reordered_actions
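	# Example (added for clarity): the index vector swaps the two 7-DoF arm
	# blocks (left-first input -> right-first output) and appends the grippers
	# as [right, left]; under the 1 - 2*g rescaling a gripper command of
	# 0 (close) maps to +1 and 1 (open) maps to -1, as Mujoco expects here.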
def run_episode(self, counter):
# For number of epochs:
# # 1) Given start and goal (for reaching task, say)
# # 2) Run Z_Policy on start and goal to retrieve predicted Zs.
# # 3) Decode predicted Zs into trajectory.
# # 4) Retrieve "actions" from trajectory.
# # 5) Feed "actions" into RL environment and collect reward.
# # 6) Train ZPolicy to maximize cummulative reward with favorite RL algorithm.
# Reset environment.
state = self.environment.reset()
terminal = False
reward_traj = None
state_traj_torch = None
t_out = 0
stop = False
hidden = None
latent_z_seq = None
stop_prob_seq = None
stop_seq = None
log_prob_seq = None
kld_loss_seq = 0.
previous_state = None
while terminal==False and stop==False:
########################################################
######## 1) Collect input for first timestep. ##########
########################################################
zpolicy_input = np.concatenate([state['robot-state'],state['object-state']]).reshape(1,self.zpolicy_input_size)
########################################################
# 2) Feed into the Z policy to retrieve the predicted Z.
########################################################
latent_z, stop_probability, stop, log_prob, kld_loss, hidden = self.z_policy.forward(zpolicy_input, hidden=hidden)
latent_z = latent_z.squeeze(1)
########################################################
############## 3) Decode into trajectory. ##############
########################################################
primitive_and_skill_stop_prob = self.evaluator.model.primitive_decoder(latent_z)
traj_seg = primitive_and_skill_stop_prob[0].squeeze(1).detach().cpu().numpy()
if previous_state is None:
previous_state = traj_seg[-1].reshape(1,self.opts.n_state)
else:
# Concatenate previous state to trajectory, so that when we take actions we get an action from previous segment to the current one.
traj_seg = np.concatenate([previous_state,traj_seg],axis=0)
previous_state = traj_seg[-1].reshape(-1,self.opts.n_state)
########################################################
## 4) Finite diff along time axis to retrieve actions ##
########################################################
actions = np.diff(traj_seg,axis=0)
actions = self.reorder_actions(actions)
actions_torch = torch.tensor(actions).cuda().float()
cummulative_reward_in_segment = 0.
# Run step into evironment for all actions in this segment.
t = 0
while t<actions_torch.shape[0] and terminal==False:
# Step.
state, onestep_reward, terminal, success = self.environment.step(actions[t])
# Collect onestep_rewards within this segment.
cummulative_reward_in_segment += float(onestep_reward)
				# Assuming we have fixed_ns (i.e. novariable_ns), we can use the set decoding length of primitives to assign cumulative reward-to-go values to the various predicted Z variables.
				# (This is also why we need the reward history, and not just the cumulative rewards obtained over the course of training.)
t+=1
# Everything is going to be set to None, so set variables.
# Do some bookkeeping in life.
if t_out==0:
state_traj_torch = torch.tensor(zpolicy_input).cuda().float().view(-1,self.zpolicy_input_size)
latent_z_seq = latent_z.view(-1,self.opts.nz)
stop_seq = stop.clone().detach().view(-1,1)
stop_prob_seq = stop_probability.view(-1,2)
log_prob_seq = log_prob.view(-1,1)
# reward_traj = torch.tensor(copy.deepcopy(cummulative_reward_in_segment)).cuda().float().view(-1,1)
reward_traj = np.array(cummulative_reward_in_segment).reshape((1,1))
else:
state_traj_torch = torch.cat([state_traj_torch, torch.tensor(zpolicy_input).cuda().float().view(-1,self.zpolicy_input_size)],dim=0)
latent_z_seq = torch.cat([latent_z_seq, latent_z.view(-1,self.opts.nz)], dim=0)
stop_seq = torch.cat([stop_seq, stop.view(-1,1)], dim=0)
stop_prob_seq = torch.cat([stop_prob_seq, stop_probability.view(-1,2)], dim=0)
log_prob_seq = torch.cat([log_prob_seq, log_prob.view(-1,1)], dim=0)
# reward_traj = torch.cat([reward_traj.view(-1,1), torch.tensor(copy.deepcopy(cummulative_reward_in_segment)).cuda().float().view(-1,1)])
reward_traj = np.concatenate([reward_traj, np.array(cummulative_reward_in_segment).reshape((1,1))], axis=0)
# Either way:
kld_loss_seq += kld_loss
t_out += 1
# print(t_out)
# Set to false by default.
if self.opts.variable_nseg==False:
stop = False
if t_out>=self.maximum_skills:
stop = True
# if self.opts.debug==True:
# embed()
if self.opts.train:
# 6) Feed states, actions, reward, and predicted Zs to update. (These are all lists of tensors.)
# self.update_networks(state_traj_torch, action_torch, reward_traj, latent_zs)
self.update_networks(state_traj_torch, reward_traj, latent_z_seq, log_prob_seq, stop_prob_seq, stop_seq, kld_loss_seq)
self.update_plots(counter)
def setup_RL_environment(self, has_display=False):
# Create Mujoco environment.
self.environment = robosuite.make("BaxterLift", has_renderer=has_display)
self.initialize_plots()
def trainRL(self):
# Basic function to train.
counter = 0
for e in range(self.number_epochs):
# Number of episodes per epoch.
for i in range(self.number_episodes):
print("#########################################")
print("Epoch: ",e,"Traj: ",i)
# Run an episode.
self.run_episode(counter)
counter += 1
if self.opts.train and e%self.save_every_epoch==0:
self.save_zpolicy_model(os.path.join("saved_models/RL",self.opts.name), "epoch{0}".format(e))
def main(_):
# This is only to be executed for notebooks.
# flags.FLAGS([''])
opts = flags.FLAGS
# Set state space.
if opts.st_space == 'ee_r' or opts.st_space == 'ee_l':
opts.n_state = 7
if opts.st_space == 'joint_ra' or opts.st_space == 'joint_la':
opts.n_state = 7
if opts.st_space == 'joint_both':
opts.n_state = 14
elif opts.st_space == 'ee_all':
opts.n_state = 14
elif opts.st_space == 'joint':
opts.n_state = 17
elif opts.st_space =='joint_both_gripper':
opts.n_state = 16
opts.logging_dir = os.path.join(opts.logging_dir, 'mime')
opts.transformer = True
torch.manual_seed(0)
# Create instance of class.
zpolicy_trainer = ZPolicyTrainer(opts)
zpolicy_trainer.setup_networks()
zpolicy_trainer.setup_RL_environment()
# Still need this to load primitive decoder network.
zpolicy_trainer.load_network(opts.network_dir)
zpolicy_trainer.trainRL()
if __name__ == '__main__':
app.run(main)
| CausalSkillLearning-main | DownstreamRL/TrainZPolicyRL.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ..SkillNetwork.headers import *
from ..SkillNetwork.LSTMNetwork import LSTMNetwork, LSTMNetwork_Fixed
class PolicyNetwork(torch.nn.Module):
def __init__(self, opts, input_size, hidden_size, output_size, fixed=True):
super(PolicyNetwork, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
if fixed:
self.lstmnet = LSTMNetwork_Fixed(input_size=input_size, hidden_size=hidden_size, output_size=output_size).cuda()
else:
self.lstmnet = LSTMNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size).cuda()
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
def forward(self, input):
format_input = torch.tensor(input).view(1,1,self.input_size).cuda().float()
predicted_Z_preparam, stop_probabilities = self.lstmnet.forward(format_input)
predicted_Z_preparam = predicted_Z_preparam.squeeze(1)
self.latent_z_seq = []
self.latent_mu_seq = []
self.latent_log_sigma_seq = []
self.kld_loss = 0.
t = 0
# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
latent_z_mu_seq = self.mu_linear_layer(predicted_Z_preparam)
latent_z_log_sig_seq = self.sig_linear_layer(predicted_Z_preparam)
# Compute standard deviation.
std = torch.exp(0.5*latent_z_log_sig_seq).cuda()
# Sample random variable.
eps = torch.randn_like(std).cuda()
self.latent_z_seq = latent_z_mu_seq+eps*std
# Compute KL Divergence Loss term here, so we don't have to return mu's and sigma's.
self.kld_loss = torch.zeros(1)
for t in range(latent_z_mu_seq.shape[0]):
# Taken from mime_plan_skill.py Line 159 - KL Divergence for Gaussian prior and Gaussian prediction.
self.kld_loss += -0.5 * torch.sum(1. + latent_z_log_sig_seq[t] - latent_z_mu_seq[t].pow(2) - latent_z_log_sig_seq[t].exp())
# Create distributions so that we can evaluate log probability.
self.dists = [torch.distributions.MultivariateNormal(loc = latent_z_mu_seq[t], covariance_matrix = std[t]*torch.eye((self.opts.nz)).cuda()) for t in range(latent_z_mu_seq.shape[0])]
		# Evaluate log probability in forward so we don't have to do it elsewhere.
self.log_probs = [self.dists[i].log_prob(self.latent_z_seq[i]) for i in range(self.latent_z_seq.shape[0])]
return self.latent_z_seq, stop_probabilities
class PolicyNetworkSingleTimestep(torch.nn.Module):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, opts, input_size, hidden_size, output_size):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetworkSingleTimestep, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 4
self.maximum_length = 15
		# Define a (unidirectional) LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
self.logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
def forward(self, input, hidden=None):
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assuming input is a numpy array.
format_input = torch.tensor(input).view(input.shape[0],1,self.input_size).cuda().float()
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input, hidden)
# Predict parameters
latentz_preparam = self.output_layer(outputs[-1])
# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
latent_z_mu = self.mu_linear_layer(latentz_preparam)
latent_z_log_sig = self.sig_linear_layer(latentz_preparam)
# Predict stop probability.
preact_stop_probs = self.stopping_probability_layer(outputs[-1])
stop_probability = self.softmax_layer(preact_stop_probs)
stop = self.sample_action(stop_probability)
# Compute standard deviation.
std = torch.exp(0.5*latent_z_log_sig).cuda()
# Sample random variable.
eps = torch.randn_like(std).cuda()
latent_z = latent_z_mu+eps*std
# Compute KL Divergence Loss term here, so we don't have to return mu's and sigma's.
# Taken from mime_plan_skill.py Line 159 - KL Divergence for Gaussian prior and Gaussian prediction.
kld_loss = -0.5 * torch.sum(1. + latent_z_log_sig - latent_z_mu.pow(2) - latent_z_log_sig.exp())
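		# i.e. the closed-form KL(N(mu, sigma^2) || N(0, I)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2).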
# Create distributions so that we can evaluate log probability.
dist = torch.distributions.MultivariateNormal(loc = latent_z_mu, covariance_matrix = std*torch.eye((self.opts.nz)).cuda())
		# Evaluate log probability in forward so we don't have to do it elsewhere.
log_prob = dist.log_prob(latent_z)
return latent_z, stop_probability, stop, log_prob, kld_loss, hidden
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action
class AltPolicyNetworkSingleTimestep(torch.nn.Module):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, opts, input_size, hidden_size, output_size):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(AltPolicyNetworkSingleTimestep, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 4
self.maximum_length = 15
		# Define a (unidirectional) LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.softplus_activation_layer = torch.nn.Softplus()
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
self.logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
def forward(self, input, hidden=None):
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assuming input is a numpy array.
format_input = torch.tensor(input).view(input.shape[0],1,self.input_size).cuda().float()
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input, hidden)
# Predict parameters
latentz_preparam = self.output_layer(outputs[-1])
		# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
		latent_z_mu = self.mu_linear_layer(latentz_preparam)
		# Softplus keeps the predicted standard deviation positive.
		latent_z_sig = self.softplus_activation_layer(self.sig_linear_layer(latentz_preparam))
# Predict stop probability.
preact_stop_probs = self.stopping_probability_layer(outputs[-1])
stop_probability = self.softmax_layer(preact_stop_probs)
stop = self.sample_action(stop_probability)
# Create distributions so that we can evaluate log probability.
dist = torch.distributions.MultivariateNormal(loc = latent_z_mu, covariance_matrix = torch.diag_embed(latent_z_sig))
latent_z = dist.sample()
		# Evaluate log probability in forward so we don't have to do it elsewhere.
log_prob = dist.log_prob(latent_z)
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).cuda(),torch.eye((self.output_size)).cuda())
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(dist, standard_distribution)
return latent_z, stop_probability, stop, log_prob, kl_divergence, hidden
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action | CausalSkillLearning-main | DownstreamRL/PolicyNet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
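# The four options are axis-aligned unit steps: [0,-1] (-y), [-1,0] (-x), [0,1] (+y), [1,0] (+x).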
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
	# Select one of four starting corners: 5*(-2,-2), 5*(-2,2), 5*(2,-2), 5*(2,2).
	goal_array_dataset[i] = np.random.randint(0, 4)  # replaces the deprecated random_integers(0, high=3); randint's upper bound is exclusive
x_array_dataset[i,0] = start_states[goal_array_dataset[i]]
goal = -start_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
			axes = -goal/abs(goal)
			step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
			# baseline = t*20*np.sqrt(2)/20
			baseline = t
			step2 = step1-baseline
			step3 = step2/step2.sum()
			# NOTE: step1-step3 compute distance-based option weights but are unused below; the option is drawn uniformly from the valid set.
y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
np.save("X_array_directed_continuous.npy",x_array_dataset)
np.save("Y_array_directed_continuous.npy",y_array_dataset)
np.save("B_array_directed_continuous.npy",b_array_dataset)
np.save("A_array_directed_continuous.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/DirectedContinuousTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
			y_array_dataset[i,t] = np.random.randint(0, 4)  # replaces the deprecated random_integers(0, high=3)
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# embed()
np.save("X_array_continuous.npy",x_array_dataset)
np.save("Y_array_continuous.npy",y_array_dataset)
np.save("B_array_continuous.npy",b_array_dataset)
np.save("A_array_continuous.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/ContinuousTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
x_array_dataset[i,0] = 5*(np.random.random((2))-0.5)
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
			y_array_dataset[i,t] = np.random.randint(0, 4)  # replaces the deprecated random_integers(0, high=3)
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
# -0.05 is because the noise is from 0-0.1, so to balance this we make it -0.05
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# embed()
np.save("X_array_continuous_nonzero.npy",x_array_dataset)
np.save("Y_array_continuous_nonzero.npy",y_array_dataset)
np.save("B_array_continuous_nonzero.npy",b_array_dataset)
np.save("A_array_continuous_nonzero.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/ContinuousNonZero.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
#number_datapoints = 20
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
lim = 25
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
	goal_array_dataset[i] = np.random.randint(0, 4)  # replaces the deprecated random_integers(0, high=3)
# Adding random noise to start state.
x_array_dataset[i,-1] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
# for t in range(number_timesteps-1):
for t in reversed(range(number_timesteps-1)):
# GET B # Must end on b==0.
if t<(number_timesteps-2):
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if t<3:
b_array_dataset[i,t] = 0
elif reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
elif t==(number_timesteps-2):
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t+1]
unnorm_directions = current_state-goal.squeeze(0)
directions = unnorm_directions/abs(unnorm_directions)
# Set valid options.
dot_product = np.dot(action_map, directions)
# valid_options = np.where(dot_product>=0)[0]
            # Since we're going backwards in time, flip the sign test.
valid_options = np.where(dot_product<=0)[0]
            # Pick the valid option whose action best aligns with the (state - goal) offset.
# embed()
# y_array_dataset[i,t] = np.random.choice(valid_options)
y_array_dataset[i,t] = valid_options[np.argmax(np.dot(action_map,unnorm_directions)[valid_options])]
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t+1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
# x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
x_array_dataset[i,t] = x_array_dataset[i,t+1]-a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-lim, lim)
plt.ylim(-lim, lim)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_deter_goal_directed.npy",x_array_dataset)
np.save("Y_deter_goal_directed.npy",y_array_dataset)
np.save("B_deter_goal_directed.npy",b_array_dataset)
np.save("A_deter_goal_directed.npy",a_array_dataset)
np.save("G_deter_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/DeterministicGoalDirectedTraj.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 1
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
    goal_array_dataset[i] = np.random.randint(0,high=4)
    # Backwards generation: pin the final state to the goal, plus a little noise.
x_array_dataset[i,-1] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
# for t in range(number_timesteps-1):
for t in reversed(range(number_timesteps-1)):
# GET B # Must end on b==0.
if t<(number_timesteps-2):
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
            # After 3 or 4 steps in the current segment, terminate with probability 0.33; after 5 steps, always terminate.
if t<3:
b_array_dataset[i,t] = 0
elif reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
elif t==(number_timesteps-2):
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t+1]
# directions = current_state-goal.squeeze(0)
directions = goal.squeeze(0)-current_state
norm_directions = directions/abs(directions)
# # Set valid options.
dot_product = np.dot(action_map, norm_directions)
# valid_options = np.where(dot_product>=0)[0]
            # # Since we're going backwards in time,
valid_options = np.where(dot_product<=0)[0]
# # axes = -goal/abs(goal)
# # step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
# # # baseline = t*20*np.sqrt(2)/20
# # baseline = t
# # step2 = step1-baseline
# # step3 = step2/step2.sum()
# # y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
# embed()
dot_product = np.dot(action_map,directions)
y_array_dataset[i,t] = np.argmax(dot_product)
# y_array_dataset[i,t] = np.random.choice(valid_options)
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t+1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
# x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
x_array_dataset[i,t] = x_array_dataset[i,t+1]-a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-25,25)
plt.ylim(-25,25)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_goal_directed.npy",x_array_dataset)
np.save("Y_goal_directed.npy",y_array_dataset)
np.save("B_goal_directed.npy",b_array_dataset)
np.save("A_goal_directed.npy",a_array_dataset)
np.save("G_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/GoalDirectedTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, copy
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 20
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
# action_map = np.array([[-1,0],[0,-1],[1,0],[0,1]])
# start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
# Creating a policy map.
size = 9
scale = 5
policy_map = np.zeros((size,size),dtype=int)
# Row wise assignment:
policy_map[0,:] = 2
policy_map[1,:7] = 2
policy_map[1,7:] = 1
policy_map[2:4,0] = 2
policy_map[2:4,1:4] = 3
policy_map[2:4,4:7] = 2
policy_map[2:4,7:] = 1
policy_map[4,:4] = 3
policy_map[4,4] = 3
policy_map[4,5:] = 1
policy_map[5,:3] = 3
policy_map[5,3:5] = 0
policy_map[5,5:] = 1
policy_map[6,:2] = 3
policy_map[6,2:7] = 0
policy_map[6,7:] = 1
policy_map[7:,0] = 3
policy_map[7:,1:7] = 0
policy_map[7:,7:] = 1
policy_map = np.transpose(policy_map)
# x = np.meshgrid(range(9),range(9))
x = np.meshgrid(np.arange(9),np.arange(9))
dxdy = action_map[policy_map[x[0],x[1]]]
traj = np.zeros((10,2))
traj[0] = [0,8]
for t in range(9):
# embed()
action_index = policy_map[int(traj[t,0]),int(traj[t,1])]
action = action_map[action_index]
traj[t+1] = traj[t] + action
print(action_index, action)
plt.ylim(9,-1)
plt.plot(traj[:,0],traj[:,1],'or')
plt.plot(traj[:,0],traj[:,1],'r')
plt.scatter(x[0],x[1])
for i in range(9):
for j in range(9):
plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01)
plt.show()
# embed()
# Transformed vis.
size = 9
scale = 5
scaled_size = scale*size
# policy_map = np.flipud(np.transpose(policy_map))
policy_map = np.transpose(policy_map)
# goal_based_policy_maps = np.zeros((4,size,size),dtype=int)
# goal_based_policy_maps[0] = copy.deepcopy(policy_map)
# goal_based_policy_maps[1] = np.rot90(policy_map)
# goal_based_policy_maps[2] = np.rot90(policy_map,k=2)
# goal_based_policy_maps[3] = np.rot90(policy_map,k=3)
def get_bucket(state, reference_state):
# baseline = 4*np.ones(2)
baseline = np.zeros(2)
compensated_state = state - reference_state
# compensated_state = (np.round(state - reference_state) + baseline).astype(int)
scaled_size = scale*size
x = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
bucket = np.zeros((2))
bucket[0] = min(np.searchsorted(x,compensated_state[0]),size-1)
bucket[1] = min(np.searchsorted(x,compensated_state[1]),size-1)
return bucket.astype(int)
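# Worked example (illustrative, not part of the original script): with size=9
# and scale=5, the bucket boundaries sit at (-22.5, -17.5, ..., 17.5), so a
# state on top of the reference falls in bucket (5, 5) and anything past the
# last boundary is clipped to index size-1 = 8.
assert (get_bucket(np.zeros(2), np.zeros(2)) == np.array([5, 5])).all()
assert (get_bucket(np.array([-30., 20.]), np.zeros(2)) == np.array([0, 8])).all()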
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*10
# goal_index = 1
# # meshrange = np.arange(-scaled_size/2,scaled_size/2+1,5)
# meshrange = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
# evalrange = (np.arange(-(size-1)/2,(size-1)/2+1)-1)*scale
# x = np.meshgrid(goal_states[goal_index,0]+meshrange,goal_states[goal_index,1]+meshrange)
# dxdy = np.zeros((9,9,2))
# # dxdy = action_map[policy_map[x[0],x[1]]]
# plt.scatter(x[0],x[1])
# plt.ylim(50,-50)
# arr = np.zeros((9,9,2))
# for i in range(9):
# for j in range(9):
# a = goal_states[goal_index,0]+evalrange[i]
# b = goal_states[goal_index,1]+evalrange[j]
# bucket = get_bucket(np.array([a,b]), goal_states[goal_index])
# arr[i,j,0] = i
# arr[i,j,1] = j
# dxdy[bucket[0],bucket[1]] = action_map[policy_map[bucket[0],bucket[1]]]
# plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01*scale)
# plt.show()
for goal_index in range(4):
# embed()
# meshrange = np.arange(-scaled_size/2,scaled_size/2+1,5)
meshrange = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
evalrange = (np.arange(-(size-1)/2,(size-1)/2+1)-1)*scale
x = np.meshgrid(goal_states[goal_index,0]+meshrange,goal_states[goal_index,1]+meshrange)
dxdy = np.zeros((9,9,2))
# dxdy = action_map[policy_map[x[0],x[1]]]
plt.scatter(x[0],x[1])
plt.ylim(50,-50)
plt.xlim(-50,50)
arr = np.zeros((9,9,2))
for i in range(9):
for j in range(9):
a = goal_states[goal_index,0]+evalrange[i]
b = goal_states[goal_index,1]+evalrange[j]
bucket = get_bucket(np.array([a,b]), goal_states[goal_index])
arr[i,j,0] = i
arr[i,j,1] = j
# dxdy[bucket[0],bucket[1]] = action_map[goal_based_policy_maps[goal_index,bucket[0],bucket[1]]]
dxdy[bucket[0],bucket[1]] = action_map[policy_map[bucket[0],bucket[1]]]
# plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01*scale)
# plt.quiver(x[0],x[1],0.1*dxdy[:,:,1],0.1*dxdy[:,:,0],width=0.0001,headwidth=4,headlength=2)
plt.quiver(x[0],x[1],0.1*dxdy[:,:,1],0.1*dxdy[:,:,0])
traj_len = 20
traj = np.zeros((20,2))
traj[0] = np.random.randint(-25,high=25,size=2)
for t in range(traj_len-1):
bucket = get_bucket(traj[t], goal_states[goal_index])
action_index = policy_map[bucket[0],bucket[1]]
action = action_map[action_index]
traj[t+1] = traj[t] + action
plt.plot(traj[:,0],traj[:,1],'r')
plt.plot(traj[:,0],traj[:,1],'or')
plt.show()
| CausalSkillLearning-main | DataGenerator/PolicyVisualizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
    # Select one of four starting corners (+/-2, +/-2), scaled by 5.
    goal_array_dataset[i] = np.random.randint(0,high=4)
# Adding random noise to start state.
x_array_dataset[i,0] = start_states[goal_array_dataset[i]] + 0.2*(np.random.random(2)-0.5)
goal = -start_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
            # After 3 or 4 steps in the current segment, terminate with probability 0.33; after 5 steps, always terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
axes = -goal/abs(goal)
step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
# baseline = t*20*np.sqrt(2)/20
baseline = t
step2 = step1-baseline
step3 = step2/step2.sum()
y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
np.save("X_dir_cont_nonzero.npy",x_array_dataset)
np.save("Y_dir_cont_nonzero.npy",y_array_dataset)
np.save("B_dir_cont_nonzero.npy",b_array_dataset)
np.save("A_dir_cont_nonzero.npy",a_array_dataset)
np.save("G_dir_cont_nonzero.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/DirectedContinuousNonZero.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, copy
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 20
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
# start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*10
# Creating a policy map.
lim = 50
size = 9
scale = 5
policy_map = np.zeros((size,size),dtype=int)
# Row wise assignment:
policy_map[0,:] = 2
policy_map[1,:7] = 2
policy_map[1,7:] = 1
policy_map[2:4,0] = 2
policy_map[2:4,1:4] = 3
policy_map[2:4,4:7] = 2
policy_map[2:4,7:] = 1
policy_map[4,:4] = 3
policy_map[4,4] = 3
policy_map[4,5:] = 1
policy_map[5,:3] = 3
policy_map[5,3:5] = 0
policy_map[5,5:] = 1
policy_map[6,:2] = 3
policy_map[6,2:7] = 0
policy_map[6,7:] = 1
policy_map[7:,0] = 3
policy_map[7:,1:7] = 0
policy_map[7:,7:] = 1
# policy_map = np.transpose(policy_map)
goal_based_policy_maps = np.zeros((4,size,size))
goal_based_policy_maps[0] = copy.deepcopy(policy_map)
goal_based_policy_maps[1] = np.flipud(policy_map)
goal_based_policy_maps[2] = np.fliplr(policy_map)
goal_based_policy_maps[3] = np.flipud(np.fliplr(policy_map))
def get_bucket(state, reference_state):
# baseline = 4*np.ones(2)
baseline = np.zeros(2)
compensated_state = state - reference_state
# compensated_state = (np.round(state - reference_state) + baseline).astype(int)
x = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
bucket = np.zeros((2))
bucket[0] = min(np.searchsorted(x,compensated_state[0]),size-1)
bucket[1] = min(np.searchsorted(x,compensated_state[1]),size-1)
return bucket.astype(int)
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
goal_array_dataset[i] = np.random.random_integers(0,high=3)
    # Add random noise to the start state. Use a dedicated name for the noise
    # spread so the bucket grid's global `scale` is not clobbered.
    # x_array_dataset[i,0] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
    start_noise = 25
    x_array_dataset[i,0] = goal_states[goal_array_dataset[i]] + start_noise*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
            # After 3 or 4 steps in the current segment, terminate with probability 0.33; after 5 steps, always terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t]
# Select options from policy map, based on the bucket the current state falls in.
bucket = get_bucket(current_state, goal_states[goal_array_dataset[i]][0])
# Now that we've the bucket, pick the option we should be executing given the bucket.
if (bucket==0).all():
y_array_dataset[i,t] = np.random.randint(0,high=4)
else:
                y_array_dataset[i,t] = goal_based_policy_maps[goal_array_dataset[i][0], bucket[0], bucket[1]]
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.1*(np.random.random((2))-0.5)
# GET X
        # Generation runs forward here, so no backward action complements are needed.
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
# plt.scatter()
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-lim,lim)
plt.ylim(-lim,lim)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_goal_directed.npy",x_array_dataset)
np.save("Y_goal_directed.npy",y_array_dataset)
np.save("B_goal_directed.npy",b_array_dataset)
np.save("A_goal_directed.npy",a_array_dataset)
np.save("G_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/NewGoalDirectedTraj.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
# number_datapoints = 20
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
start_config_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_scale = 15
start_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*start_scale
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
scale = 5
start_configs = np.zeros((4,5,2),dtype=int)
start_configs[[0,3]] = np.array([[-2,2],[-1,1],[0,0],[1,-1],[2,-2]])*scale
start_configs[[1,2]] = np.array([[-2,-2],[-1,-1],[0,0],[1,1],[2,2]])*scale
# valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
valid_options = np.array([[3,2],[3,0],[2,1],[0,1]])
lim = 50
progression_of_options = np.zeros((5,4),dtype=int)
progression_of_options[1,0] = 1
progression_of_options[2,:2] = 1
progression_of_options[3,1:] = 1
progression_of_options[4,:] = 1
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
    goal_array_dataset[i] = np.random.randint(0,high=4)
    start_config_dataset[i] = np.random.randint(0,high=5)
# start_config_dataset[i] = 4
# Adding random noise to start state.
x_array_dataset[i,0] = start_states[goal_array_dataset[i]] + start_configs[goal_array_dataset[i],start_config_dataset[i]] + 0.1*(np.random.random(2)-0.5)
reset_counter = 0
option_counter = 0
for t in range(number_timesteps-1):
# GET B
if t==0:
b_array_dataset[i,t] = 1
if t>0:
            # After 3 or 4 steps in the current segment, terminate with probability 0.33; after 5 steps, always terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t]
# select new y_array_dataset[i,t]
y_array_dataset[i,t] = valid_options[goal_array_dataset[i]][0][progression_of_options[start_config_dataset[i],min(option_counter,3)]]
option_counter+=1
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]+0.1*(np.random.random((2))-0.5)
# GET X
        # Generation runs forward here, so no backward action complements are needed.
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
# # plt.scatter()
# plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(number_timesteps))
# plt.xlim(-lim,lim)
# plt.ylim(-lim,lim)
# plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_separable.npy",x_array_dataset)
np.save("Y_separable.npy",y_array_dataset)
np.save("B_separable.npy",b_array_dataset)
np.save("A_separable.npy",a_array_dataset)
np.save("G_separable.npy",goal_array_dataset)
np.save("StartConfig_separable.npy",start_config_dataset)
| CausalSkillLearning-main | DataGenerator/SeparableTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import numpy
extensions = [
Extension(
"cpc.eval.ABX.dtw",
["cpc/eval/ABX/dtw.pyx"],
include_dirs=[numpy.get_include()],
),
]
setup(
name='CPC_audio',
version='1.0',
    description='An implementation of the contrastive predictive coding (CPC) '
                'training method for audio data.',
author='Facebook AI Research',
packages=find_packages(),
classifiers=["License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Programming Language :: Python"],
ext_modules=cythonize(extensions, language_level="3")
)
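# Typical build step (illustrative): compile the Cython extension in place so
# that cpc.eval.ABX.dtw can be imported, e.g.
#   python setup.py build_ext --inplace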
| CPC_audio-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from cpc.model import CPCModel as cpcmodel
from cpc.cpc_default_config import get_default_cpc_config
from cpc.feature_loader import getEncoder, getAR, loadArgs
dependencies = ['torch', 'torchaudio']
def CPC_audio(pretrained=False,
**kwargs):
"""
    Contrastive predictive coding (CPC) model for audio data
pretrained: if True, load a model trained on libri-light 60k
(https://arxiv.org/abs/1912.07875)
**kwargs : see cpc/cpc_default_config to get the list of possible arguments
"""
locArgs = get_default_cpc_config()
if pretrained:
checkpoint_url = 'https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/60k_epoch4-d0f474de.pt'
checkpoint = torch.hub.load_state_dict_from_url(checkpoint_url,
progress=False)
loadArgs(locArgs, argparse.Namespace(**checkpoint["config"]))
else:
args = argparse.Namespace(**kwargs)
loadArgs(locArgs, args)
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
model = cpcmodel(encoderNet, arNet)
if pretrained:
model.load_state_dict(checkpoint["weights"], strict=False)
return model
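# Example usage through torch.hub (illustrative; the GitHub repository path
# below is an assumption and may need adjusting):
#
#   import torch
#   model = torch.hub.load('facebookresearch/CPC_audio', 'CPC_audio',
#                          pretrained=True)
#   audio = torch.randn(1, 1, 16000)  # one second of 16 kHz audio
#   cFeature, encoded, _ = model(audio, None)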
| CPC_audio-main | hubconf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchaudio
import os
import json
import argparse
from .cpc_default_config import get_default_cpc_config
from .dataset import parseSeqLabels
from .model import CPCModel, ConcatenatedModel
class FeatureModule(torch.nn.Module):
r"""
A simpler interface to handle CPC models. Useful for a smooth workflow when
working with CPC trained features.
"""
def __init__(self, featureMaker, get_encoded, collapse=False):
super(FeatureModule, self).__init__()
self.get_encoded = get_encoded
self.featureMaker = featureMaker
self.collapse = collapse
def getDownsamplingFactor(self):
return self.featureMaker.gEncoder.DOWNSAMPLING
def forward(self, data):
batchAudio, label = data
cFeature, encoded, _ = self.featureMaker(batchAudio.cuda(), label)
if self.get_encoded:
cFeature = encoded
if self.collapse:
cFeature = cFeature.contiguous().view(-1, cFeature.size(2))
return cFeature
class ModelPhoneCombined(torch.nn.Module):
r"""
Concatenates a CPC feature maker and a phone predictor.
"""
def __init__(self, model, criterion, oneHot):
r"""
Arguments:
model (FeatureModule): feature maker
criterion (PhoneCriterion): phone predictor
oneHot (bool): set to True to get a one hot output
"""
super(ModelPhoneCombined, self).__init__()
self.model = model
self.criterion = criterion
self.oneHot = oneHot
def getDownsamplingFactor(self):
return self.model.getDownsamplingFactor()
def forward(self, data):
c_feature = self.model(data)
pred = self.criterion.getPrediction(c_feature)
P = pred.size(2)
if self.oneHot:
pred = pred.argmax(dim=2)
pred = toOneHot(pred, P)
else:
pred = torch.nn.functional.softmax(pred, dim=2)
return pred
def loadArgs(args, locArgs, forbiddenAttr=None):
for k, v in vars(locArgs).items():
if forbiddenAttr is not None:
if k not in forbiddenAttr:
setattr(args, k, v)
else:
setattr(args, k, v)
def loadSupervisedCriterion(pathCheckpoint):
from .criterion import CTCPhoneCriterion, PhoneCriterion
*_, args = getCheckpointData(os.path.dirname(pathCheckpoint))
_, nPhones = parseSeqLabels(args.pathPhone)
if args.CTC:
criterion = CTCPhoneCriterion(args.hiddenGar if not args.onEncoder
else args.hiddenEncoder,
nPhones, args.onEncoder)
else:
criterion = PhoneCriterion(args.hiddenGar, nPhones, args.onEncoder)
state_dict = torch.load(pathCheckpoint)
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion, nPhones
def getCheckpointData(pathDir):
if not os.path.isdir(pathDir):
return None
checkpoints = [x for x in os.listdir(pathDir)
if os.path.splitext(x)[1] == '.pt'
and os.path.splitext(x[11:])[0].isdigit()]
if len(checkpoints) == 0:
print("No checkpoints found at " + pathDir)
return None
checkpoints.sort(key=lambda x: int(os.path.splitext(x[11:])[0]))
data = os.path.join(pathDir, checkpoints[-1])
with open(os.path.join(pathDir, 'checkpoint_logs.json'), 'rb') as file:
logs = json.load(file)
with open(os.path.join(pathDir, 'checkpoint_args.json'), 'rb') as file:
args = json.load(file)
args = argparse.Namespace(**args)
defaultArgs = get_default_cpc_config()
loadArgs(defaultArgs, args)
return os.path.abspath(data), logs, defaultArgs
def getEncoder(args):
if args.encoder_type == 'mfcc':
from .model import MFCCEncoder
return MFCCEncoder(args.hiddenEncoder)
elif args.encoder_type == 'lfb':
from .model import LFBEnconder
return LFBEnconder(args.hiddenEncoder)
else:
from .model import CPCEncoder
return CPCEncoder(args.hiddenEncoder, args.normMode)
def getAR(args):
if args.arMode == 'transformer':
from .transformers import buildTransformerAR
arNet = buildTransformerAR(args.hiddenEncoder, 1,
args.sizeWindow // 160, args.abspos)
args.hiddenGar = args.hiddenEncoder
elif args.arMode == 'no_ar':
from .model import NoAr
arNet = NoAr()
else:
from .model import CPCAR
arNet = CPCAR(args.hiddenEncoder, args.hiddenGar,
args.samplingType == "sequential",
args.nLevelsGRU,
mode=args.arMode,
reverse=args.cpc_mode == "reverse")
return arNet
def loadModel(pathCheckpoints, loadStateDict=True):
models = []
hiddenGar, hiddenEncoder = 0, 0
for path in pathCheckpoints:
print(f"Loading checkpoint {path}")
_, _, locArgs = getCheckpointData(os.path.dirname(path))
doLoad = locArgs.load is not None and \
(len(locArgs.load) > 1 or
os.path.dirname(locArgs.load[0]) != os.path.dirname(path))
if doLoad:
m_, hg, he = loadModel(locArgs.load, loadStateDict=False)
hiddenGar += hg
hiddenEncoder += he
else:
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
m_ = CPCModel(encoderNet, arNet)
if loadStateDict:
print(f"Loading the state dict at {path}")
state_dict = torch.load(path, 'cpu')
m_.load_state_dict(state_dict["gEncoder"], strict=False)
if not doLoad:
hiddenGar += locArgs.hiddenGar
hiddenEncoder += locArgs.hiddenEncoder
models.append(m_)
if len(models) == 1:
return models[0], hiddenGar, hiddenEncoder
return ConcatenatedModel(models), hiddenGar, hiddenEncoder
def get_module(i_module):
if isinstance(i_module, torch.nn.DataParallel):
return get_module(i_module.module)
if isinstance(i_module, FeatureModule):
        return get_module(i_module.featureMaker)
return i_module
def save_checkpoint(model_state, criterion_state, optimizer_state, best_state,
path_checkpoint):
state_dict = {"gEncoder": model_state,
"cpcCriterion": criterion_state,
"optimizer": optimizer_state,
"best": best_state}
torch.save(state_dict, path_checkpoint)
def toOneHot(inputVector, nItems):
batchSize, seqSize = inputVector.size()
out = torch.zeros((batchSize, seqSize, nItems),
device=inputVector.device, dtype=torch.long)
out.scatter_(2, inputVector.view(batchSize, seqSize, 1), 1)
return out
def seqNormalization(out):
# out.size() = Batch x Seq x Channels
mean = out.mean(dim=1, keepdim=True)
var = out.var(dim=1, keepdim=True)
return (out - mean) / torch.sqrt(var + 1e-08)
def buildFeature(featureMaker, seqPath, strict=False,
maxSizeSeq=64000, seqNorm=False):
r"""
Apply the featureMaker to the given file.
Arguments:
- featureMaker (FeatureModule): model to apply
- seqPath (string): path of the sequence to load
- strict (bool): if True, always work with chunks of the size
maxSizeSeq
- maxSizeSeq (int): maximal size of a chunk
- seqNorm (bool): if True, normalize the output along the time
dimension to get chunks of mean zero and var 1
Return:
a torch vector of size 1 x Seq_size x Feature_dim
"""
seq = torchaudio.load(seqPath)[0]
sizeSeq = seq.size(1)
start = 0
out = []
while start < sizeSeq:
if strict and start + maxSizeSeq > sizeSeq:
break
end = min(sizeSeq, start + maxSizeSeq)
subseq = (seq[:, start:end]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
out.append(features.detach().cpu())
start += maxSizeSeq
if strict and start < sizeSeq:
subseq = (seq[:, -maxSizeSeq:]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
delta = (sizeSeq - start) // featureMaker.getDownsamplingFactor()
out.append(features[:, -delta:].detach().cpu())
out = torch.cat(out, dim=1)
return out
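if __name__ == '__main__':
    # Minimal usage sketch (illustrative; the checkpoint and audio paths are
    # placeholders). buildFeature moves each chunk to GPU, so a CUDA device
    # is assumed.
    model, hiddenGar, hiddenEncoder = loadModel(['checkpoint.pt'])
    featureMaker = FeatureModule(model, get_encoded=False).cuda().eval()
    features = buildFeature(featureMaker, 'example.flac', seqNorm=True)
    print(features.size())  # 1 x Seq_size x hiddenGar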
| CPC_audio-main | cpc/feature_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import math
class ScaledDotProductAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of the input sequence
dk, # Dimension of the input sequence
dropout, # Dropout parameter
relpos=False): # Do we retrieve positional information ?
super(ScaledDotProductAttention, self).__init__()
self.drop = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
self.relpos = relpos
self.sizeSeq = sizeSeq
if relpos:
self.Krelpos = nn.Parameter(torch.Tensor(dk, sizeSeq))
self.initmat_(self.Krelpos)
self.register_buffer('z', torch.zeros(1, sizeSeq, 1))
# A mask is set so that a node never queries data in the future
mask = torch.tril(torch.ones(sizeSeq, sizeSeq), diagonal=0)
mask = 1 - mask
mask[mask == 1] = -float('inf')
self.register_buffer('mask', mask.unsqueeze(0))
def initmat_(self, mat, dim=0):
stdv = 1. / math.sqrt(mat.size(dim))
mat.data.uniform_(-stdv, stdv)
def forward(self, Q, K, V):
# Input dim : N x sizeSeq x dk
QK = torch.bmm(Q, K.transpose(-2, -1))
if self.relpos:
bsz = Q.size(0)
QP = Q.matmul(self.Krelpos)
# This trick with z fills QP's diagonal with zeros
QP = torch.cat((self.z.expand(bsz, -1, -1), QP), 2)
QK += QP.view(bsz, self.sizeSeq + 1, self.sizeSeq)[:, 1:, :]
A = self.softmax(QK / math.sqrt(K.size(-1)) + self.mask)
return torch.bmm(self.drop(A), V)
class MultiHeadAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of a sequence
dropout, # Dropout parameter
dmodel, # Model's dimension
nheads, # Number of heads in the model
abspos): # Is positional information encoded in the input ?
super(MultiHeadAttention, self).__init__()
self.Wo = nn.Linear(dmodel, dmodel, bias=False)
self.Wk = nn.Linear(dmodel, dmodel, bias=False)
self.Wq = nn.Linear(dmodel, dmodel, bias=False)
self.Wv = nn.Linear(dmodel, dmodel, bias=False)
self.nheads = nheads
self.dk = dmodel // nheads
self.Att = ScaledDotProductAttention(sizeSeq, self.dk,
dropout, not abspos)
def trans_(self, x):
bsz, bptt, h, dk = x.size(0), x.size(1), self.nheads, self.dk
return x.view(bsz, bptt, h, dk).transpose(1, 2).contiguous().view(bsz * h, bptt, dk)
def reverse_trans_(self, x):
bsz, bptt, h, dk = x.size(
0) // self.nheads, x.size(1), self.nheads, self.dk
return x.view(bsz, h, bptt, dk).transpose(1, 2).contiguous().view(bsz, bptt, h * dk)
def forward(self, Q, K, V):
q = self.trans_(self.Wq(Q))
k = self.trans_(self.Wk(K))
v = self.trans_(self.Wv(V))
y = self.reverse_trans_(self.Att(q, k, v))
return self.Wo(y)
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = nn.Linear(din, dff, bias=True)
self.lin2 = nn.Linear(dff, dout, bias=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class TransformerLayer(nn.Module):
def __init__(self, sizeSeq=32, dmodel=512, dff=2048,
dropout=0.1, nheads=8,
abspos=False):
super(TransformerLayer, self).__init__()
self.multihead = MultiHeadAttention(sizeSeq, dropout,
dmodel, nheads, abspos)
self.ln_multihead = nn.LayerNorm(dmodel)
self.ffnetwork = FFNetwork(dmodel, dmodel, dff, dropout)
self.ln_ffnetwork = nn.LayerNorm(dmodel)
def forward(self, x):
y = self.ln_multihead(x + self.multihead(Q=x, K=x, V=x))
return self.ln_ffnetwork(y + self.ffnetwork(y))
class StaticPositionEmbedding(nn.Module):
def __init__(self, seqlen, dmodel):
super(StaticPositionEmbedding, self).__init__()
pos = torch.arange(0., seqlen).unsqueeze(1).repeat(1, dmodel)
dim = torch.arange(0., dmodel).unsqueeze(0).repeat(seqlen, 1)
div = torch.exp(- math.log(10000) * (2*(dim//2)/dmodel))
pos *= div
pos[:, 0::2] = torch.sin(pos[:, 0::2])
pos[:, 1::2] = torch.cos(pos[:, 1::2])
self.register_buffer('pe', pos.unsqueeze(0))
def forward(self, x):
return x + self.pe[:, :x.size(1), :]
def buildTransformerAR(dimEncoded, # Output dimension of the encoder
nLayers, # Number of transformer layers
sizeSeq, # Expected size of the input sequence
abspos):
layerSequence = []
if abspos:
layerSequence += [StaticPositionEmbedding(sizeSeq, dimEncoded)]
layerSequence += [TransformerLayer(sizeSeq=sizeSeq,
dmodel=dimEncoded, abspos=abspos)
for i in range(nLayers)]
return nn.Sequential(*layerSequence)
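if __name__ == '__main__':
    # Quick shape check (illustrative): a two-layer causal transformer over
    # sequences of length 128 with 512-dim features. The triangular mask in
    # ScaledDotProductAttention keeps position t from attending beyond t.
    net = buildTransformerAR(dimEncoded=512, nLayers=2, sizeSeq=128, abspos=True)
    x = torch.randn(8, 128, 512)
    print(net(x).size())  # torch.Size([8, 128, 512])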
| CPC_audio-main | cpc/transformers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import torch
###########################################
# Networks
###########################################
class IDModule(nn.Module):
def __init__(self, *args, **kwargs):
super(IDModule, self).__init__()
def forward(self, x):
return x
class ChannelNorm(nn.Module):
def __init__(self,
numFeatures,
epsilon=1e-05,
affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean)*torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self,
sizeHidden=512,
normMode="layerNorm"):
super(CPCEncoder, self).__init__()
validModes = ["batchNorm", "instanceNorm", "ID", "layerNorm"]
if normMode not in validModes:
raise ValueError(f"Norm mode must be in {validModes}")
if normMode == "instanceNorm":
def normLayer(x): return nn.InstanceNorm1d(x, affine=True)
elif normMode == "ID":
normLayer = IDModule
elif normMode == "layerNorm":
normLayer = ChannelNorm
else:
normLayer = nn.BatchNorm1d
self.dimEncoded = sizeHidden
self.conv0 = nn.Conv1d(1, sizeHidden, 10, stride=5, padding=3)
self.batchNorm0 = normLayer(sizeHidden)
self.conv1 = nn.Conv1d(sizeHidden, sizeHidden, 8, stride=4, padding=2)
self.batchNorm1 = normLayer(sizeHidden)
self.conv2 = nn.Conv1d(sizeHidden, sizeHidden, 4,
stride=2, padding=1)
self.batchNorm2 = normLayer(sizeHidden)
self.conv3 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm3 = normLayer(sizeHidden)
self.conv4 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm4 = normLayer(sizeHidden)
self.DOWNSAMPLING = 160
def getDimOutput(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
class MFCCEncoder(nn.Module):
def __init__(self,
dimEncoded):
super(MFCCEncoder, self).__init__()
melkwargs = {"n_mels": max(128, dimEncoded), "n_fft": 321}
self.dimEncoded = dimEncoded
self.MFCC = torchaudio.transforms.MFCC(n_mfcc=dimEncoded,
melkwargs=melkwargs)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.MFCC(x)
return x.permute(0, 2, 1)
class LFBEnconder(nn.Module):
def __init__(self, dimEncoded, normalize=True):
super(LFBEnconder, self).__init__()
self.dimEncoded = dimEncoded
self.conv = nn.Conv1d(1, 2 * dimEncoded,
400, stride=1)
self.register_buffer('han', torch.hann_window(400).view(1, 1, 400))
self.instancenorm = nn.InstanceNorm1d(dimEncoded, momentum=1) \
if normalize else None
def forward(self, x):
N, C, L = x.size()
x = self.conv(x)
x = x.view(N, self.dimEncoded, 2, -1)
x = x[:, :, 0, :]**2 + x[:, :, 1, :]**2
x = x.view(N * self.dimEncoded, 1, -1)
x = torch.nn.functional.conv1d(x, self.han, bias=None,
stride=160, padding=350)
x = x.view(N, self.dimEncoded, -1)
x = torch.log(1 + torch.abs(x))
# Normalization
if self.instancenorm is not None:
x = self.instancenorm(x)
return x
class CPCAR(nn.Module):
def __init__(self,
dimEncoded,
dimOutput,
keepHidden,
nLevelsGRU,
mode="GRU",
reverse=False):
super(CPCAR, self).__init__()
self.RESIDUAL_STD = 0.1
if mode == "LSTM":
self.baseNet = nn.LSTM(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
elif mode == "RNN":
self.baseNet = nn.RNN(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
else:
self.baseNet = nn.GRU(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
self.hidden = None
self.keepHidden = keepHidden
self.reverse = reverse
def getDimOutput(self):
return self.baseNet.hidden_size
def forward(self, x):
if self.reverse:
x = torch.flip(x, [1])
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keepHidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
# For better modularity, a sequence's order should be preserved
# by each module
if self.reverse:
x = torch.flip(x, [1])
return x
class NoAr(nn.Module):
def __init__(self, *args):
super(NoAr, self).__init__()
def forward(self, x):
return x
class BiDIRARTangled(nn.Module):
r"""
    Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRARTangled, self).__init__()
assert(dimOutput % 2 == 0)
self.ARNet = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True,
bidirectional=True)
def getDimOutput(self):
return self.ARNet.hidden_size * 2
def forward(self, x):
self.ARNet.flatten_parameters()
xf, _ = self.ARNet(x)
return xf
class BiDIRAR(nn.Module):
r"""
    Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRAR, self).__init__()
assert(dimOutput % 2 == 0)
self.netForward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
self.netBackward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
def getDimOutput(self):
return self.netForward.hidden_size * 2
def forward(self, x):
self.netForward.flatten_parameters()
self.netBackward.flatten_parameters()
xf, _ = self.netForward(x)
xb, _ = self.netBackward(torch.flip(x, [1]))
return torch.cat([xf, torch.flip(xb, [1])], dim=2)
###########################################
# Model
###########################################
class CPCModel(nn.Module):
def __init__(self,
encoder,
AR):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = AR
def forward(self, batchData, label):
encodedData = self.gEncoder(batchData).permute(0, 2, 1)
cFeature = self.gAR(encodedData)
return cFeature, encodedData, label
class ConcatenatedModel(nn.Module):
def __init__(self, model_list):
super(ConcatenatedModel, self).__init__()
self.models = torch.nn.ModuleList(model_list)
def forward(self, batchData, label):
outFeatures = []
outEncoded = []
for model in self.models:
cFeature, encodedData, label = model(batchData, label)
outFeatures.append(cFeature)
outEncoded.append(encodedData)
return torch.cat(outFeatures, dim=2), \
torch.cat(outEncoded, dim=2), label
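if __name__ == '__main__':
    # Shape sanity check (illustrative): the encoder downsamples raw audio by
    # a factor of 160, so 20480 samples become 128 frames.
    encoder = CPCEncoder(sizeHidden=256)
    ar = CPCAR(dimEncoded=256, dimOutput=256, keepHidden=False, nLevelsGRU=1)
    cpc = CPCModel(encoder, ar)
    audio = torch.randn(4, 1, 20480)
    cFeature, encoded, _ = cpc(audio, None)
    print(cFeature.size(), encoded.size())  # both (4, 128, 256)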
| CPC_audio-main | cpc/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import time
import tqdm
import torch
import soundfile as sf
from pathlib import Path
from copy import deepcopy
from torch.multiprocessing import Pool
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler, BatchSampler
import torchaudio
class AudioBatchData(Dataset):
def __init__(self,
path,
sizeWindow,
seqNames,
phoneLabelsDict,
nSpeakers,
nProcessLoader=50,
MAX_SIZE_LOADED=4000000000):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
            - phoneLabelsDict (dictionary): if not None, a dictionary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
- nSpeakers (int): number of speakers to expect.
- nProcessLoader (int): number of processes to call when loading the
data from the disk
- MAX_SIZE_LOADED (int): target maximal size of the floating array
containing all loaded data.
"""
self.MAX_SIZE_LOADED = MAX_SIZE_LOADED
self.nProcessLoader = nProcessLoader
self.dbPath = Path(path)
self.sizeWindow = sizeWindow
self.seqNames = [(s, self.dbPath / x) for s, x in seqNames]
self.reload_pool = Pool(nProcessLoader)
self.prepare()
self.speakers = list(range(nSpeakers))
self.data = []
self.phoneSize = 0 if phoneLabelsDict is None else \
phoneLabelsDict["step"]
self.phoneStep = 0 if phoneLabelsDict is None else \
self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.loadNextPack(first=True)
self.loadNextPack()
self.doubleLabels = False
def resetPhoneLabels(self, newPhoneLabels, step):
self.phoneSize = step
self.phoneStep = self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(newPhoneLabels)
self.loadNextPack()
    @staticmethod
    def splitSeqTags(seqName):
        path = os.path.normpath(seqName)
        return path.split(os.sep)
def getSeqNames(self):
return [str(x[1]) for x in self.seqNames]
def clear(self):
if 'data' in self.__dict__:
del self.data
if 'speakerLabel' in self.__dict__:
del self.speakerLabel
if 'phoneLabels' in self.__dict__:
del self.phoneLabels
if 'seqLabel' in self.__dict__:
del self.seqLabel
def prepare(self):
random.shuffle(self.seqNames)
start_time = time.time()
print("Checking length...")
allLength = self.reload_pool.map(extractLength, self.seqNames)
self.packageIndex, self.totSize = [], 0
start, packageSize = 0, 0
for index, length in tqdm.tqdm(enumerate(allLength)):
packageSize += length
if packageSize > self.MAX_SIZE_LOADED:
self.packageIndex.append([start, index])
self.totSize += packageSize
start, packageSize = index, 0
if packageSize > 0:
self.packageIndex.append([start, len(self.seqNames)])
self.totSize += packageSize
print(f"Done, elapsed: {time.time() - start_time:.3f} seconds")
print(f'Scanned {len(self.seqNames)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f"{len(self.packageIndex)} chunks computed")
self.currentPack = -1
self.nextPack = 0
def getNPacks(self):
return len(self.packageIndex)
def loadNextPack(self, first=False):
self.clear()
if not first:
self.currentPack = self.nextPack
start_time = time.time()
print('Joining pool')
self.r.wait()
print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')
self.nextData = self.r.get()
self.parseNextDataBlock()
del self.nextData
self.nextPack = (self.currentPack + 1) % len(self.packageIndex)
seqStart, seqEnd = self.packageIndex[self.nextPack]
if self.nextPack == 0 and len(self.packageIndex) > 1:
self.prepare()
self.r = self.reload_pool.map_async(loadFile,
self.seqNames[seqStart:seqEnd])
def parseNextDataBlock(self):
# Labels
self.speakerLabel = [0]
self.seqLabel = [0]
self.phoneLabels = []
speakerSize = 0
indexSpeaker = 0
# To accelerate the process a bit
self.nextData.sort(key=lambda x: (x[0], x[1]))
tmpData = []
for speaker, seqName, seq in self.nextData:
while self.speakers[indexSpeaker] < speaker:
indexSpeaker += 1
self.speakerLabel.append(speakerSize)
if self.speakers[indexSpeaker] != speaker:
raise ValueError(f'{speaker} invalid speaker')
if self.phoneLabelsDict is not None:
self.phoneLabels += self.phoneLabelsDict[seqName]
newSize = len(self.phoneLabelsDict[seqName]) * self.phoneSize
seq = seq[:newSize]
sizeSeq = seq.size(0)
tmpData.append(seq)
self.seqLabel.append(self.seqLabel[-1] + sizeSeq)
speakerSize += sizeSeq
del seq
self.speakerLabel.append(speakerSize)
self.data = torch.cat(tmpData, dim=0)
def getPhonem(self, idx):
idPhone = idx // self.phoneSize
return self.phoneLabels[idPhone:(idPhone + self.phoneStep)]
def getSpeakerLabel(self, idx):
idSpeaker = next(x[0] for x in enumerate(
self.speakerLabel) if x[1] > idx) - 1
return idSpeaker
def __len__(self):
return self.totSize // self.sizeWindow
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:
print(idx)
outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)
label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)
if self.phoneSize > 0:
label_phone = torch.tensor(self.getPhonem(idx), dtype=torch.long)
if not self.doubleLabels:
label = label_phone
else:
label_phone = torch.zeros(1)
if self.doubleLabels:
return outData, label, label_phone
return outData, label
def getNSpeakers(self):
return len(self.speakers)
def getNSeqs(self):
return len(self.seqLabel) - 1
def getNLoadsPerEpoch(self):
return len(self.packageIndex)
def getBaseSampler(self, type, batchSize, offset):
if type == "samespeaker":
return SameSpeakerSampler(batchSize, self.speakerLabel,
self.sizeWindow, offset)
if type == "samesequence":
return SameSpeakerSampler(batchSize, self.seqLabel,
self.sizeWindow, offset)
if type == "sequential":
return SequentialSampler(len(self.data), self.sizeWindow,
offset, batchSize)
sampler = UniformAudioSampler(len(self.data), self.sizeWindow,
offset)
return BatchSampler(sampler, batchSize, True)
def getDataLoader(self, batchSize, type, randomOffset, numWorkers=0,
onLoop=-1):
r"""
Get a batch sampler for the current dataset.
Args:
            - batchSize (int): batch size
            - type (string):
                type == "samespeaker": grouped sampler, speaker-wise
                type == "samesequence": grouped sampler, sequence-wise
                type == "sequential": sequential sampling
                else: uniform random sampling of the full audio
                      vector
            - randomOffset (bool): if True, add a random offset to the sampler
                                   at the beginning of each iteration
"""
nLoops = len(self.packageIndex)
totSize = self.totSize // (self.sizeWindow * batchSize)
if onLoop >= 0:
self.currentPack = onLoop - 1
self.loadNextPack()
nLoops = 1
def samplerCall():
offset = random.randint(0, self.sizeWindow // 2) \
if randomOffset else 0
return self.getBaseSampler(type, batchSize, offset)
return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,
totSize, numWorkers)
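# Example pipeline (illustrative; the dataset path is an assumption):
#   seqNames, speakers = findAllSeqs('/data/LibriSpeech/train-clean-100')
#   dataset = AudioBatchData('/data/LibriSpeech/train-clean-100', 20480,
#                            seqNames, None, len(speakers))
#   loader = dataset.getDataLoader(8, 'samespeaker', randomOffset=True)
#   for batch, label in loader:
#       ...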
def loadFile(data):
speaker, fullPath = data
seqName = fullPath.stem
    # Due to issues that arise when combining torchaudio.load with
    # torch.multiprocessing, we use soundfile to load the data.
seq = torch.tensor(sf.read(fullPath)[0]).float()
if len(seq.size()) == 2:
seq = seq.mean(dim=1)
return speaker, seqName, seq
class AudioLoader(object):
r"""
A DataLoader meant to handle an AudioBatchData object.
In order to handle big datasets AudioBatchData works with big chunks of
audio it loads sequentially in memory: once all batches have been sampled
on a chunk, the AudioBatchData loads the next one.
"""
def __init__(self,
dataset,
samplerCall,
nLoop,
updateCall,
size,
numWorkers):
r"""
Args:
- dataset (AudioBatchData): target dataset
- samplerCall (function): batch-sampler to call
- nLoop (int): number of chunks to load
- updateCall (function): function loading the next chunk
- size (int): total number of batches
- numWorkers (int): see torch.utils.data.DataLoader
"""
self.samplerCall = samplerCall
self.updateCall = updateCall
self.nLoop = nLoop
self.size = size
self.dataset = dataset
self.numWorkers = numWorkers
def __len__(self):
return self.size
def __iter__(self):
for i in range(self.nLoop):
sampler = self.samplerCall()
dataloader = DataLoader(self.dataset,
batch_sampler=sampler,
num_workers=self.numWorkers)
for x in dataloader:
yield x
if i < self.nLoop - 1:
self.updateCall()
class UniformAudioSampler(Sampler):
def __init__(self,
dataSize,
sizeWindow,
offset):
self.len = dataSize // sizeWindow
self.sizeWindow = sizeWindow
self.offset = offset
if self.offset > 0:
self.len -= 1
def __iter__(self):
return iter((self.offset
+ self.sizeWindow * torch.randperm(self.len)).tolist())
def __len__(self):
return self.len
class SequentialSampler(Sampler):
def __init__(self, dataSize, sizeWindow, offset, batchSize):
self.len = (dataSize // sizeWindow) // batchSize
self.sizeWindow = sizeWindow
self.offset = offset
self.startBatches = [x * (dataSize // batchSize)
for x in range(batchSize)]
self.batchSize = batchSize
if self.offset > 0:
self.len -= 1
def __iter__(self):
for idx in range(self.len):
yield [self.offset + self.sizeWindow * idx
+ start for start in self.startBatches]
def __len__(self):
return self.len
class SameSpeakerSampler(Sampler):
def __init__(self,
batchSize,
samplingIntervals,
sizeWindow,
offset):
self.samplingIntervals = samplingIntervals
self.sizeWindow = sizeWindow
self.batchSize = batchSize
self.offset = offset
if self.samplingIntervals[0] != 0:
raise AttributeError("Sampling intervals should start at zero")
nWindows = len(self.samplingIntervals) - 1
self.sizeSamplers = [(self.samplingIntervals[i+1] -
self.samplingIntervals[i]) // self.sizeWindow
for i in range(nWindows)]
if self.offset > 0:
self.sizeSamplers = [max(0, x - 1) for x in self.sizeSamplers]
order = [(x, torch.randperm(val).tolist())
for x, val in enumerate(self.sizeSamplers) if val > 0]
# Build Batches
self.batches = []
for indexSampler, randperm in order:
indexStart, sizeSampler = 0, self.sizeSamplers[indexSampler]
while indexStart < sizeSampler:
indexEnd = min(sizeSampler, indexStart + self.batchSize)
locBatch = [self.getIndex(x, indexSampler)
for x in randperm[indexStart:indexEnd]]
indexStart = indexEnd
self.batches.append(locBatch)
def __len__(self):
return len(self.batches)
def getIndex(self, x, iInterval):
return self.offset + x * self.sizeWindow \
+ self.samplingIntervals[iInterval]
def __iter__(self):
random.shuffle(self.batches)
return iter(self.batches)
def extractLength(couple):
speaker, locPath = couple
info = torchaudio.info(str(locPath))[0]
return info.length
def findAllSeqs(dirName,
extension='.flac',
loadCache=False,
speaker_level=1):
r"""
Lists all the sequences with the given extension in the dirName directory.
Output:
        outSequences, outSpeakers
        outSequences
            A list of tuples (speaker, seq_path) where:
                - speaker is the corresponding speaker index
                - seq_path is the path of each sequence, relative to the
                  parent directory
outSpeakers
The speaker labels (in order)
The speaker labels are organized the following way
\dirName
\speaker_label
\..
...
seqName.extension
Adjust the value of speaker_level if you want to choose which level of
directory defines the speaker label. Ex if speaker_level == 2 then the
dataset should be organized in the following fashion
\dirName
\crappy_label
\speaker_label
\..
...
seqName.extension
    Set speaker_level == 0 if no speaker label should be retrieved, whatever
    the organization of the dataset.
"""
cache_path = os.path.join(dirName, '_seqs_cache.txt')
if loadCache:
try:
outSequences, speakers = torch.load(cache_path)
print(f'Loaded from cache {cache_path} successfully')
return outSequences, speakers
except OSError as err:
print(f'Ran in an error while loading {cache_path}: {err}')
print('Could not load cache, rebuilding')
if dirName[-1] != os.sep:
dirName += os.sep
prefixSize = len(dirName)
speakersTarget = {}
outSequences = []
for root, dirs, filenames in tqdm.tqdm(os.walk(dirName)):
filtered_files = [f for f in filenames if f.endswith(extension)]
if len(filtered_files) > 0:
speakerStr = (os.sep).join(
root[prefixSize:].split(os.sep)[:speaker_level])
if speakerStr not in speakersTarget:
speakersTarget[speakerStr] = len(speakersTarget)
speaker = speakersTarget[speakerStr]
for filename in filtered_files:
full_path = os.path.join(root[prefixSize:], filename)
outSequences.append((speaker, full_path))
outSpeakers = [None for x in speakersTarget]
for key, index in speakersTarget.items():
outSpeakers[index] = key
try:
torch.save((outSequences, outSpeakers), cache_path)
print(f'Saved cache file at {cache_path}')
except OSError as err:
print(f'Ran in an error while saving {cache_path}: {err}')
return outSequences, outSpeakers
def parseSeqLabels(pathLabels):
with open(pathLabels, 'r') as f:
lines = f.readlines()
output = {"step": 160} # Step in librispeech dataset is 160bits
maxPhone = 0
for line in lines:
data = line.split()
output[data[0]] = [int(x) for x in data[1:]]
maxPhone = max(maxPhone, max(output[data[0]]))
return output, maxPhone + 1
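# Expected label-file format for parseSeqLabels (illustrative): one sequence
# per line, the sequence name followed by one integer phone label per
# 160-sample step, e.g.
#   1272-128104-0000 4 4 4 12 12 7
# The function then returns ({"step": 160, "1272-128104-0000": [4, ...]},
# nPhones) with nPhones = max label + 1.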
def filterSeqs(pathTxt, seqCouples):
with open(pathTxt, 'r') as f:
inSeqs = [p.replace('\n', '') for p in f.readlines()]
inSeqs.sort()
seqCouples.sort(key=lambda x: os.path.basename(os.path.splitext(x[1])[0]))
output, index = [], 0
for x in seqCouples:
seq = os.path.basename(os.path.splitext(x[1])[0])
while index < len(inSeqs) and seq > inSeqs[index]:
index += 1
if index == len(inSeqs):
break
if seq == inSeqs[index]:
output.append(x)
return output
| CPC_audio-main | cpc/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import numpy as np
import torch
import time
from copy import deepcopy
import random
import psutil
import sys
import cpc.criterion as cr
import cpc.model as model
import cpc.utils.misc as utils
import cpc.feature_loader as fl
from cpc.cpc_default_config import set_default_cpc_config
from cpc.dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels
def getCriterion(args, downsampling, nSpeakers, nPhones):
dimFeatures = args.hiddenGar if not args.onEncoder else args.hiddenEncoder
if not args.supervised:
if args.cpc_mode == 'none':
cpcCriterion = cr.NoneCriterion()
else:
sizeInputSeq = (args.sizeWindow // downsampling)
cpcCriterion = cr.CPCUnsupersivedCriterion(args.nPredicts,
args.hiddenGar,
args.hiddenEncoder,
args.negativeSamplingExt,
mode=args.cpc_mode,
rnnMode=args.rnnMode,
dropout=args.dropout,
nSpeakers=nSpeakers,
speakerEmbedding=args.speakerEmbedding,
sizeInputSeq=sizeInputSeq)
elif args.pathPhone is not None:
if not args.CTC:
cpcCriterion = cr.PhoneCriterion(dimFeatures,
nPhones, args.onEncoder,
nLayers=args.nLevelsPhone)
else:
cpcCriterion = cr.CTCPhoneCriterion(dimFeatures,
nPhones, args.onEncoder)
else:
cpcCriterion = cr.SpeakerCriterion(dimFeatures, nSpeakers)
return cpcCriterion
def loadCriterion(pathCheckpoint, downsampling, nSpeakers, nPhones):
_, _, locArgs = fl.getCheckpointData(os.path.dirname(pathCheckpoint))
criterion = getCriterion(locArgs, downsampling, nSpeakers, nPhones)
state_dict = torch.load(pathCheckpoint, 'cpu')
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion
def trainStep(dataLoader,
cpcModel,
cpcCriterion,
optimizer,
scheduler,
loggingStep):
cpcModel.train()
cpcCriterion.train()
start_time = time.perf_counter()
n_examples = 0
logs, lastlogs = {}, None
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
n_examples += batchData.size(0)
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
totLoss = allLosses.sum()
totLoss.backward()
# Show grads ?
optimizer.step()
optimizer.zero_grad()
if "locLoss_train" not in logs:
logs["locLoss_train"] = np.zeros(allLosses.size(1))
logs["locAcc_train"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_train"] += (allLosses.mean(dim=0)).detach().cpu().numpy()
logs["locAcc_train"] += (allAcc.mean(dim=0)).cpu().numpy()
if (step + 1) % loggingStep == 0:
new_time = time.perf_counter()
elapsed = new_time - start_time
print(f"Update {step + 1}")
print(f"elapsed: {elapsed:.1f} s")
print(
f"{1000.0 * elapsed / loggingStep:.1f} ms per batch, {1000.0 * elapsed / n_examples:.1f} ms / example")
locLogs = utils.update_logs(logs, loggingStep, lastlogs)
lastlogs = deepcopy(logs)
utils.show_logs("Training loss", locLogs)
start_time, n_examples = new_time, 0
if scheduler is not None:
scheduler.step()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Average training loss on epoch", logs)
return logs
def valStep(dataLoader,
cpcModel,
cpcCriterion):
cpcCriterion.eval()
cpcModel.eval()
logs = {}
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
with torch.no_grad():
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
if "locLoss_val" not in logs:
logs["locLoss_val"] = np.zeros(allLosses.size(1))
logs["locAcc_val"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_val"] += allLosses.mean(dim=0).cpu().numpy()
logs["locAcc_val"] += allAcc.mean(dim=0).cpu().numpy()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Validation loss:", logs)
return logs
def run(trainDataset,
valDataset,
batchSize,
samplingMode,
cpcModel,
cpcCriterion,
nEpoch,
pathCheckpoint,
optimizer,
scheduler,
logs):
print(f"Running {nEpoch} epochs")
startEpoch = len(logs["epoch"])
bestAcc = 0
bestStateDict = None
start_time = time.time()
for epoch in range(startEpoch, nEpoch):
print(f"Starting epoch {epoch}")
utils.cpu_stats()
trainLoader = trainDataset.getDataLoader(batchSize, samplingMode,
True, numWorkers=0)
valLoader = valDataset.getDataLoader(batchSize, 'sequential', False,
numWorkers=0)
print("Training dataset %d batches, Validation dataset %d batches, batch size %d" %
(len(trainLoader), len(valLoader), batchSize))
locLogsTrain = trainStep(trainLoader, cpcModel, cpcCriterion,
optimizer, scheduler, logs["logging_step"])
locLogsVal = valStep(valLoader, cpcModel, cpcCriterion)
print(f'Ran {epoch + 1} epochs '
f'in {time.time() - start_time:.2f} seconds')
torch.cuda.empty_cache()
currentAccuracy = float(locLogsVal["locAcc_val"].mean())
if currentAccuracy > bestAcc:
bestAcc = currentAccuracy
bestStateDict = fl.get_module(cpcModel).state_dict()
for key, value in dict(locLogsTrain, **locLogsVal).items():
if key not in logs:
logs[key] = [None for x in range(epoch)]
if isinstance(value, np.ndarray):
value = value.tolist()
logs[key].append(value)
logs["epoch"].append(epoch)
if pathCheckpoint is not None \
and (epoch % logs["saveStep"] == 0 or epoch == nEpoch-1):
modelStateDict = fl.get_module(cpcModel).state_dict()
criterionStateDict = fl.get_module(cpcCriterion).state_dict()
fl.save_checkpoint(modelStateDict, criterionStateDict,
optimizer.state_dict(), bestStateDict,
f"{pathCheckpoint}_{epoch}.pt")
utils.save_logs(logs, pathCheckpoint + "_logs.json")
def main(args):
args = parseArgs(args)
utils.set_seed(args.random_seed)
logs = {"epoch": [], "iter": [], "saveStep": args.save_step}
loadOptimizer = False
if args.pathCheckpoint is not None and not args.restart:
cdata = fl.getCheckpointData(args.pathCheckpoint)
if cdata is not None:
data, logs, locArgs = cdata
print(f"Checkpoint detected at {data}")
fl.loadArgs(args, locArgs,
forbiddenAttr={"nGPU", "pathCheckpoint",
"debug", "restart", "world_size",
"n_nodes", "node_id", "n_gpu_per_node",
"max_size_loaded"})
args.load, loadOptimizer = [data], True
args.loadCriterion = True
logs["logging_step"] = args.logging_step
print(f'CONFIG:\n{json.dumps(vars(args), indent=4, sort_keys=True)}')
print('-' * 50)
seqNames, speakers = findAllSeqs(args.pathDB,
extension=args.file_extension,
loadCache=not args.ignore_cache)
print(f'Found files: {len(seqNames)} seqs, {len(speakers)} speakers')
# Datasets
if args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, seqNames)
else:
seqTrain = seqNames
if args.pathVal is None:
random.shuffle(seqTrain)
sizeTrain = int(0.99 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
print(f'Found files: {len(seqTrain)} train, {len(seqVal)} val')
else:
seqVal = filterSeqs(args.pathVal, seqNames)
if args.debug:
seqTrain = seqTrain[-1000:]
seqVal = seqVal[-100:]
phoneLabels, nPhones = None, None
if args.supervised and args.pathPhone is not None:
print("Loading the phone labels at " + args.pathPhone)
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
print(f"{nPhones} phones found")
print("")
print(f'Loading audio data at {args.pathDB}')
print("Loading the training dataset")
trainDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqTrain,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader,
MAX_SIZE_LOADED=args.max_size_loaded)
print("Training dataset loaded")
print("")
print("Loading the validation dataset")
valDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqVal,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader)
print("Validation dataset loaded")
print("")
if args.load is not None:
cpcModel, args.hiddenGar, args.hiddenEncoder = \
fl.loadModel(args.load)
else:
# Encoder network
encoderNet = fl.getEncoder(args)
# AR Network
arNet = fl.getAR(args)
cpcModel = model.CPCModel(encoderNet, arNet)
batchSize = args.nGPU * args.batchSizeGPU
cpcModel.supervised = args.supervised
# Training criterion
if args.load is not None and args.loadCriterion:
cpcCriterion = loadCriterion(args.load[0], cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
else:
cpcCriterion = getCriterion(args, cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
if loadOptimizer:
state_dict = torch.load(args.load[0], 'cpu')
cpcCriterion.load_state_dict(state_dict["cpcCriterion"])
cpcCriterion.cuda()
cpcModel.cuda()
# Optimizer
g_params = list(cpcCriterion.parameters()) + list(cpcModel.parameters())
lr = args.learningRate
optimizer = torch.optim.Adam(g_params, lr=lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
if loadOptimizer:
print("Loading optimizer " + args.load[0])
state_dict = torch.load(args.load[0], 'cpu')
if "optimizer" in state_dict:
optimizer.load_state_dict(state_dict["optimizer"])
# Checkpoint
if args.pathCheckpoint is not None:
if not os.path.isdir(args.pathCheckpoint):
os.mkdir(args.pathCheckpoint)
args.pathCheckpoint = os.path.join(args.pathCheckpoint, "checkpoint")
scheduler = None
if args.schedulerStep > 0:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
args.schedulerStep,
gamma=0.5)
if args.schedulerRamp is not None:
n_epoch = args.schedulerRamp
print(f"Ramp activated. n_e = {n_epoch}")
scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=lambda epoch: utils.ramp_scheduling_function(
n_epoch, epoch),
last_epoch=-1)
if scheduler is None:
scheduler = scheduler_ramp
else:
scheduler = utils.SchedulerCombiner([scheduler_ramp, scheduler],
[0, args.schedulerRamp])
if scheduler is not None:
for i in range(len(logs["epoch"])):
scheduler.step()
cpcModel = torch.nn.DataParallel(cpcModel,
device_ids=range(args.nGPU)).cuda()
cpcCriterion = torch.nn.DataParallel(cpcCriterion,
device_ids=range(args.nGPU)).cuda()
run(trainDataset,
valDataset,
batchSize,
args.samplingType,
cpcModel,
cpcCriterion,
args.nEpoch,
args.pathCheckpoint,
optimizer,
scheduler,
logs)
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Trainer')
# Default arguments:
parser = set_default_cpc_config(parser)
group_db = parser.add_argument_group('Dataset')
group_db.add_argument('--pathDB', type=str, default=None,
help='Path to the directory containing the '
'data.')
group_db.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset.")
group_db.add_argument('--pathTrain', type=str, default=None,
help='Path to a .txt file containing the list of the '
'training sequences.')
group_db.add_argument('--pathVal', type=str, default=None,
help='Path to a .txt file containing the list of the '
'validation sequences.')
group_db.add_argument('--n_process_loader', type=int, default=8,
help='Number of processes to call to load the '
'dataset')
group_db.add_argument('--ignore_cache', action='store_true',
help='Activate if the dataset has been modified '
'since the last training session.')
group_db.add_argument('--max_size_loaded', type=int, default=4000000000,
help='Maximal amount of data (in byte) a dataset '
'can hold in memory at any given time')
group_supervised = parser.add_argument_group(
'Supervised mode (deprecated)')
group_supervised.add_argument('--supervised', action='store_true',
help='(Deprecated) Disable the CPC loss and activate '
'the supervised mode. By default, the supervised '
'training method is speaker classification.')
group_supervised.add_argument('--pathPhone', type=str, default=None,
help='(Supervised mode only) Path to a .txt '
'containing the phone labels of the dataset. If given '
'and --supervised, will train the model using a '
'phone classification task.')
group_supervised.add_argument('--CTC', action='store_true')
group_save = parser.add_argument_group('Save')
group_save.add_argument('--pathCheckpoint', type=str, default=None,
help="Path of the output directory.")
group_save.add_argument('--logging_step', type=int, default=1000)
group_save.add_argument('--save_step', type=int, default=5,
help="Frequency (in epochs) at which a checkpoint "
"should be saved")
group_load = parser.add_argument_group('Load')
group_load.add_argument('--load', type=str, default=None, nargs='*',
help="Load an exsiting checkpoint. Should give a path "
"to a .pt file. The directory containing the file to "
"load should also have a 'checkpoint.logs' and a "
"'checkpoint.args'")
group_load.add_argument('--loadCriterion', action='store_true',
help="If --load is activated, load the state of the "
"training criterion as well as the state of the "
"feature network (encoder + AR)")
group_load.add_argument('--restart', action='store_true',
help="If any checkpoint is found, ignore it and "
"restart the training from scratch.")
group_gpu = parser.add_argument_group('GPUs')
group_gpu.add_argument('--nGPU', type=int, default=-1,
help="Number of GPU to use (default: use all "
"available GPUs)")
group_gpu.add_argument('--batchSizeGPU', type=int, default=8,
help='Number of batches per GPU.')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
args = parser.parse_args(argv)
if args.pathDB is None and (args.pathCheckpoint is None or args.restart):
parser.print_help()
print("Either provides an input dataset or a checkpoint to load")
sys.exit()
if args.pathCheckpoint is not None:
args.pathCheckpoint = os.path.abspath(args.pathCheckpoint)
if args.load is not None:
args.load = [os.path.abspath(x) for x in args.load]
# set it up if needed, so that it is dumped along with other args
if args.random_seed is None:
args.random_seed = random.randint(0, 2**31)
if args.nGPU < 0:
args.nGPU = torch.cuda.device_count()
assert args.nGPU <= torch.cuda.device_count(),\
f"number of GPUs requested: {args.nGPU}, " \
f"number of GPUs detected: {torch.cuda.device_count()}"
print(f"Let's use {args.nGPU} GPUs!")
if args.arMode == 'no_ar':
args.hiddenGar = args.hiddenEncoder
return args
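# Example invocation (a sketch; all paths are placeholders, not part of the
# repository):
#   python cpc/train.py --pathDB /data/LibriSpeech/train-clean-100 \
#       --pathTrain train_split.txt --pathVal val_split.txt \
#       --file_extension .flac --pathCheckpoint checkpoints/cpc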
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
args = sys.argv[1:]
main(args)
| CPC_audio-main | cpc/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import os
import cpc.feature_loader as fl
from .dataset import AudioBatchData, findAllSeqs, filterSeqs
from nose.tools import eq_, ok_
from math import log
from pathlib import Path
class TestDataLoader(unittest.TestCase):
def setUp(self):
self.seq_names = ['6476/57446/6476-57446-0019.flac',
'5678/43303/5678-43303-0032.flac',
'5678/43303/5678-43303-0024.flac',
'5678/43301/5678-43301-0021.flac',
'5393/19218/5393-19218-0024.flac',
'4397/15668/4397-15668-0007.flac',
'4397/15668/4397-15668-0003.flac']
self.test_data_dir = Path(__file__).parent / 'test_data'
self.path_db = self.test_data_dir / 'test_db'
self.seq_list = self.test_data_dir / 'seq_list.txt'
self.size_window = 20480
def testFindAllSeqs(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
expected_output = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac'),
(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(4, '5678/43303/5678-43303-0024.flac'),
(4, '5678/43303/5678-43303-0032.flac'),
(5, '6476/57446/6476-57446-0019.flac')]
# We do not expect the findAllSeqs function to retrieve all sequences
# in a specific order. However, it should retrieve them all correctly
# Check the number of speakers
eq_(len(speakers), 6)
# Check the speakers names
eq_(set(speakers), {'2911', '4051', '4397', '5393', '5678', '6476'})
# Check that all speakers from 0 to 5 are represented
speaker_set = {x[0] for x in seq_names}
eq_(speaker_set, {x[0] for x in expected_output})
# Check the number of sequences
eq_(len(seq_names), len(expected_output))
# Check that the sequences are correct
sequence_set = {x[1] for x in seq_names}
eq_(sequence_set, {x[1] for x in expected_output})
# Check that the speakers are properly matched
for index_speaker, seq_name in seq_names:
speaker_name = str(Path(seq_name).stem).split('-')[0]
eq_(speakers[index_speaker], speaker_name)
def testFindAllSeqsCustomSpeakers(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac",
speaker_level=2)
expected_speakers = {'2911/12359', '4051/11218', '4397/15668',
'5393/19218', '5678/43301', '5678/43303',
'6476/57446'}
eq_(set(speakers), expected_speakers)
for index_speaker, seq_name in seq_names:
speaker_name = '/'.join(str(Path(seq_name).stem).split('-')[:2])
eq_(speakers[index_speaker], speaker_name)
expected_output = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac'),
(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(5, '5678/43303/5678-43303-0024.flac'),
(5, '5678/43303/5678-43303-0032.flac'),
(6, '6476/57446/6476-57446-0019.flac')]
# Check that the sequences are correct
sequence_set = {x[1] for x in seq_names}
eq_(sequence_set, {x[1] for x in expected_output})
def testFindAllSeqs0Speakers(self):
seq_names, speakers = findAllSeqs(str(self.path_db / '2911/12359/'),
extension=".flac")
eq_(speakers, [''])
def testFindAllSeqs0SpeakersForced(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac", speaker_level=0)
eq_(speakers, [''])
def testLoadData(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
expected_output = [(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(4, '5678/43303/5678-43303-0024.flac'),
(4, '5678/43303/5678-43303-0032.flac'),
(5, '6476/57446/6476-57446-0019.flac')]
eq_(len(seq_names), len(expected_output))
eq_({x[1] for x in seq_names}, {x[1] for x in expected_output})
phone_labels_dict = None
n_speakers = 9
test_data = AudioBatchData(self.path_db, self.size_window,
seq_names, phone_labels_dict, n_speakers)
assert(test_data.getNSpeakers() == 9)
assert(test_data.getNSeqs() == 7)
def testDataLoader(self):
batch_size = 2
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
test_data = AudioBatchData(self.path_db, self.size_window, seq_names,
None, len(speakers))
test_data_loader = test_data.getDataLoader(batch_size, "samespeaker",
True, numWorkers=2)
visited_labels = set()
for index, item in enumerate(test_data_loader):
_, labels = item
p = labels[0].item()
visited_labels.add(p)
eq_(torch.sum(labels == p), labels.size(0))
eq_(len(visited_labels), 4)
def testPartialLoader(self):
batch_size = 16
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
test_data = AudioBatchData(self.path_db, self.size_window,
seq_names, None, len(speakers),
MAX_SIZE_LOADED=1000000)
eq_(test_data.getNPacks(), 2)
test_data_loader = test_data.getDataLoader(batch_size, "samespeaker",
True, numWorkers=2)
visited_labels = set()
for index, item in enumerate(test_data_loader):
_, labels = item
p = labels[0].item()
eq_(torch.sum(labels == p), labels.size(0))
visited_labels.add(p)
eq_(len(visited_labels), 4)
class TestPhonemParser(unittest.TestCase):
def setUp(self):
from .train import parseSeqLabels
self.seqLoader = parseSeqLabels
self.test_data_dir = Path(__file__).parent / 'test_data'
self.pathPhone = self.test_data_dir / 'phone_labels.txt'
self.path_db = self.test_data_dir / 'test_db'
def testSeqLoader(self):
phone_data, nPhones = self.seqLoader(self.pathPhone)
eq_(len(phone_data), 7)
eq_(phone_data['step'], 160)
eq_(phone_data['4051-11218-0044'][43], 14)
eq_(len(phone_data['4051-11218-0044']), 1119)
eq_(nPhones, 41)
def testSeqLabels(self):
size_window = 640
seq_names = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac')]
speakers = list(set([x[0] for x in seq_names]))
phone_data, _ = self.seqLoader(self.pathPhone)
test_data = AudioBatchData(
self.path_db, size_window, seq_names, phone_data, len(speakers))
eq_(test_data.getPhonem(81280), [0, 0, 0, 0])
eq_(test_data.getPhonem(84841), [0, 0, 0, 18])
eq_(test_data.getPhonem(88201), [14, 14, 14, 14])
class TestLabelProcess(unittest.TestCase):
def setUp(self):
pass
def testLabelCollapse(self):
from .criterion.seq_alignment import collapseLabelChain
input_chain = torch.tensor([[0, 0, 0, 1, 1, 2, 0, 2, 2],
[1, 1, 1, 1, 1, 2, 2, 2, 0]],
dtype=torch.int64)
out_chain, sizes = collapseLabelChain(input_chain)
target = torch.tensor([[0, 1, 2, 0, 2],
[1, 2, 0, 0, 0]],
dtype=torch.int64)
target_size = torch.tensor([5, 3], dtype=torch.int64)
eq_((out_chain - target).sum().item(), 0)
eq_((target_size - sizes).sum().item(), 0)
def test_beam_search(self):
from .criterion.seq_alignment import beam_search
import numpy as np
blank_label = 2
n_keep = 10
data = np.array([[0.1, 0.2, 0.],
[0.4, 0.2, 0.6],
[0.01, 0.3, 0.]])
output = beam_search(data, n_keep, blank_label)
expected_pos_output = [(0.036, [1, 1]), (0.0004, [0]), (0.012, [1]),
(0.024, [1, 0, 1]), (0.0002, [0, 1, 0]), (0.0, [1, 1, 1]),
(0.0, [1, 1, 0]), (0.0006, [0, 0]), (0.036, [0, 1]),
(0.0024, [1, 0])]
expected_pos_output.sort(reverse=True)
for index, item in enumerate(expected_pos_output):
eq_(item[1], output[index][1])
ok_(abs(item[0] - output[index][0]) < 1e-08)
def test_big_beam_search(self):
from .criterion.seq_alignment import beam_search
import numpy as np
blank_label = 11
n_keep = 10
data = np.array([[0.1, 0.2, 0., 0., 0., 0., 0., 0.01, 0., 0.1, 0.99, 0.1],
[0.1, 0.2, 0.6, 0.1, 0.9, 0., 0., 0.01, 0., 0.9, 1., 0.]])
output = beam_search(data, n_keep, blank_label)[0]
expected_output = (1.09, [10])
eq_(output[0], expected_output[0])
eq_(output[1], expected_output[1])
class TestPER(unittest.TestCase):
def setUp(self):
pass
def testPER(self):
from .criterion.seq_alignment import get_seq_PER
ref_seq = [0, 1, 1, 2, 0, 2, 2]
pred_seq = [1, 1, 2, 2, 0, 0]
expected_PER = 4. / 7.
eq_(get_seq_PER(ref_seq, pred_seq), expected_PER)
class TestEncoderBuilder(unittest.TestCase):
def setUp(self):
from cpc.cpc_default_config import get_default_cpc_config
self.default_args = get_default_cpc_config()
def testBuildMFCCEncoder(self):
from cpc.model import MFCCEncoder
self.default_args.encoder_type = 'mfcc'
self.default_args.hiddenEncoder = 30
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, MFCCEncoder))
eq_(test_encoder.dimEncoded, 30)
def testBuildLFBEnconder(self):
from cpc.model import LFBEnconder
self.default_args.encoder_type = 'lfb'
self.default_args.hiddenEncoder = 12
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, LFBEnconder))
eq_(test_encoder.dimEncoded, 12)
def testBuildCPCEncoder(self):
from cpc.model import CPCEncoder
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, CPCEncoder))
eq_(test_encoder.dimEncoded, 256)
class TestARBuilder(unittest.TestCase):
def setUp(self):
from cpc.cpc_default_config import get_default_cpc_config
self.default_args = get_default_cpc_config()
def testBuildNoAR(self):
from cpc.model import NoAr
self.default_args.arMode = 'no_ar'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, NoAr))
def testBuildLSTMAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'LSTM'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.LSTM))
def testBuildGRUAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'GRU'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.GRU))
def testBuildRNNAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'RNN'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.RNN))
| CPC_audio-main | cpc/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def get_default_cpc_config():
parser = set_default_cpc_config(argparse.ArgumentParser())
return parser.parse_args([])
def set_default_cpc_config(parser):
# Run parameters
group = parser.add_argument_group('Architecture configuration',
description="The arguments defining the "
"model's architecture.")
group.add_argument('--hiddenEncoder', type=int, default=256,
help='Hidden dimension of the encoder network.')
group.add_argument('--hiddenGar', type=int, default=256,
help='Hidden dimension of the auto-regressive network')
group.add_argument('--nPredicts', type=int, default=12,
help='Number of steps to predict.')
group.add_argument('--negativeSamplingExt', type=int, default=128,
help='Number of negative samples to take.')
group.add_argument('--learningRate', type=float, default=2e-4)
group.add_argument('--schedulerStep', type=int, default=-1,
help='Step of the learning rate scheduler: at each '
'step the learning rate is divided by 2. Default: '
'no scheduler.')
group.add_argument('--schedulerRamp', type=int, default=None,
help='Enable a warm up phase for the learning rate: '
'adds a linear ramp of the given size.')
group.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer')
group.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer')
group.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer')
group.add_argument('--sizeWindow', type=int, default=20480,
help='Number of frames to consider at each batch.')
group.add_argument('--nEpoch', type=int, default=200,
help='Number of epoch to run')
group.add_argument('--samplingType', type=str, default='samespeaker',
choices=['samespeaker', 'uniform',
'samesequence', 'sequential'],
help='How to sample the negative examples in the '
'CPC loss.')
group.add_argument('--nLevelsPhone', type=int, default=1,
help='(Supervised mode only). Number of layers in '
'the phone classification network.')
group.add_argument('--cpc_mode', type=str, default=None,
choices=['reverse', 'none'],
help='Some variations on CPC.')
group.add_argument('--encoder_type', type=str,
choices=['cpc', 'mfcc', 'lfb'],
default='cpc',
help='Replace the encoder network with MFCC features '
'or learned filter banks.')
group.add_argument('--normMode', type=str, default='layerNorm',
choices=['instanceNorm', 'ID', 'layerNorm',
'batchNorm'],
help="Type of normalization to use in the encoder "
"network (default is layerNorm).")
group.add_argument('--onEncoder', action='store_true',
help="(Supervised mode only) Perform the "
"classification on the encoder's output.")
group.add_argument('--random_seed', type=int, default=None,
help="Set a specific random seed.")
group.add_argument('--speakerEmbedding', type=int, default=0,
help="(Depreciated) Feed the prediction network with "
"speaker embeddings along with the usual sequence.")
group.add_argument('--arMode', default='LSTM',
choices=['GRU', 'LSTM', 'RNN', 'no_ar', 'transformer'],
help="Architecture to use for the auto-regressive "
"network (default is lstm).")
group.add_argument('--nLevelsGRU', type=int, default=1,
help='Number of layers in the autoregressive network.')
group.add_argument('--rnnMode', type=str, default='transformer',
choices=['transformer', 'RNN', 'LSTM', 'linear',
'ffd', 'conv4', 'conv8', 'conv12'],
help="Architecture to use for the prediction network")
group.add_argument('--dropout', action='store_true',
help="Add a dropout layer at the output of the "
"prediction network.")
group.add_argument('--abspos', action='store_true',
help='If the prediction network is a transformer, '
'activate to use absolute positional coordinates.')
return parser
| CPC_audio-main | cpc/cpc_default_config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import random
import torch
import sys
import psutil
from copy import deepcopy
from bisect import bisect_left
def untensor(d):
if isinstance(d, list):
return [untensor(v) for v in d]
if isinstance(d, dict):
return dict((k, untensor(v)) for k, v in d.items())
if hasattr(d, 'tolist'):
return d.tolist()
return d
def save_logs(data, pathLogs):
with open(pathLogs, 'w') as file:
json.dump(data, file, indent=2)
def update_logs(logs, logStep, prevlogs=None):
out = {}
for key in logs:
out[key] = deepcopy(logs[key])
if prevlogs is not None:
out[key] -= prevlogs[key]
out[key] /= logStep
return out
def show_logs(text, logs):
print("")
print('-'*50)
print(text)
for key in logs:
if key == "iter":
continue
nPredicts = logs[key].shape[0]
strSteps = ['Step'] + [str(s) for s in range(1, nPredicts + 1)]
formatCommand = ' '.join(['{:>16}' for x in range(nPredicts + 1)])
print(formatCommand.format(*strSteps))
strLog = [key] + ["{:10.6f}".format(s) for s in logs[key]]
print(formatCommand.format(*strLog))
print('-'*50)
def set_seed(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def cpu_stats():
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory())
def ramp_scheduling_function(n_epoch_ramp, epoch):
if epoch >= n_epoch_ramp:
return 1
else:
return (epoch + 1) / n_epoch_ramp
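# Worked example (sketch): with n_epoch_ramp=3 the learning-rate multiplier
# grows linearly over the first epochs, then saturates at 1:
#   >>> [ramp_scheduling_function(3, e) for e in range(5)]
#   [0.3333333333333333, 0.6666666666666666, 1.0, 1, 1]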
class SchedulerCombiner:
r"""
An object which applies a list of learning rate schedulers sequentially.
"""
def __init__(self, scheduler_list, activation_step, curr_step=0):
r"""
Args:
- scheduler_list (list): a list of learning rate schedulers
- activation_step (list): a list of int. activation_step[i]
indicates at which step scheduler_list[i] should be activated
- curr_step (int): the starting step. Must be lower than
activation_step[0]
"""
if len(scheduler_list) != len(activation_step):
raise ValueError("The number of scheduler must be the same as "
"the number of activation step")
if activation_step[0] > curr_step:
raise ValueError("The first activation step cannot be higher than "
"the current step.")
self.scheduler_list = scheduler_list
self.activation_step = deepcopy(activation_step)
self.curr_step = curr_step
def step(self):
self.curr_step += 1
index = bisect_left(self.activation_step, self.curr_step) - 1
for i in reversed(range(index, len(self.scheduler_list))):
self.scheduler_list[i].step()
def __str__(self):
out = "SchedulerCombiner \n"
out += "(\n"
for index, scheduler in enumerate(self.scheduler_list):
out += f"({index}) {scheduler.__str__()} \n"
out += ")\n"
return out
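# Minimal usage sketch (assumes an optimizer already exists): warm up over
# 10 epochs, then halve the learning rate every 100 epochs. After epoch 10
# only the StepLR keeps being stepped by the combiner.
#   ramp = torch.optim.lr_scheduler.LambdaLR(
#       optimizer, lr_lambda=lambda e: ramp_scheduling_function(10, e))
#   step = torch.optim.lr_scheduler.StepLR(optimizer, 100, gamma=0.5)
#   scheduler = SchedulerCombiner([ramp, step], [0, 10])
#   # call scheduler.step() once per epoch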
| CPC_audio-main | cpc/utils/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import os
from nose.tools import eq_, ok_
from .misc import SchedulerCombiner, ramp_scheduling_function
class TestCombineSchedulers(unittest.TestCase):
def setUp(self):
self.baseLR = 1
self.module = torch.nn.Linear(1, 1)
self.optimizer = torch.optim.SGD(
list(self.module.parameters()), lr=self.baseLR)
def testCombineRamp(self):
scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lambda epoch: ramp_scheduling_function(
3, epoch))
self.optimizer.step()
eq_(self.optimizer.param_groups[0]['lr'], self.baseLR / 3)
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 2 * self.baseLR / 3)
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 1)
for i in range(12):
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 1)
def testCombineRampStep(self):
scheduler_step = torch.optim.lr_scheduler.StepLR(
self.optimizer, 6, gamma=0.5)
scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lambda epoch: ramp_scheduling_function(
3, epoch))
scheduler = SchedulerCombiner([scheduler_ramp, scheduler_step], [0, 3])
self.optimizer.step()
# Epoch 0
eq_(self.optimizer.param_groups[0]['lr'], self.baseLR / 3)
scheduler.step()
# Epoch 1
eq_(self.optimizer.param_groups[0]['lr'], 2 * self.baseLR / 3)
scheduler.step()
# Epoch 2
eq_(self.optimizer.param_groups[0]['lr'], 1)
scheduler.step()
# Epoch 3, 4, 5
for i in range(3):
eq_(self.optimizer.param_groups[0]['lr'], 1)
scheduler.step()
# Epoch 6
eq_(self.optimizer.param_groups[0]['lr'], 0.5)
| CPC_audio-main | cpc/utils/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch.nn as nn
from numpy import prod
class NormalizationLayer(nn.Module):
def __init__(self):
super(NormalizationLayer, self).__init__()
def forward(self, x, epsilon=1e-8):
return x * (((x**2).mean(dim=1, keepdim=True) + epsilon).rsqrt())
def Upscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1:
return x
s = x.size()
x = x.view(-1, s[1], s[2], 1, s[3], 1)
x = x.expand(-1, s[1], s[2], factor, s[3], factor)
x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)
return x
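# Shape example (sketch): Upscale2d performs nearest-neighbour upsampling,
# so an (N, C, H, W) tensor becomes (N, C, H * factor, W * factor), e.g.
# (1, 3, 4, 4) -> (1, 3, 8, 8) with the default factor of 2.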
def getLayerNormalizationFactor(x):
r"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
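# Worked example (sketch): for nn.Conv1d(256, 256, kernel_size=4) the weight
# tensor has size (256, 256, 4), so fan_in = 256 * 4 = 1024 and the factor
# is sqrt(2 / 1024) ~= 0.0442.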
class ConstrainedLayer(nn.Module):
r"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self,
module,
equalized=True,
lrMul=1.0,
initBiasToZero=True):
r"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv1d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
stride=1,
**kwargs):
r"""
A nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv1d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias, stride=stride),
**kwargs)
class EqualizedConv2d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
**kwargs):
r"""
A nn.Conv2d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv2d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias),
**kwargs)
class EqualizedLinear(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
bias=True,
**kwargs):
r"""
A nn.Linear module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Linear(nChannelsPrevious, nChannels,
bias=bias), **kwargs)
| CPC_audio-main | cpc/criterion/custom_layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .criterion import CPCUnsupersivedCriterion, SpeakerCriterion, \
PhoneCriterion, NoneCriterion, CTCPhoneCriterion
| CPC_audio-main | cpc/criterion/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import progressbar
import torch
from multiprocessing import Lock, Manager, Process
from copy import deepcopy
def beam_search(score_preds, nKeep, blankLabel):
T, P = score_preds.shape
beams = set([''])
pb_t_1 = {"": 1}
pnb_t_1 = {"": 0}
def getLastNumber(b):
return int(b.split(',')[-1])
for t in range(T):
nextBeams = set()
pb_t = {}
pnb_t = {}
for i_beam, b in enumerate(beams):
if b not in pb_t:
pb_t[b] = 0
pnb_t[b] = 0
if len(b) > 0:
pnb_t[b] += pnb_t_1[b] * score_preds[t, getLastNumber(b)]
pb_t[b] = (pnb_t_1[b] + pb_t_1[b]) * score_preds[t, blankLabel]
nextBeams.add(b)
for c in range(P):
if c == blankLabel:
continue
b_ = b + "," + str(c)
if b_ not in pb_t:
pb_t[b_] = 0
pnb_t[b_] = 0
if b != "" and getLastNumber(b) == c:
pnb_t[b_] += pb_t_1[b] * score_preds[t, c]
else:
pnb_t[b_] += (pb_t_1[b] + pnb_t_1[b]) * score_preds[t, c]
nextBeams.add(b_)
allPreds = [(pb_t[b] + pnb_t[b], b) for b in nextBeams]
allPreds.sort(reverse=True)
beams = [x[1] for x in allPreds[:nKeep]]
pb_t_1 = deepcopy(pb_t)
pnb_t_1 = deepcopy(pnb_t)
output = []
for score, x in allPreds[:nKeep]:
output.append((score, [int(y) for y in x.split(',') if len(y) > 0]))
return output
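# Tiny example (sketch): with per-step scores over two labels plus a blank
# (index 2), the best hypothesis is the most probable collapsed sequence:
#   >>> import numpy as np
#   >>> scores = np.array([[0.1, 0.8, 0.1],
#   ...                    [0.1, 0.8, 0.1]])
#   >>> beam_search(scores, 2, 2)[0][1]
#   [1]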
def collapseLabelChain(inputLabels):
# Shape N,T
N, T = inputLabels.size()
outSizes = torch.zeros(N, device=inputLabels.device, dtype=torch.int64)
output = []
for l in range(N):
status = inputLabels[l, :-1] - inputLabels[l, 1:]
status = torch.cat([torch.ones(1, device=status.device,
dtype=status.dtype),
status], dim=0)
outSizes[l] = (status != 0).sum()
output.append(inputLabels[l][status != 0])
maxSize = int(outSizes.max().item())
paddedOutput = torch.zeros(N, maxSize,
device=inputLabels.device,
dtype=torch.int64)
for l in range(N):
S = int(outSizes[l])
paddedOutput[l, :S] = output[l]
return paddedOutput, outSizes
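# Example (sketch): consecutive repeats collapse, and shorter rows are
# zero-padded to the longest collapsed length:
#   >>> labels = torch.tensor([[0, 0, 1, 1, 2],
#   ...                        [3, 3, 3, 3, 3]])
#   >>> out, sizes = collapseLabelChain(labels)
#   >>> out.tolist(), sizes.tolist()
#   ([[0, 1, 2], [3, 0, 0]], [3, 1])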
def NeedlemanWunschAlignScore(seq1, seq2, d, m, r, normalize=True):
N1, N2 = len(seq1), len(seq2)
# Fill up the errors
tmpRes_ = [[None for x in range(N2 + 1)] for y in range(N1 + 1)]
for i in range(N1 + 1):
tmpRes_[i][0] = i * d
for j in range(N2 + 1):
tmpRes_[0][j] = j * d
for i in range(N1):
for j in range(N2):
match = r if seq1[i] == seq2[j] else m
v1 = tmpRes_[i][j] + match
v2 = tmpRes_[i + 1][j] + d
v3 = tmpRes_[i][j + 1] + d
tmpRes_[i + 1][j + 1] = max(v1, max(v2, v3))
i = j = 0
res = -tmpRes_[N1][N2]
if normalize:
res /= float(N1)
return res
def get_seq_PER(seqLabels, detectedLabels):
return NeedlemanWunschAlignScore(seqLabels, detectedLabels, -1, -1, 0,
normalize=True)
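# Example (sketch): one substitution over a 4-phone reference gives
# PER = 1 / 4:
#   >>> get_seq_PER([0, 1, 2, 3], [0, 1, 2, 0])
#   0.25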
def getPER(dataLoader, featureMaker, blankLabel):
bar = progressbar.ProgressBar(len(dataLoader))
bar.start()
out = 0
n_items = 0
n_keep_beam_search = 100
for index, data in enumerate(dataLoader):
bar.update(index)
with torch.no_grad():
output = featureMaker(data).cpu().numpy()
labels = data[1]
labels, targetSize = collapseLabelChain(labels)
lock = Lock()
def per(rank, outScore):
S = int(targetSize[rank])
seqLabels = labels[rank, :S]
preds = beam_search(output[rank],
n_keep_beam_search, blankLabel)[0][1]
value = get_seq_PER(seqLabels, preds)
with lock:
outScore.value += value
manager = Manager()
outScore = manager.Value('f', 0.)
N, S, D = output.shape
processes = []
for rank in range(N):
p = Process(
target=per, args=(rank, outScore))
p.start()
processes.append(p)
for p in processes:
p.join()
out += outScore.value
n_items += N
bar.finish()
return (out / n_items)
| CPC_audio-main | cpc/criterion/seq_alignment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from .seq_alignment import collapseLabelChain
from .custom_layers import EqualizedLinear, EqualizedConv1d
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = EqualizedLinear(din, dff, bias=True, equalized=True)
self.lin2 = EqualizedLinear(dff, dout, bias=True, equalized=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class ShiftedConv(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConv, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True,
padding=0)
self.kernelSize = kernelSize
def forward(self, x):
# Input format: N, S, C -> need to move to N, C, S
N, S, C = x.size()
x = x.permute(0, 2, 1)
padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
x = torch.cat([padding, x], dim=2)
x = self.module(x)
x = x.permute(0, 2, 1)
return x
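# Shape sketch: the left-only zero padding makes the convolution causal, so
# an (N, S, dimOutputAR) input yields an (N, S, dimOutputEncoder) output in
# which position t only depends on inputs at positions <= t:
#   >>> conv = ShiftedConv(4, 8, kernelSize=12)
#   >>> conv(torch.randn(2, 100, 4)).shape
#   torch.Size([2, 100, 8])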
class PredictionNetwork(nn.Module):
def __init__(self,
nPredicts,
dimOutputAR,
dimOutputEncoder,
rnnMode=None,
dropout=False,
sizeInputSeq=116):
super(PredictionNetwork, self).__init__()
self.predictors = nn.ModuleList()
self.RESIDUAL_STD = 0.01
self.dimOutputAR = dimOutputAR
self.dropout = nn.Dropout(p=0.5) if dropout else None
for i in range(nPredicts):
if rnnMode == 'RNN':
self.predictors.append(
nn.RNN(dimOutputAR, dimOutputEncoder))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'LSTM':
self.predictors.append(
nn.LSTM(dimOutputAR, dimOutputEncoder, batch_first=True))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'ffd':
self.predictors.append(
FFNetwork(dimOutputAR, dimOutputEncoder,
dimOutputEncoder, 0))
elif rnnMode == 'conv4':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 4))
elif rnnMode == 'conv8':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 8))
elif rnnMode == 'conv12':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 12))
elif rnnMode == 'transformer':
from cpc.transformers import buildTransformerAR  # local module, not the HuggingFace package
self.predictors.append(
buildTransformerAR(dimOutputEncoder,
1,
sizeInputSeq,
False))
else:
self.predictors.append(
nn.Linear(dimOutputAR, dimOutputEncoder, bias=False))
if dimOutputEncoder > dimOutputAR:
residual = dimOutputEncoder - dimOutputAR
self.predictors[-1].weight.data.copy_(torch.cat([torch.randn(
dimOutputAR, dimOutputAR), self.RESIDUAL_STD * torch.randn(residual, dimOutputAR)], dim=0))
def forward(self, c, candidates):
assert(len(candidates) == len(self.predictors))
out = []
# UGLY
if isinstance(self.predictors[0], EqualizedConv1d):
c = c.permute(0, 2, 1)
for k in range(len(self.predictors)):
locC = self.predictors[k](c)
if isinstance(locC, tuple):
locC = locC[0]
if isinstance(self.predictors[k], EqualizedConv1d):
locC = locC.permute(0, 2, 1)
if self.dropout is not None:
locC = self.dropout(locC)
locC = locC.view(locC.size(0), 1, locC.size(1), locC.size(2))
outK = (locC*candidates[k]).mean(dim=3)
out.append(outK)
return out
class BaseCriterion(nn.Module):
def warmUp(self):
return False
def update(self):
return
class NoneCriterion(BaseCriterion):
def __init__(self):
super(NoneCriterion, self).__init__()
def forward(self, cFeature, encodedData, label):
return torch.zeros(1, 1, device=cFeature.device), \
torch.zeros(1, 1, device=cFeature.device)
class CPCUnsupersivedCriterion(BaseCriterion):
def __init__(self,
nPredicts, # Number of steps
dimOutputAR, # Dimension of G_ar
dimOutputEncoder, # Dimension of the convolutional net
negativeSamplingExt, # Number of negative samples to draw
mode=None,
rnnMode=False,
dropout=False,
speakerEmbedding=0,
nSpeakers=0,
sizeInputSeq=128):
super(CPCUnsupersivedCriterion, self).__init__()
if speakerEmbedding > 0:
print(
f"Using {speakerEmbedding} speaker embeddings for {nSpeakers} speakers")
self.speakerEmb = torch.nn.Embedding(nSpeakers, speakerEmbedding)
dimOutputAR += speakerEmbedding
else:
self.speakerEmb = None
self.wPrediction = PredictionNetwork(
nPredicts, dimOutputAR, dimOutputEncoder, rnnMode=rnnMode,
dropout=dropout, sizeInputSeq=sizeInputSeq - nPredicts)
self.nPredicts = nPredicts
self.negativeSamplingExt = negativeSamplingExt
self.lossCriterion = nn.CrossEntropyLoss()
if mode not in [None, "reverse"]:
raise ValueError("Invalid mode")
self.mode = mode
def sampleClean(self, encodedData, windowSize):
batchSize, nNegativeExt, dimEncoded = encodedData.size()
outputs = []
negExt = encodedData.contiguous().view(-1, dimEncoded)
# Draw nNegativeExt * batchSize negative samples anywhere in the batch
batchIdx = torch.randint(low=0, high=batchSize,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
seqIdx = torch.randint(low=1, high=nNegativeExt,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
baseIdx = torch.arange(0, windowSize, device=encodedData.device)
baseIdx = baseIdx.view(1, 1,
windowSize).expand(1,
self.negativeSamplingExt,
windowSize).expand(batchSize, self.negativeSamplingExt, windowSize)
seqIdx += baseIdx.contiguous().view(-1)
seqIdx = torch.remainder(seqIdx, nNegativeExt)
extIdx = seqIdx + batchIdx * nNegativeExt
negExt = negExt[extIdx].view(batchSize, self.negativeSamplingExt,
windowSize, dimEncoded)
labelLoss = torch.zeros((batchSize * windowSize),
dtype=torch.long,
device=encodedData.device)
for k in range(1, self.nPredicts + 1):
# Positive samples
if k < self.nPredicts:
posSeq = encodedData[:, k:-(self.nPredicts-k)]
else:
posSeq = encodedData[:, k:]
posSeq = posSeq.view(batchSize, 1, posSeq.size(1), dimEncoded)
fullSeq = torch.cat((posSeq, negExt), dim=1)
outputs.append(fullSeq)
return outputs, labelLoss
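# Shape sketch: with encodedData of size (B, S, D) and
# windowSize = S - nPredicts, sampleClean returns nPredicts tensors of size
# (B, 1 + negativeSamplingExt, windowSize, D) -- the positive sample at
# index 0 followed by the negatives -- together with the all-zero targets
# of size (B * windowSize,) consumed by the cross-entropy loss.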
def getInnerLoss(self):
return "orthoLoss", self.orthoLoss * self.wPrediction.orthoCriterion()
def forward(self, cFeature, encodedData, label):
if self.mode == "reverse":
encodedData = torch.flip(encodedData, [1])
cFeature = torch.flip(cFeature, [1])
batchSize, seqSize, dimAR = cFeature.size()
windowSize = seqSize - self.nPredicts
cFeature = cFeature[:, :windowSize]
sampledData, labelLoss = self.sampleClean(encodedData, windowSize)
if self.speakerEmb is not None:
l_ = label.view(batchSize, 1).expand(batchSize, windowSize)
embeddedSpeaker = self.speakerEmb(l_)
cFeature = torch.cat([cFeature, embeddedSpeaker], dim=2)
predictions = self.wPrediction(cFeature, sampledData)
outLosses = [0 for x in range(self.nPredicts)]
outAcc = [0 for x in range(self.nPredicts)]
for k, locPreds in enumerate(predictions[:self.nPredicts]):
locPreds = locPreds.permute(0, 2, 1)
locPreds = locPreds.contiguous().view(-1, locPreds.size(2))
lossK = self.lossCriterion(locPreds, labelLoss)
outLosses[k] += lossK.view(1, -1)
_, predsIndex = locPreds.max(1)
outAcc[k] += torch.sum(predsIndex == labelLoss).float().view(1, -1)
return torch.cat(outLosses, dim=1), \
torch.cat(outAcc, dim=1) / (windowSize * batchSize)
class SpeakerCriterion(BaseCriterion):
def __init__(self, dimEncoder, nSpeakers):
super(SpeakerCriterion, self).__init__()
self.linearSpeakerClassifier = nn.Linear(
dimEncoder, nSpeakers)
self.lossCriterion = nn.CrossEntropyLoss()
self.entropyCriterion = nn.LogSoftmax(dim=1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
batchSize = cFeature.size(0)
cFeature = cFeature[:, -1, :]
cFeature = cFeature.view(batchSize, -1)
predictions = self.linearSpeakerClassifier(cFeature)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
class PhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder,
nLayers=1):
super(PhoneCriterion, self).__init__()
if nLayers == 1:
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones)
else:
outLayers = [nn.Linear(dimEncoder, nPhones)]
for l in range(nLayers - 1):
outLayers.append(nn.ReLU())
outLayers.append(nn.Linear(nPhones, nPhones))
self.PhoneCriterionClassifier = nn.Sequential(*outLayers)
self.lossCriterion = nn.CrossEntropyLoss()
self.onEncoder = onEncoder
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
if self.onEncoder:
predictions = self.getPrediction(otherEncoded)
else:
predictions = self.getPrediction(cFeature)
predictions = predictions.view(-1, predictions.size(2))
label = label.view(-1)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
def getPrediction(self, cFeature):
batchSize, seqSize = cFeature.size(0), cFeature.size(1)
cFeature = cFeature.contiguous().view(batchSize * seqSize, -1)
output = self.PhoneCriterionClassifier(cFeature)
return output.view(batchSize, seqSize, -1)
class CTCPhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder):
super(CTCPhoneCriterion, self).__init__()
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones + 1)
self.lossCriterion = nn.CTCLoss(blank=nPhones, zero_infinity=True)
self.onEncoder = onEncoder
if onEncoder:
raise ValueError("On encoder version not implemented yet")
self.BLANK_LABEL = nPhones
def getPrediction(self, cFeature):
B, S, H = cFeature.size()
cFeature = cFeature.contiguous().view(B*S, H)
return self.PhoneCriterionClassifier(cFeature).view(B, S, -1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature)
label = label.to(predictions.device)
label, sizeLabels = collapseLabelChain(label)
avgPER = 0.
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
targetSizePred = torch.ones(B, dtype=torch.int64,
device=predictions.device) * S
loss = self.lossCriterion(predictions, label,
targetSizePred, sizeLabels).view(1, -1)
return loss, avgPER * torch.ones(1, 1, device=loss.device)
class ModelCriterionCombined(torch.nn.Module):
def __init__(self, model, criterion):
super(ModelCriterionCombined, self).__init__()
self.model = model
self.criterion = criterion
def forward(self, data, label):
c_feature, encoded_data, label = self.model(data, label)
loss, acc = self.criterion(c_feature, encoded_data, label)
return loss, acc
| CPC_audio-main | cpc/criterion/criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
import json
from pathlib import Path
import ABX.abx_group_computation as abx_g
import ABX.abx_iterators as abx_it
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel
def reduce_sparse_data(quotient, divisor):
return quotient / (1e-08 * (divisor == 0) + divisor)
def ABX(feature_function,
path_item_file,
seq_list,
distance_mode,
step_feature,
modes,
seq_norm=True,
cuda=False,
max_x_across=5,
max_size_group=30):
# ABX dataset
ABXDataset = abx_it.ABXFeatureLoader(path_item_file, seq_list,
feature_function, step_feature, True)
if cuda:
ABXDataset.cuda()
# Distance function
distance_function = abx_g.get_distance_function_from_name(distance_mode)
# Output
scores = {}
# ABX within
if 'within' in modes:
print("Computing ABX within speakers...")
ABXIterator = ABXDataset.get_iterator('within', max_size_group)
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=3).to_dense()
group_confusion = torch.sparse.sum(group_confusion, dim=3).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['within'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f"...done. ABX within : {scores['within']}")
# ABX across
if 'across' in modes:
print("Computing ABX across speakers...")
ABXIterator = ABXDataset.get_iterator('across', max_size_group)
ABXIterator.max_x = max_x_across
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=[3, 4]).to_dense()
group_confusion = torch.sparse.sum(
group_confusion, dim=[3, 4]).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['across'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f"...done. ABX across : {scores['across']}")
return scores
def update_base_parser(parser):
parser.add_argument('--debug', action='store_true')
parser.add_argument('--feature_size', type=float, default=0.01,
help="Size (in s) of one feature")
parser.add_argument('--cuda', action='store_true',
help="Use the GPU to compute distances")
parser.add_argument('--mode', type=str, default='all',
choices=['all', 'within', 'across'],
help="Type of ABX score to compute")
parser.add_argument("--max_size_group", type=int, default=10,
help="Max size of a group while computing the"
"ABX score")
parser.add_argument("--max_x_across", type=int, default=5,
help="When computing the ABX across score, maximum"
"number of speaker X to sample per couple A,B")
parser.add_argument("--out", type=str, default=None,
help="Path where the results should be saved")
def parse_args(argv):
base_parser = argparse.ArgumentParser(description='ABX metric')
subparsers = base_parser.add_subparsers(dest='load')
parser_checkpoint = subparsers.add_parser('from_checkpoint')
update_base_parser(parser_checkpoint)
parser_checkpoint.add_argument('path_checkpoint', type=str,
help="Path to the model's checkpoint")
parser_checkpoint.add_argument('path_item_file', type=str,
help="Path to the ABX .item file containing "
"the triplets labels")
parser_checkpoint.add_argument('path_dataset', type=str,
help="Path to the dataset")
parser_checkpoint.add_argument('--seq_norm', action='store_true',
help='If activated, normalize each batch '
'of feature across the time channel before '
'computing ABX.')
parser_checkpoint.add_argument('--max_size_seq', default=64000, type=int,
help='Maximal number of frames to consider '
'when computing a batch of features.')
parser_checkpoint.add_argument('--strict', action='store_true',
help='If activated, each batch of feature '
'will contain exactly max_size_seq frames.')
parser_checkpoint.add_argument('--file_extension', type=str,
default='.wav',
help='Extension of each audio file in the '
'dataset.')
parser_checkpoint.add_argument('--get_encoded', action='store_true',
help='If activated, compute the ABX score '
'using the output of the encoder network.')
parser_db = subparsers.add_parser('from_pre_computed')
update_base_parser(parser_db)
parser_db.add_argument('path_features', type=str,
help="Path to pre-computed torch features (.pt)")
parser_db.add_argument('--file_extension', type=str,
default='.pt', help='Extension of each feature '
'in the dataset')
# multi-gpu / multi-node
return base_parser.parse_args(argv)
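# Example invocation (a sketch; all paths are placeholders):
#   python cpc/eval/ABX.py from_checkpoint checkpoints/checkpoint_199.pt \
#       data/ABX.item /data/LibriSpeech/dev-clean --seq_norm --cuda
# For the from_pre_computed mode, pass --out explicitly: main() otherwise
# falls back to the parent of args.path_checkpoint, which only exists in
# the from_checkpoint mode.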
def main(argv):
args = parse_args(argv)
if args.load == 'from_checkpoint':
# Checkpoint
model = loadModel([args.path_checkpoint])[0]
model.gAR.keepHidden = True
# Feature maker
feature_maker = FeatureModule(model, args.get_encoded).cuda().eval()
def feature_function(x): return buildFeature(feature_maker, x,
seqNorm=args.seq_norm,
strict=args.strict,
maxSizeSeq=args.max_size_seq)
elif args.load == 'from_pre_computed':
def feature_function(x): return torch.load(x, 'cpu')
# Modes
if args.mode == 'all':
modes = ["within", "across"]
else:
modes = [args.mode]
distance_mode = 'cosine'
step_feature = 1 / args.feature_size
# Get the list of sequences
seq_list, _ = findAllSeqs(args.path_dataset, extension=args.file_extension)
seq_list = [(str(Path(x).stem), str(Path(args.path_dataset) / x))
for (_, x) in seq_list]
if args.debug:
seq_list = seq_list[:1000]
scores = ABX(feature_function, args.path_item_file,
seq_list, distance_mode,
step_feature, modes,
cuda=args.cuda,
seq_norm=args.seq_norm,
max_x_across=args.max_x_across,
max_size_group=args.max_size_group)
out_dir = Path(args.path_checkpoint).parent if args.out is None \
else Path(args.out)
out_dir.mkdir(exist_ok=True)
path_score = out_dir / 'ABX_scores.json'
with open(path_score, 'w') as file:
json.dump(scores, file, indent=2)
path_args = out_dir / 'ABX_args.json'
with open(path_args, 'w') as file:
json.dump(vars(args), file, indent=2)
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
| CPC_audio-main | cpc/eval/ABX.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabels (dictionnary): if not None, a dictionnary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
        print(f'Loaded {len(self.phoneOffsets) - 1} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
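# Minimal usage sketch for SingleSequenceDataset (paths are hypothetical;
# parseSeqLabels and findAllSeqs are imported above):
#   phone_labels, n_phones = parseSeqLabels('/data/phone_labels.txt')
#   seq_names, _ = findAllSeqs('/data/audio', extension='.mp3')
#   dataset = SingleSequenceDataset('/data/audio', seq_names, phone_labels)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   seq, size_seq, phone, size_phone = next(iter(loader))
# Every item is zero-padded to the longest sequence / phone transcription in
# the dataset, and the true lengths are returned alongside the data.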
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
        featureSize = featureSize // 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
        if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
            # Return a zero tensor instead of the Python int 0 so that the
            # loss.mean().backward() call in train_step remains valid.
            loss = torch.zeros_like(loss, requires_grad=True)
return loss
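# Shape sketch for CTCphone_criterion (a hedged example, not part of the
# training pipeline): with the default sizeKernel=8, the classifier convolves
# with stride sizeKernel // 2 == 4, which is why forward() divides
# featureSize by 4 before calling the CTC loss.
#   criterion = CTCphone_criterion(dimEncoder=256, nPhones=40)
#   c_feature = torch.randn(2, 128, 256)      # batch x seq x hidden
#   feature_size = torch.tensor([128, 100])
#   preds = criterion.getPrediction(c_feature, feature_size)
#   # preds: batch x (roughly seq // 4) x (nPhones + 1), the extra class
#   # being the CTC blank label.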
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def forward(self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
        sizeSeq = sizeSeq // downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
            sizeSeq = sizeSeq // downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
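# get_per decodes a single utterance with a beam search of width 20 and
# scores the decoded phone sequence against the ground truth with
# get_seq_PER, i.e. the phone error rate (normalized edit distance).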
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
            sizeSeq = sizeSeq // downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"]
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
    parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
                                     'for the Common Voice datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
                              'Set to ID to work with pre-computed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
                              help='If activated, use a randomly initialized '
                              'network.')
parser_train.add_argument('--LSTM', action='store_true',
                              help='Activate to add an LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or '
'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum'])
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor)
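# Example invocations (a sketch; paths are hypothetical):
#   python common_voices_eval.py train /data/cv/audio /data/cv/phones.txt \
#       /ckpts/cpc.pt --pathTrain train.txt --pathVal val.txt -o exp_dir
#   python common_voices_eval.py per exp_dir --name test_set
# The 'per' command reloads exp_dir/args_training.json and
# exp_dir/checkpoint.pt to compute the phone error rate.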
| CPC_audio-main | cpc/eval/common_voices_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import torch
import progressbar
import argparse
import numpy as np
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, \
ModelPhoneCombined, loadSupervisedCriterion, loadModel
def getArgs(pathCheckpoints):
pathArgs = os.path.join(os.path.dirname(pathCheckpoints),
"checkpoint_args.json")
with open(pathArgs, 'rb') as file:
return json.load(file)
def buildAllFeature(featureMaker, pathDB, pathOut,
seqList, stepSize=0.01, strict=False,
maxSizeSeq=64000, format='fea',
seqNorm=False):
totSeqs = len(seqList)
startStep = stepSize / 2
bar = progressbar.ProgressBar(maxval=totSeqs)
bar.start()
for nseq, seqPath in enumerate(seqList):
bar.update(nseq)
feature = buildFeature(featureMaker,
os.path.join(pathDB, seqPath),
strict=strict or seqNorm,
maxSizeSeq=maxSizeSeq,
seqNorm=seqNorm)
_, nSteps, hiddenSize = feature.size()
outName = os.path.basename(os.path.splitext(seqPath)[0]) + f'.{format}'
fname = os.path.join(pathOut, outName)
if format == 'npz':
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
totTime = np.array([stepSize * nSteps], dtype=np.float32)
with open(fname, 'wb') as f:
np.savez(f, time=time, features=values, totTime=totTime)
elif format == 'npy':
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
with open(fname, 'wb') as f:
np.save(f, values)
elif format == 'af':
import arrayfire as af
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
totTime = np.array([stepSize * nSteps], dtype=np.float32)
af.save_array("time", af.Array(time, dtype=af.Dtype.f32), fname)
af.save_array("totTime", af.interop.from_ndarray(totTime),
fname, append=True)
af.save_array("features", af.interop.from_ndarray(values),
fname, append=True)
else:
with open(fname, 'w') as f:
_, nSteps, hiddenSize = feature.size()
for step in range(nSteps):
line = [startStep + step * stepSize] + \
feature[0, step, :].tolist()
line = [str(x) for x in line]
linestr = ' '.join(line) + '\n'
f.write(linestr)
bar.finish()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Build features for '
                                     'ZeroSpeech Track1 evaluation')
parser.add_argument('pathDB', help='Path to the reference dataset')
parser.add_argument('pathOut', help='Path to the output features')
parser.add_argument('pathCheckpoint', help='Checkpoint to load')
parser.add_argument('--extension', type=str, default='.wav')
parser.add_argument('--addCriterion', action='store_true')
parser.add_argument('--oneHot', action='store_true')
parser.add_argument('--maxSizeSeq', default=64000, type=int)
parser.add_argument('--train_mode', action='store_true')
parser.add_argument('--format', default='fea', type=str,
choices=['npz', 'fea', 'npy', 'af'])
parser.add_argument('--strict', action='store_true')
parser.add_argument('--dimReduction', type=str, default=None)
parser.add_argument('--centroidLimits', type=int, nargs=2, default=None)
parser.add_argument('--getEncoded', action='store_true')
parser.add_argument('--clusters', type=str, default=None)
parser.add_argument('--seqNorm', action='store_true')
args = parser.parse_args()
if not os.path.isdir(args.pathOut):
os.mkdir(args.pathOut)
with open(os.path.join(os.path.dirname(args.pathOut),
f"{os.path.basename(args.pathOut)}.json"), 'w') \
as file:
json.dump(vars(args), file, indent=2)
outData = [x[1] for x in
findAllSeqs(args.pathDB, extension=args.extension,
loadCache=False)[0]]
featureMaker = loadModel([args.pathCheckpoint])[0]
stepSize = featureMaker.gEncoder.DOWNSAMPLING / 16000
print(f"stepSize : {stepSize}")
featureMaker = FeatureModule(featureMaker, args.getEncoded)
featureMaker.collapse = False
if args.addCriterion:
criterion, nPhones = loadSupervisedCriterion(args.pathCheckpoint)
featureMaker = ModelPhoneCombined(featureMaker, criterion,
nPhones, args.oneHot)
featureMaker = featureMaker.cuda(device=0)
if not args.train_mode:
featureMaker.eval()
buildAllFeature(featureMaker, args.pathDB, args.pathOut, outData,
stepSize=stepSize, strict=args.strict,
maxSizeSeq=args.maxSizeSeq,
format=args.format,
seqNorm=args.seqNorm)
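# Example invocation (a sketch; paths are hypothetical):
#   python build_zeroSpeech_features.py /data/zerospeech /features_out \
#       /ckpts/cpc.pt --extension .wav --format npz --seqNorm
# One feature file per input sequence is written to /features_out, in the
# format selected by --format ('fea' plain-text files by default).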
| CPC_audio-main | cpc/eval/build_zeroSpeech_features.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
import json
import time
import numpy as np
from pathlib import Path
from copy import deepcopy
import os
import cpc.criterion as cr
import cpc.feature_loader as fl
import cpc.utils.misc as utils
from cpc.dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels
def train_step(feature_maker, criterion, data_loader, optimizer):
if feature_maker.optimize:
feature_maker.train()
criterion.train()
logs = {"locLoss_train": 0, "locAcc_train": 0}
for step, fulldata in enumerate(data_loader):
optimizer.zero_grad()
batch_data, label = fulldata
c_feature, encoded_data, _ = feature_maker(batch_data, None)
if not feature_maker.optimize:
c_feature, encoded_data = c_feature.detach(), encoded_data.detach()
all_losses, all_acc = criterion(c_feature, encoded_data, label)
totLoss = all_losses.sum()
totLoss.backward()
optimizer.step()
logs["locLoss_train"] += np.asarray([all_losses.mean().item()])
logs["locAcc_train"] += np.asarray([all_acc.mean().item()])
logs = utils.update_logs(logs, step)
logs["iter"] = step
return logs
def val_step(feature_maker, criterion, data_loader):
feature_maker.eval()
criterion.eval()
logs = {"locLoss_val": 0, "locAcc_val": 0}
for step, fulldata in enumerate(data_loader):
with torch.no_grad():
batch_data, label = fulldata
c_feature, encoded_data, _ = feature_maker(batch_data, None)
all_losses, all_acc = criterion(c_feature, encoded_data, label)
logs["locLoss_val"] += np.asarray([all_losses.mean().item()])
logs["locAcc_val"] += np.asarray([all_acc.mean().item()])
logs = utils.update_logs(logs, step)
return logs
def run(feature_maker,
criterion,
train_loader,
val_loader,
optimizer,
logs,
n_epochs,
path_checkpoint):
start_epoch = len(logs["epoch"])
best_acc = -1
start_time = time.time()
for epoch in range(start_epoch, n_epochs):
logs_train = train_step(feature_maker, criterion, train_loader,
optimizer)
logs_val = val_step(feature_maker, criterion, val_loader)
print('')
print('_'*50)
print(f'Ran {epoch + 1} epochs '
f'in {time.time() - start_time:.2f} seconds')
utils.show_logs("Training loss", logs_train)
utils.show_logs("Validation loss", logs_val)
print('_'*50)
print('')
if logs_val["locAcc_val"] > best_acc:
best_state = deepcopy(fl.get_module(feature_maker).state_dict())
best_acc = logs_val["locAcc_val"]
logs["epoch"].append(epoch)
for key, value in dict(logs_train, **logs_val).items():
if key not in logs:
logs[key] = [None for x in range(epoch)]
if isinstance(value, np.ndarray):
value = value.tolist()
logs[key].append(value)
if (epoch % logs["saveStep"] == 0 and epoch > 0) or epoch == n_epochs - 1:
model_state_dict = fl.get_module(feature_maker).state_dict()
criterion_state_dict = fl.get_module(criterion).state_dict()
fl.save_checkpoint(model_state_dict, criterion_state_dict,
optimizer.state_dict(), best_state,
f"{path_checkpoint}_{epoch}.pt")
utils.save_logs(logs, f"{path_checkpoint}_logs.json")
def parse_args(argv):
parser = argparse.ArgumentParser(description='Linear separability trainer'
                                     ' (default task: speaker separability)')
parser.add_argument('pathDB', type=str,
help="Path to the directory containing the audio data.")
parser.add_argument('pathTrain', type=str,
help="Path to the list of the training sequences.")
parser.add_argument('pathVal', type=str,
help="Path to the list of the test sequences.")
parser.add_argument('load', type=str, nargs='*',
help="Path to the checkpoint to evaluate.")
parser.add_argument('--pathPhone', type=str, default=None,
help="Path to the phone labels. If given, will"
" compute the phone separability.")
parser.add_argument('--CTC', action='store_true',
help="Use the CTC loss (for phone separability only)")
parser.add_argument('--pathCheckpoint', type=str, default='out',
help="Path of the output directory where the "
" checkpoints should be dumped.")
parser.add_argument('--nGPU', type=int, default=-1,
                        help='Number of GPUs. Default=-1: use all available '
                        'GPUs.')
parser.add_argument('--batchSizeGPU', type=int, default=8,
help='Batch size per GPU.')
parser.add_argument('--n_epoch', type=int, default=10)
parser.add_argument('--debug', action='store_true',
help='If activated, will load only a small number '
'of audio data.')
parser.add_argument('--unfrozen', action='store_true',
help="If activated, update the feature network as well"
" as the linear classifier")
parser.add_argument('--no_pretraining', action='store_true',
help="If activated, work from an untrained model.")
parser.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in pathDB.")
parser.add_argument('--save_step', type=int, default=-1,
help="Frequency at which a checkpoint should be saved,"
" et to -1 (default) to save only the best checkpoint.")
parser.add_argument('--get_encoded', action='store_true',
help="If activated, will work with the output of the "
" convolutional encoder (see CPC's architecture).")
parser.add_argument('--lr', type=float, default=2e-4,
help='Learning rate.')
parser.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser.add_argument('--epsilon', type=float, default=2e-8,
help='Value of epsilon for the Adam optimizer.')
parser.add_argument('--ignore_cache', action='store_true',
help="Activate if the sequences in pathDB have"
" changed.")
parser.add_argument('--size_window', type=int, default=20480,
help="Number of frames to consider in each batch.")
args = parser.parse_args(argv)
if args.nGPU < 0:
args.nGPU = torch.cuda.device_count()
if args.save_step <= 0:
args.save_step = args.n_epoch
args.load = [str(Path(x).resolve()) for x in args.load]
args.pathCheckpoint = str(Path(args.pathCheckpoint).resolve())
return args
def main(argv):
args = parse_args(argv)
logs = {"epoch": [], "iter": [], "saveStep": args.save_step}
load_criterion = False
seqNames, speakers = findAllSeqs(args.pathDB,
extension=args.file_extension,
loadCache=not args.ignore_cache)
model, hidden_gar, hidden_encoder = fl.loadModel(args.load,
loadStateDict=not args.no_pretraining)
model.cuda()
model = torch.nn.DataParallel(model, device_ids=range(args.nGPU))
dim_features = hidden_encoder if args.get_encoded else hidden_gar
# Now the criterion
phone_labels = None
if args.pathPhone is not None:
phone_labels, n_phones = parseSeqLabels(args.pathPhone)
if not args.CTC:
print(f"Running phone separability with aligned phones")
criterion = cr.PhoneCriterion(dim_features,
n_phones, args.get_encoded)
else:
print(f"Running phone separability with CTC loss")
criterion = cr.CTCPhoneCriterion(dim_features,
n_phones, args.get_encoded)
else:
print(f"Running speaker separability")
criterion = cr.SpeakerCriterion(dim_features, len(speakers))
criterion.cuda()
criterion = torch.nn.DataParallel(criterion, device_ids=range(args.nGPU))
# Dataset
seq_train = filterSeqs(args.pathTrain, seqNames)
seq_val = filterSeqs(args.pathVal, seqNames)
if args.debug:
seq_train = seq_train[:1000]
seq_val = seq_val[:100]
db_train = AudioBatchData(args.pathDB, args.size_window, seq_train,
phone_labels, len(speakers))
db_val = AudioBatchData(args.pathDB, args.size_window, seq_val,
phone_labels, len(speakers))
batch_size = args.batchSizeGPU * args.nGPU
train_loader = db_train.getDataLoader(batch_size, "uniform", True,
numWorkers=0)
val_loader = db_val.getDataLoader(batch_size, 'sequential', False,
numWorkers=0)
# Optimizer
g_params = list(criterion.parameters())
model.optimize = False
model.eval()
if args.unfrozen:
print("Working in full fine-tune mode")
g_params += list(model.parameters())
model.optimize = True
else:
print("Working with frozen features")
for g in model.parameters():
g.requires_grad = False
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
# Checkpoint directory
args.pathCheckpoint = Path(args.pathCheckpoint)
args.pathCheckpoint.mkdir(exist_ok=True)
args.pathCheckpoint = str(args.pathCheckpoint / "checkpoint")
with open(f"{args.pathCheckpoint}_args.json", 'w') as file:
json.dump(vars(args), file, indent=2)
run(model, criterion, train_loader, val_loader, optimizer, logs,
args.n_epoch, args.pathCheckpoint)
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
args = sys.argv[1:]
main(args)
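# Example invocations (a sketch; paths are hypothetical):
#   python linear_separability.py /data/audio train.txt val.txt \
#       /ckpts/cpc.pt --pathCheckpoint probe_out           # speaker probing
#   python linear_separability.py /data/audio train.txt val.txt \
#       /ckpts/cpc.pt --pathPhone phones.txt --CTC         # phone probing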
| CPC_audio-main | cpc/eval/linear_separability.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/eval/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import progressbar
import math
import random
def normalize_with_singularity(x):
r"""
Normalize the given vector across the third dimension.
Extend all vectors by eps=1e-12 to put the null vector at the maximal
cosine distance from any non-null vector.
"""
N, S, H = x.size()
norm_x = (x**2).sum(dim=2, keepdim=True)
x /= torch.sqrt(norm_x)
zero_vals = (norm_x == 0).view(N, S)
x[zero_vals] = 1 / math.sqrt(H)
border_vect = torch.zeros((N, S, 1),
dtype=x.dtype,
device=x.device) + 1e-12
border_vect[zero_vals] = -2*1e12
return torch.cat([x, border_vect], dim=2)
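# Worked example (a sketch): with x = [[[3., 4.], [0., 0.]]], the first frame
# is normalized to [0.6, 0.8] and extended with eps=1e-12, while the null
# frame becomes [1/sqrt(2), 1/sqrt(2), -2e12]. Their extended dot product is
# about 0.99 + 1e-12 * (-2e12) ~= -1, so after clamping, the cosine distance
# between a null frame and any non-null frame is maximal, as intended.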
def load_item_file(path_item_file):
r""" Load a .item file indicating the triplets for the ABX score. The
input file must have the following fomat:
line 0 : whatever (not read)
line > 0: #file_ID onset offset #phone prev-phone next-phone speaker
onset : begining of the triplet (in s)
onset : end of the triplet (in s)
"""
with open(path_item_file, 'r') as file:
data = file.readlines()[1:]
data = [x.replace('\n', '') for x in data]
out = {}
phone_match = {}
speaker_match = {}
context_match = {}
for line in data:
items = line.split()
assert(len(items) == 7)
fileID = items[0]
if fileID not in out:
out[fileID] = []
onset, offset = float(items[1]), float(items[2])
context = '+'.join([items[4], items[5]])
phone = items[3]
speaker = items[6]
if phone not in phone_match:
s = len(phone_match)
phone_match[phone] = s
phone_id = phone_match[phone]
if context not in context_match:
s = len(context_match)
context_match[context] = s
context_id = context_match[context]
if speaker not in speaker_match:
s = len(speaker_match)
speaker_match[speaker] = s
speaker_id = speaker_match[speaker]
out[fileID].append([onset, offset, context_id, phone_id, speaker_id])
return out, context_match, phone_match, speaker_match
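# Example .item line (following the format described in the docstring):
#   2107 0.3225 0.5225 n ae d 8193
# parses to out['2107'] == [[0.3225, 0.5225, context_id, phone_id,
# speaker_id]], where context_id indexes 'ae+d', phone_id indexes 'n' and
# speaker_id indexes '8193', with all ids assigned in order of first
# appearance (see cpc/eval/ABX/unit_tests.py for a complete example).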
def get_features_group(in_data, index_order):
in_index = list(range(len(in_data)))
in_index.sort(key=lambda x: [in_data[x][i] for i in index_order])
out_groups = []
last_values = [in_data[in_index[0]][i] for i in index_order]
i_s = 0
curr_group = [[] for i in index_order]
n_orders = len(index_order) - 1
tmp = [in_data[i] for i in in_index]
for index, item in enumerate(tmp):
for order_index, order in enumerate(index_order):
if item[order] != last_values[order_index]:
curr_group[-1].append((i_s, index))
for i in range(n_orders, order_index, -1):
curr_group[i-1].append(curr_group[i])
curr_group[i] = []
if order_index == 0:
out_groups += curr_group[0]
curr_group[0] = []
last_values = [item[i] for i in index_order]
i_s = index
break
if i_s < len(in_data):
curr_group[-1].append((i_s, len(in_data)))
for i in range(n_orders, 0, -1):
curr_group[i-1].append(curr_group[i])
out_groups += curr_group[0]
return in_index, out_groups
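# Example (mirroring the 1D case in cpc/eval/ABX/unit_tests.py): with
#   data = [[0], [1], [2], [3], [4], [2], [2], [2]] and index_order = [0],
# get_features_group returns the indices sorted by the selected keys,
#   [0, 1, 2, 5, 6, 7, 3, 4],
# together with the (start, end) ranges of each run of equal keys,
#   [(0, 1), (1, 2), (2, 6), (6, 7), (7, 8)].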
class ABXFeatureLoader:
def __init__(self,
path_item_file,
seqList,
featureMaker,
stepFeature,
normalize):
r"""
Args:
path_item_file (str): path to the .item files containing the ABX
triplets
seqList (list): list of items (fileID, path) where fileID refers to
the file's ID as used in path_item_file, and path
is the actual path to the input audio sequence
featureMaker (function): either a function or a callable object.
Takes a path as input and outputs the
feature sequence corresponding to the
given file.
            normalize (bool): if True all input features will be normalized
                              across the channels dimension.
Note:
You can use this dataset with pre-computed features. For example, if
you have a collection of features files in the torch .pt format then
you can just set featureMaker = torch.load.
"""
files_data, self.context_match, self.phone_match, self.speaker_match = \
load_item_file(path_item_file)
self.seqNorm = True
self.stepFeature = stepFeature
self.loadFromFileData(files_data, seqList, featureMaker, normalize)
def loadFromFileData(self, files_data, seqList, feature_maker, normalize):
# self.features[i]: index_start, size, context_id, phone_id, speaker_id
self.features = []
self.INDEX_CONTEXT = 2
self.INDEX_PHONE = 3
self.INDEX_SPEAKER = 4
data = []
totSize = 0
print("Building the input features...")
bar = progressbar.ProgressBar(maxval=len(seqList))
bar.start()
for index, vals in enumerate(seqList):
fileID, file_path = vals
bar.update(index)
if fileID not in files_data:
continue
features = feature_maker(file_path)
if normalize:
features = normalize_with_singularity(features)
features = features.detach().cpu()
features = features.view(features.size(1), features.size(2))
phone_data = files_data[fileID]
for phone_start, phone_end, context_id, phone_id, speaker_id in phone_data:
index_start = max(
0, int(math.ceil(self.stepFeature * phone_start - 0.5)))
index_end = min(features.size(0),
int(math.floor(self.stepFeature * phone_end - 0.5)))
if index_start >= features.size(0) or index_end <= index_start:
continue
loc_size = index_end - index_start
self.features.append([totSize, loc_size, context_id,
phone_id, speaker_id])
data.append(features[index_start:index_end])
totSize += loc_size
bar.finish()
print("...done")
self.data = torch.cat(data, dim=0)
self.feature_dim = self.data.size(1)
def get_data_device(self):
return self.data.device
def cuda(self):
self.data = self.data.cuda()
def cpu(self):
self.data = self.data.cpu()
def get_max_group_size(self, i_group, i_sub_group):
id_start, id_end = self.group_index[i_group][i_sub_group]
return max([self.features[i][1] for i in range(id_start, id_end)])
def get_ids(self, index):
context_id, phone_id, speaker_id = self.features[index][2:]
return context_id, phone_id, speaker_id
def __getitem__(self, index):
i_data, out_size, context_id, phone_id, speaker_id = self.features[index]
return self.data[i_data:(i_data + out_size)], out_size, (context_id, phone_id, speaker_id)
def __len__(self):
return len(self.features)
def get_n_speakers(self):
return len(self.speaker_match)
def get_n_context(self):
return len(self.context_match)
def get_n_phone(self):
return len(self.phone_match)
def get_n_groups(self):
return len(self.group_index)
def get_n_sub_group(self, index_sub_group):
return len(self.group_index[index_sub_group])
def get_iterator(self, mode, max_size_group):
if mode == 'within':
return ABXWithinGroupIterator(self, max_size_group)
if mode == 'across':
return ABXAcrossGroupIterator(self, max_size_group)
raise ValueError(f"Invalid mode: {mode}")
class ABXIterator:
r"""
Base class building ABX's triplets.
"""
def __init__(self, abxDataset, max_size_group):
self.max_size_group = max_size_group
self.dataset = abxDataset
self.len = 0
self.index_csp, self.groups_csp = \
get_features_group(abxDataset.features,
[abxDataset.INDEX_CONTEXT,
abxDataset.INDEX_SPEAKER,
abxDataset.INDEX_PHONE])
def get_group(self, i_start, i_end):
data = []
max_size = 0
to_take = list(range(i_start, i_end))
if i_end - i_start > self.max_size_group:
to_take = random.sample(to_take, k=self.max_size_group)
for i in to_take:
loc_data, loc_size, loc_id = self.dataset[self.index_csp[i]]
max_size = max(loc_size, max_size)
data.append(loc_data)
N = len(to_take)
out_data = torch.zeros(N, max_size,
self.dataset.feature_dim,
device=self.dataset.get_data_device())
out_size = torch.zeros(N, dtype=torch.long,
device=self.dataset.get_data_device())
for i in range(N):
size = data[i].size(0)
out_data[i, :size] = data[i]
out_size[i] = size
return out_data, out_size, loc_id
def __len__(self):
return self.len
def get_board_size(self):
r"""
Get the output dimension of the triplet's space.
"""
pass
class ABXWithinGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX within score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXWithinGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = True
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += (len(speaker_group) - 1)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
data_b, size_b, id_b = self.get_group(i_start_b,
i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a,
i_end_a)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0]
yield out_coords, (data_a, size_a), (data_b, size_b), \
(data_a, size_a)
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context())
class ABXAcrossGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXAcrossGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = False
self.get_speakers_from_cp = {}
self.max_x = 5
for context_group in self.groups_csp:
for speaker_group in context_group:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
if c_id not in self.get_speakers_from_cp:
self.get_speakers_from_cp[c_id] = {}
if p_id not in self.get_speakers_from_cp[c_id]:
self.get_speakers_from_cp[c_id][p_id] = {}
self.get_speakers_from_cp[c_id][p_id][s_id] = (
i_start, i_end)
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
self.len += (len(speaker_group) - 1) * (min(self.max_x,
len(self.get_speakers_from_cp[c_id][p_id]) - 1))
def get_other_speakers_in_group(self, i_start_group):
c_id, p_id, s_id = self.dataset.get_ids(self.index_csp[i_start_group])
return [v for k, v in self.get_speakers_from_cp[c_id][p_id].items() if k != s_id]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0], id_x[2]
return out_coords, (data_a, size_a), (data_b, size_b), \
(data_x, size_x)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
yield self.get_abx_triplet((i_start_a, i_end_a), (i_start_b, i_end_b), (i_start_x, i_end_x))
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context(),
self.dataset.get_n_speakers())
| CPC_audio-main | cpc/eval/ABX/abx_iterators.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/eval/ABX/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from nose.tools import eq_, ok_
from . import abx_group_computation
from . import abx_iterators
from pathlib import Path
import numpy as np
import math
class TestDistancesDTW(unittest.TestCase):
def testDTWFunction(self):
X = torch.tensor([[[0, 1], [0, 0], [1, 1], [42, 42]],
[[0, 2], [0, 1], [1, 1], [-1, 0]],
[[0, 0], [0, 1], [0, 0], [21, 211]]],
dtype=torch.float)
X_size = torch.tensor([3, 4, 2])
Y = torch.tensor([[[0, 1], [1, 2], [0, 0]]], dtype=torch.float)
Y_size = torch.tensor([3])
distance_mode = abx_group_computation.get_euclidian_distance_batch
dist = abx_group_computation.get_distance_group_dtw(X, Y,
X_size, Y_size,
distance_function=distance_mode)
eq_(dist.size(), (3, 1))
expected_dist = [[(math.sqrt(2)) / 2], [3 / 4],
[(2 + math.sqrt(2)) / 3]]
for i in range(3):
ok_(abs(expected_dist[i][0] - dist[i].item()) < 1e-4)
    def testThetaDTWFunctionSymmetric(self):
A = torch.tensor([[[0, 1], [0, 0], [1, 1], [42, 42]],
[[0, 2], [0, 1], [1, 1], [-1, 0]],
[[0, 0], [0, 1], [0, 0], [21, 211]]],
dtype=torch.float)
A_size = torch.tensor([3, 4, 2])
B = torch.tensor([[[0, 1], [1, 2], [0, 0]]], dtype=torch.float)
B_size = torch.tensor([3])
distance_mode = abx_group_computation.get_euclidian_distance_batch
        symmetric = True
        theta = abx_group_computation.get_theta_group_dtw(A, B, A, A_size,
                                                          B_size, A_size,
                                                          distance_mode,
                                                          symmetric)
eq_(theta, 0.5)
class testSingularityNormalization(unittest.TestCase):
def testCosineNormalized(self):
x = torch.tensor([[[1., 0., 0., 0.], [0., 0., 0., 0.]],
[[0., 0., -1., 0.], [0.5, -0.5, 0.5, -0.5]]])
y = torch.tensor(
[[[-0.5, -0.5, -0.5, 0.5], [0., 0., 0., 0.], [0., 1., 0., 0.]]])
norm_x = abx_iterators.normalize_with_singularity(x)
norm_y = abx_iterators.normalize_with_singularity(y)
dist = abx_group_computation.get_cosine_distance_batch(norm_x, norm_y)
eq_(dist.size(), (2, 1, 2, 3))
ok_(abs(dist[0, 0, 0, 0] - 0.6667) < 1e-4)
ok_(abs(dist[0, 0, 0, 1] - 1.) < 1e-4)
ok_(abs(dist[0, 0, 0, 2] - 0.5) < 1e-4)
ok_(abs(dist[0, 0, 1, 0] - 1) < 1e-4)
ok_(abs(dist[0, 0, 1, 1]) < 1e-4)
ok_(abs(dist[0, 0, 1, 2] - 1) < 1e-4)
ok_(abs(dist[1, 0, 0, 0] - 0.3333) < 1e-4)
ok_(abs(dist[1, 0, 0, 1] - 1.) < 1e-4)
ok_(abs(dist[1, 0, 0, 2] - 0.5) < 1e-4)
ok_(abs(dist[1, 0, 1, 0]-0.6667) < 1e-4)
ok_(abs(dist[1, 0, 1, 1] - 1.) < 1e-4)
ok_(abs(dist[1, 0, 1, 2] - 0.6667) < 1e-4)
class testGroupMaker(unittest.TestCase):
def test1DGroupMaker(self):
data = [[0], [1], [2], [3], [4], [2], [2], [2]]
order = [0]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [0, 1, 2, 5, 6, 7, 3, 4]
eq_(out_index, expected_index)
expected_output = [(0, 1), (1, 2), (2, 6), (6, 7), (7, 8)]
eq_(out_data, expected_output)
def test2DGroupMaker(self):
data = [[0, 1], [1, 2], [2, 3], [3, 3],
[4, 0], [2, 2], [4, 2], [2, 2], [0, 3]]
order = [1, 0]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [4, 0, 1, 5, 7, 6, 8, 2, 3]
eq_(out_index, expected_index)
expected_output = [[(0, 1)],
[(1, 2)],
[(2, 3), (3, 5), (5, 6)],
[(6, 7), (7, 8), (8, 9)]]
eq_(out_data, expected_output)
def test3DGroupMaker(self):
data = [[0, 0, 0, 1],
[41, 1, 0, 2],
[-23, 0, 3, 1],
[220, 1, -2, 3],
[40, 2, 1, 0],
[200, 0, 0, 1]]
order = [1, 3, 2]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [0, 5, 2, 1, 3, 4]
eq_(out_index, expected_index)
expected_output = [[[(0, 2), (2, 3)]], [
[(3, 4)], [(4, 5)]], [[(5, 6)]]]
eq_(out_data, expected_output)
class testItemLoader(unittest.TestCase):
def setUp(self):
self.test_data_dir = Path(__file__).parent / 'test_data'
def testLoadItemFile(self):
path_item_file = self.test_data_dir / "dummy_item_file.item"
out, context_match, phone_match, speaker_match = \
abx_iterators.load_item_file(path_item_file)
eq_(len(out), 4)
eq_(len(phone_match), 5)
eq_(len(speaker_match), 3)
expected_phones = {'n': 0, 'd': 1, 'ih': 2,
's': 3, 'dh': 4}
eq_(phone_match, expected_phones)
expected_speakers = {'8193': 0, '2222': 1, '12': 2}
eq_(speaker_match, expected_speakers)
expected_context = {'ae+d': 0, 'n+l': 1, 'l+n': 2, 'ih+s': 3,
'n+ax': 4, 'ax+dh': 5, 's+ax': 6}
eq_(context_match, expected_context)
expected_output = {'2107': [[0.3225, 0.5225, 0, 0, 0],
[0.4225, 0.5925, 1, 1, 1],
[1.1025, 1.2925, 6, 4, 2]],
'42': [[0.4525, 0.6525, 1, 1, 1],
[0.5225, 0.7325, 2, 2, 0],
[0.5925, 0.8725, 3, 0, 0]],
'23': [[0.6525, 1.1025, 4, 3, 0],
[0.7325, 1.1925, 4, 3, 1]],
'407': [[0.8725, 1.2425, 5, 3, 1]]}
eq_(expected_output, out)
def testLoadWithinItemFile(self):
path_item_file = self.test_data_dir / "dummy_item_within.item"
out, context_match, phone_match, speaker_match = \
abx_iterators.load_item_file(path_item_file)
expected_output = {'2107': [[0., 0.2, 0, 0, 0],
[0.3225, 0.5225, 1, 0, 0],
[0.6, 0.75, 1, 0, 0],
[0.4225, 0.5925, 2, 1, 1]],
'42': [[0.4525, 0.6525, 2, 1, 1],
[0.1301, 0.2501, 2, 2, 1],
[0.5225, 0.7325, 2, 1, 0],
[0.0025, 0.3561, 3, 1, 1],
[0.5925, 0.8725, 3, 1, 0]]}
eq_(expected_output, out)
class testABXFeatureLoader(unittest.TestCase):
def setUp(self):
self.stepFeature = 10
self.test_data_dir = Path(__file__).parent / 'test_data'
def dummy_feature_maker(path_file, *args):
data = torch.tensor(np.load(path_file))
assert(len(data.size()) == 1)
return data.view(1, -1, 1)
def testBaseLoader(self):
seqList = [('2107', self.test_data_dir / '2107.npy'),
('42', self.test_data_dir / '42.npy'),
('23', self.test_data_dir / '23.npy'),
('407', self.test_data_dir / '407.npy')]
dataset = abx_iterators.ABXFeatureLoader(self.test_data_dir / "dummy_item_file.item",
seqList,
testABXFeatureLoader.dummy_feature_maker,
self.stepFeature,
False)
print(dataset.features)
eq_(dataset.feature_dim, 1)
eq_(len(dataset), 9)
eq_(len(dataset.data.size()), 2)
eq_(len(dataset.data), 16)
data, size, coords = dataset[0]
eq_(size, 1)
eq_(coords, (0, 0, 0))
eq_(data.tolist(), [[3]])
data, size, coords = dataset[3]
eq_(size, 1)
eq_(coords, (1, 1, 1))
eq_(data.tolist(), [[5]])
def testWithinIterator(self):
seqList = [('2107', self.test_data_dir / '2107.npy'),
('42', self.test_data_dir / '42.npy')]
dataset = abx_iterators.ABXFeatureLoader(self.test_data_dir / "dummy_item_within.item",
seqList,
testABXFeatureLoader.dummy_feature_maker,
self.stepFeature,
False)
iterator = dataset.get_iterator('within', 40)
eq_(iterator.index_csp, [0, 1, 2, 6, 3, 4, 5, 8, 7])
eq_(iterator.groups_csp, [[[(0, 1)]], [[(1, 3)]], [
[(3, 4)], [(4, 6), (6, 7)]], [[(7, 8)], [(8, 9)]]])
eq_(len(iterator), 1)
it = iter(iterator)
c1, a_01, b_01, x_01 = next(it)
eq_(c1, (1, 1, 2, 2))
a_1, s_a = a_01
eq_(s_a.tolist(), [1, 1])
eq_(a_1.tolist(), [[[4.]], [[5.]]])
eq_(x_01[0].tolist(), a_1.tolist())
eq_(x_01[1].tolist(), s_a.tolist())
eq_(b_01[0].tolist(), [[[1.]]])
eq_(b_01[1].item(), 1)
eq_(next(it, False), False)
eq_(iterator.get_board_size(), (2, 3, 3, 4))
| CPC_audio-main | cpc/eval/ABX/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
from . import dtw
import progressbar
def get_distance_function_from_name(name_str):
if name_str == 'euclidian':
return get_euclidian_distance_batch
if name_str == 'cosine':
return get_cosine_distance_batch
raise ValueError(f"Invalid distance mode")
def check_dtw_group_validity(a, b, x):
assert(len(a.size()) == len(b.size()))
assert(len(a.size()) == len(x.size()))
assert(a.size(2) == x.size(2))
assert(a.size(2) == b.size(2))
def get_cosine_distance_batch(a1, a2, epsilon=1e-8):
r""" a1 and a2 must be normalized"""
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
prod = (a1.view(N1, 1, S1, 1, D)) * (a2.view(1, N2, 1, S2, D))
    # Sum across the channel dimension
prod = torch.clamp(prod.sum(dim=4), -1, 1).acos() / math.pi
return prod
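# Worked example (a sketch): for unit vectors a1 = [1, 0] and a2 = [0, 1] the
# clamped dot product is 0, so the returned distance is acos(0) / pi = 0.5;
# identical vectors give 0 and opposite vectors give 1. The distance is thus
# the angle between the vectors, normalized to [0, 1].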
def get_euclidian_distance_batch(a1, a2):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
diff = a1.view(N1, 1, S1, 1, D) - a2.view(1, N2, 1, S2, D)
return torch.sqrt((diff**2).sum(dim=4))
def get_distance_group_dtw(a1, a2, size1, size2,
ignore_diag=False, symmetric=False,
distance_function=get_cosine_distance_batch):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
if size1.size(0) != N1:
print(a1.size(), size1.size())
print(a2.size(), size2.size())
assert(size1.size(0) == N1)
assert(size2.size(0) == N2)
distance_mat = distance_function(a1, a2).detach().cpu().numpy()
return dtw.dtw_batch(a1, a2, size1, size2,
distance_mat,
ignore_diag, symmetric)
def get_theta_group_dtw(a, b, x, sa, sb, sx, distance_function, symmetric):
check_dtw_group_validity(a, b, x)
dxb = get_distance_group_dtw(
x, b, sx, sb, distance_function=distance_function)
dxa = get_distance_group_dtw(x, a, sx, sa, ignore_diag=symmetric,
symmetric=symmetric,
distance_function=distance_function)
Nx, Na = dxa.size()
Nx, Nb = dxb.size()
if symmetric:
n_pos = Na * (Na - 1)
max_val = dxb.max().item()
for i in range(Na):
dxa[i, i] = max_val + 1
else:
n_pos = Na * Nx
dxb = dxb.view(Nx, 1, Nb).expand(Nx, Na, Nb)
dxa = dxa.view(Nx, Na, 1).expand(Nx, Na, Nb)
sc = (dxa < dxb).sum() + 0.5 * (dxa == dxb).sum()
sc /= (n_pos * Nb)
return sc.item()
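# theta is the fraction of (a, b, x) comparisons in which x is closer to a
# than to b (ties count for one half). theta == 1 means the category shared
# by a and x is perfectly separated from the category of b; loc_dtw below
# reports the ABX error as 1 - theta.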
def loc_dtw(data, distance_function, symmetric):
coords, group_a, group_b, group_x = data
group_a_data, group_a_size = group_a
group_b_data, group_b_size = group_b
group_x_data, group_x_size = group_x
theta = get_theta_group_dtw(group_a_data,
group_b_data,
group_x_data,
group_a_size,
group_b_size,
group_x_size,
distance_function,
symmetric)
return (coords, 1 - theta)
def get_abx_scores_dtw_on_group(group_iterator,
distance_function,
symmetric):
data_list = []
coords_list = []
bar = progressbar.ProgressBar(maxval=len(group_iterator))
bar.start()
with torch.no_grad():
for index, group in enumerate(group_iterator):
bar.update(index)
coords, abx = loc_dtw(group, distance_function, symmetric)
data_list.append(abx)
coords_list.append(coords)
bar.finish()
return torch.sparse.FloatTensor(torch.LongTensor(coords_list).t(),
torch.FloatTensor(data_list),
group_iterator.get_board_size())
| CPC_audio-main | cpc/eval/ABX/abx_group_computation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchaudio
import progressbar
import os
import sys
from pathlib import Path
def adjust_sample_rate(path_db, file_list, path_db_out,
target_sr):
bar = progressbar.ProgressBar(maxval=len(file_list))
bar.start()
for index, item in enumerate(file_list):
path_in = os.path.join(path_db, item)
path_out = os.path.join(path_db_out, item)
bar.update(index)
data, sr = torchaudio.load(path_in)
transform = torchaudio.transforms.Resample(orig_freq=sr,
new_freq=target_sr,
resampling_method='sinc_interpolation')
data = transform(data)
torchaudio.save(path_out, data, target_sr,
precision=16, channels_first=True)
bar.finish()
def get_names_list(path_tsv_file):
with open(path_tsv_file, 'r') as file:
data = file.readlines()
return [x.split()[0] for x in data]
def parse_args(argv):
parser = argparse.ArgumentParser(description='Adjust the sample rate of '
'a given group of audio files')
parser.add_argument('path_db', type=str,
help='Path to the directory containing the audio '
'files')
parser.add_argument("path_phone_files", type=str,
help='Path to the .txt file containing the list of '
'the files with a phone transcription')
parser.add_argument("path_out", type=str,
help='Path out the output directory')
parser.add_argument("--out_sample_rate", type=int, default=16000,
help="Sample rate of the output audio files "
"(default is 160000)")
parser.add_argument('--file_extension', type=str, default='.mp3')
return parser.parse_args(argv)
def main(argv):
args = parse_args(argv)
file_list_db = [f for f in os.listdir(args.path_db)
if Path(f).suffix == args.file_extension]
print(f"Found {len(file_list_db)} in the dataset")
file_list_phone = get_names_list(args.path_phone_files)
print(f"Found {len(file_list_phone)} with a phone transcription")
file_list_db.sort()
file_list_phone.sort()
out_list = []
index_phone = 0
    for file_name in file_list_db:
        while index_phone < len(file_list_phone) \
                and Path(file_name).stem > file_list_phone[index_phone]:
            index_phone += 1
        if index_phone >= len(file_list_phone):
            break  # the sorted phone list is exhausted
        if Path(file_name).stem == file_list_phone[index_phone]:
            out_list.append(file_name)
print(f"Converting {len(out_list)} files")
Path(args.path_out).mkdir(parents=True, exist_ok=True)
adjust_sample_rate(args.path_db, out_list,
args.path_out, args.out_sample_rate)
if __name__ == '__main__':
main(sys.argv[1:])
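# Example invocation (a sketch; paths are hypothetical):
#   python adjust_sample_rate.py /data/cv/clips phone_files.txt \
#       /data/cv/clips_16k --out_sample_rate 16000 --file_extension .mp3
# Only the files that are both present in path_db and listed in the phone
# transcription file are resampled.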
| CPC_audio-main | cpc/eval/utils/adjust_sample_rate.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import argparse
import numpy as np
import resampy
from scikits.audiolab import Sndfile, Format
def load_wav(fname, rate=None):
fp = Sndfile(fname, 'r')
_signal = fp.read_frames(fp.nframes)
_signal = _signal.reshape((-1, fp.channels))
_rate = fp.samplerate
if _signal.ndim == 1:
        _signal = _signal.reshape((-1, 1))
if rate is not None and rate != _rate:
signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_best')
else:
signal = _signal
rate = _rate
return signal, rate
def save_wav(fname, signal, rate):
fp = Sndfile(fname, 'w', Format('wav'), signal.shape[1], rate)
fp.write_frames(signal)
fp.close()
def reEncodeAudio(audio_path, new_rate):
audio, audio_rate = load_wav(audio_path,new_rate)
save_wav(audio_path, audio, new_rate)
def main():
parser = argparse.ArgumentParser(description="re-encode all audios under a directory")
parser.add_argument("--audio_dir_path", type=str, required=True)
parser.add_argument("--new_rate", type=int, default=16000)
args = parser.parse_args()
audio_list = glob.glob(args.audio_dir_path + '/*.wav')
print "Total number of audios to re-encode: ", len(audio_list)
    # glob already returns paths prefixed with audio_dir_path
    for audio_path in audio_list:
        reEncodeAudio(audio_path, args.new_rate)
if __name__ == '__main__':
main()
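# Example invocation (a sketch; note that this script relies on the
# Python 2-era scikits.audiolab package):
#   python reEncodeAudio.py --audio_dir_path /data/audio --new_rate 16000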
| 2.5D-Visual-Sound-main | reEncodeAudio.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from torch.autograd import Variable
from tensorboardX import SummaryWriter
def create_optimizer(nets, opt):
(net_visual, net_audio) = nets
param_groups = [{'params': net_visual.parameters(), 'lr': opt.lr_visual},
{'params': net_audio.parameters(), 'lr': opt.lr_audio}]
if opt.optimizer == 'sgd':
return torch.optim.SGD(param_groups, momentum=opt.beta1, weight_decay=opt.weight_decay)
elif opt.optimizer == 'adam':
return torch.optim.Adam(param_groups, betas=(opt.beta1,0.999), weight_decay=opt.weight_decay)
def decrease_learning_rate(optimizer, decay_factor=0.94):
for param_group in optimizer.param_groups:
param_group['lr'] *= decay_factor
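# decrease_learning_rate applies an exponential decay: one call with the
# default factor turns each group's lr into 0.94 * lr, so after n calls the
# learning rate is lr * 0.94**n.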
#used to display validation loss
def display_val(model, loss_criterion, writer, index, dataset_val, opt):
losses = []
with torch.no_grad():
for i, val_data in enumerate(dataset_val):
if i < opt.validation_batches:
output = model.forward(val_data)
loss = loss_criterion(output['binaural_spectrogram'], output['audio_gt'])
losses.append(loss.item())
else:
break
avg_loss = sum(losses)/len(losses)
if opt.tensorboard:
writer.add_scalar('data/val_loss', avg_loss, index)
print('val loss: %.3f' % avg_loss)
return avg_loss
#parse arguments
opt = TrainOptions().parse()
opt.device = torch.device("cuda")
#construct data loader
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training clips = %d' % dataset_size)
#create validation set data loader if validation_on option is set
if opt.validation_on:
    #temporarily set to val to load val data
opt.mode = 'val'
data_loader_val = CreateDataLoader(opt)
dataset_val = data_loader_val.load_data()
dataset_size_val = len(data_loader_val)
print('#validation clips = %d' % dataset_size_val)
opt.mode = 'train' #set it back
if opt.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(comment=opt.name)
else:
writer = None
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
# set up optimizer
optimizer = create_optimizer(nets, opt)
# set up loss function
loss_criterion = torch.nn.MSELoss()
if(len(opt.gpu_ids) > 0):
loss_criterion.cuda(opt.gpu_ids[0])
# initialization
total_steps = 0
data_loading_time = []
model_forward_time = []
model_backward_time = []
batch_loss = []
best_err = float("inf")
for epoch in range(1, opt.niter+1):
torch.cuda.synchronize()
epoch_start_time = time.time()
if(opt.measure_time):
iter_start_time = time.time()
for i, data in enumerate(dataset):
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_loaded_time = time.time()
total_steps += opt.batchSize
# forward pass
model.zero_grad()
output = model.forward(data)
# compute loss
        loss = loss_criterion(output['binaural_spectrogram'], output['audio_gt'].detach()) #detach the target (replaces the deprecated Variable wrapper)
batch_loss.append(loss.item())
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_forwarded_time = time.time()
# update optimizer
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(opt.measure_time):
iter_model_backwarded_time = time.time()
data_loading_time.append(iter_data_loaded_time - iter_start_time)
model_forward_time.append(iter_data_forwarded_time - iter_data_loaded_time)
model_backward_time.append(iter_model_backwarded_time - iter_data_forwarded_time)
if(total_steps // opt.batchSize % opt.display_freq == 0):
print('Display training progress at (epoch %d, total_steps %d)' % (epoch, total_steps))
avg_loss = sum(batch_loss) / len(batch_loss)
print('Average loss: %.3f' % (avg_loss))
batch_loss = []
if opt.tensorboard:
writer.add_scalar('data/loss', avg_loss, total_steps)
if(opt.measure_time):
print('average data loading time: ' + str(sum(data_loading_time)/len(data_loading_time)))
print('average forward time: ' + str(sum(model_forward_time)/len(model_forward_time)))
print('average backward time: ' + str(sum(model_backward_time)/len(model_backward_time)))
data_loading_time = []
model_forward_time = []
model_backward_time = []
print('end of display \n')
if(total_steps // opt.batchSize % opt.save_latest_freq == 0):
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_latest.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_latest.pth'))
if(total_steps // opt.batchSize % opt.validation_freq == 0 and opt.validation_on):
model.eval()
opt.mode = 'val'
print('Display validation results at (epoch %d, total_steps %d)' % (epoch, total_steps))
val_err = display_val(model, loss_criterion, writer, total_steps, dataset_val, opt)
print('end of display \n')
model.train()
opt.mode = 'train'
#save the model that achieves the smallest validation error
if val_err < best_err:
best_err = val_err
print('saving the best model (epoch %d, total_steps %d) with validation error %.3f\n' % (epoch, total_steps, val_err))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_best.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_best.pth'))
if(opt.measure_time):
iter_start_time = time.time()
if(epoch % opt.save_epoch_freq == 0):
print('saving the model at the end of epoch %d, total_steps %d' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_visual.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_audio.pth'))
#decrease learning rate 6% every opt.learning_rate_decrease_itr epochs
if(opt.learning_rate_decrease_itr > 0 and epoch % opt.learning_rate_decrease_itr == 0):
decrease_learning_rate(optimizer, opt.decay_factor)
print('decreased learning rate by ', opt.decay_factor)
| 2.5D-Visual-Sound-main | train.py |
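# --- Illustrative sketch (not part of the original repo) ---
# decrease_learning_rate() in train.py multiplies every parameter group's lr
# by decay_factor; torch ships the same behavior as ExponentialLR, stepped
# once per decay interval. A toy equivalent with a dummy parameter:
import torch
from torch.optim.lr_scheduler import ExponentialLR
dummy_params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(dummy_params, lr=1e-3)
scheduler = ExponentialLR(optimizer, gamma=0.94)  # matches the default decay_factor
for epoch in range(1, 11):
    # ... one training epoch would run here ...
    if epoch % 5 == 0:  # e.g. learning_rate_decrease_itr = 5
        scheduler.step()  # lr <- lr * 0.94, i.e. a 6% decrease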
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import librosa
import argparse
import numpy as np
from scipy.signal import hilbert
import statistics as stat
def normalize(samples):
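    # peak normalization: rescale so max(|samples|) == 1; the 1e-20 floor
    # guards against division by zero on silent inputs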
return samples / np.maximum(1e-20, np.max(np.abs(samples)))
def STFT_L2_distance(predicted_binaural, gt_binaural):
#channel1
predicted_spect_channel1 = librosa.core.stft(np.asfortranarray(predicted_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel1 = librosa.core.stft(np.asfortranarray(gt_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel1), axis=0)
predicted_realimag_channel1 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel1), axis=0)
gt_realimag_channel1 = np.concatenate((real, imag), axis=0)
channel1_distance = np.mean(np.power((predicted_realimag_channel1 - gt_realimag_channel1), 2))
#channel2
predicted_spect_channel2 = librosa.core.stft(np.asfortranarray(predicted_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel2 = librosa.core.stft(np.asfortranarray(gt_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel2), axis=0)
predicted_realimag_channel2 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel2), axis=0)
gt_realimag_channel2 = np.concatenate((real, imag), axis=0)
channel2_distance = np.mean(np.power((predicted_realimag_channel2 - gt_realimag_channel2), 2))
#sum the distance between two channels
stft_l2_distance = channel1_distance + channel2_distance
return float(stft_l2_distance)
def Envelope_distance(predicted_binaural, gt_binaural):
#channel1
pred_env_channel1 = np.abs(hilbert(predicted_binaural[0,:]))
gt_env_channel1 = np.abs(hilbert(gt_binaural[0,:]))
channel1_distance = np.sqrt(np.mean((gt_env_channel1 - pred_env_channel1)**2))
#channel2
pred_env_channel2 = np.abs(hilbert(predicted_binaural[1,:]))
gt_env_channel2 = np.abs(hilbert(gt_binaural[1,:]))
channel2_distance = np.sqrt(np.mean((gt_env_channel2 - pred_env_channel2)**2))
#sum the distance between two channels
envelope_distance = channel1_distance + channel2_distance
return float(envelope_distance)
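# --- Illustrative sketch (not part of the original repo) ---
# Envelope_distance compares instantaneous-amplitude envelopes; the envelope
# of a real signal is |hilbert(x)|, the magnitude of its analytic signal:
def _envelope_demo():
    t = np.linspace(0, 1, 16000, endpoint=False)
    x = np.sin(2 * np.pi * 440 * t) * np.exp(-3 * t)  # decaying 440 Hz tone
    return np.abs(hilbert(x))  # closely tracks the exp(-3 * t) amplitude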
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--results_root', type=str, required=True)
parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
    #note: argparse's type=bool treats any non-empty string (even "False") as True, so these are store_true flags
    parser.add_argument('--real_mono', action='store_true', help='whether the input predicted binaural audio is mono audio')
    parser.add_argument('--normalization', action='store_true', help='whether to peak-normalize audio before computing the metrics')
args = parser.parse_args()
stft_distance_list = []
envelope_distance_list = []
audioNames = os.listdir(args.results_root)
index = 1
for audio_name in audioNames:
if index % 10 == 0:
print "Evaluating testing example " + str(index) + " :", audio_name
#check whether input binaural is mono, replicate to two channels if it's mono
if args.real_mono:
mono_sound, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'mixed_mono.wav'), sr=args.audio_sampling_rate)
predicted_binaural = np.repeat(np.expand_dims(mono_sound, 0), 2, axis=0)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
else:
predicted_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'predicted_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
gt_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'input_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
gt_binaural = normalize(gt_binaural)
#get results for this audio
stft_distance_list.append(STFT_L2_distance(predicted_binaural, gt_binaural))
envelope_distance_list.append(Envelope_distance(predicted_binaural, gt_binaural))
index = index + 1
#print the results
print "STFT L2 Distance: ", stat.mean(stft_distance_list), stat.stdev(stft_distance_list), stat.stdev(stft_distance_list) / np.sqrt(len(stft_distance_list))
print "Average Envelope Distance: ", stat.mean(envelope_distance_list), stat.stdev(envelope_distance_list), stat.stdev(envelope_distance_list) / np.sqrt(len(envelope_distance_list))
if __name__ == '__main__':
main()
| 2.5D-Visual-Sound-main | evaluate.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import librosa
import numpy as np
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from data.audioVisual_dataset import generate_spectrogram
def audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):
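    # RMS normalization: scale the clip to a target RMS and return the inverse
    # factor so predictions can be rescaled back afterwards, e.g. an input with
    # RMS 0.05 is doubled and the returned normalizer is 0.5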
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return rms / desired_rms, samples
def main():
#load test arguments
opt = TestOptions().parse()
opt.device = torch.device("cuda")
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
model.eval()
#load the audio to perform separation
audio, audio_rate = librosa.load(opt.input_audio_path, sr=opt.audio_sampling_rate, mono=False)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#define the transformation to perform on visual frames
vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
vision_transform = transforms.Compose(vision_transform_list)
#perform spatialization over the whole audio using a sliding window approach
overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated
binaural_audio = np.zeros((audio.shape))
    #process the spectrogram windows in a sliding-window fashion
sliding_window_start = 0
data = {}
samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
while sliding_window_start + samples_per_window < audio.shape[-1]:
sliding_window_end = sliding_window_start + samples_per_window
normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for current window
frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
        predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].detach().cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
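        # recover the two channels from mix = L + R and diff = L - R:
        # L = (mix + diff) / 2, R = (mix - diff) / 2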
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural
overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1
sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
#deal with the last segment
normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])
audio_segment_channel1 = audio_segment[0,:]
    audio_segment_channel2 = audio_segment[1,:]
    audio_segment_mix = audio_segment_channel1 + audio_segment_channel2 #recompute the mix for the last window; the original reused the stale value from the final loop iteration
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for last window
frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
    predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].detach().cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
#add the spatialized audio to reconstructed_binaural
binaural_audio[:,-samples_per_window:] = binaural_audio[:,-samples_per_window:] + reconstructed_binaural
overlap_count[:,-samples_per_window:] = overlap_count[:,-samples_per_window:] + 1
#divide aggregated predicted audio by their corresponding counts
predicted_binaural_audio = np.divide(binaural_audio, overlap_count)
#check output directory
if not os.path.isdir(opt.output_dir_root):
os.mkdir(opt.output_dir_root)
mixed_mono = (audio_channel1 + audio_channel2) / 2
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'predicted_binaural.wav'), predicted_binaural_audio, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'mixed_mono.wav'), mixed_mono, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'input_binaural.wav'), audio, opt.audio_sampling_rate)
if __name__ == '__main__':
main()
| 2.5D-Visual-Sound-main | demo.py |
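# --- Illustrative sketch (not part of the original repo) ---
# demo.py averages overlapping window predictions by accumulating both the
# raw predictions and a per-sample hit count, then dividing; a minimal toy
# version of that overlap-and-average scheme:
import numpy as np
signal_len, win, hop = 100, 40, 10
acc = np.zeros(signal_len)
count = np.zeros(signal_len)
start = 0
while start + win < signal_len:
    acc[start:start + win] += np.ones(win)  # stand-in for one window's model output
    count[start:start + win] += 1
    start += hop
# final window anchored to the end, mirroring the last-segment branch in demo.py
acc[-win:] += np.ones(win)
count[-win:] += 1
averaged = acc / count  # each sample is the mean over all windows covering it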
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--input_audio_path', required=True, help='path to the input audio file')
self.parser.add_argument('--video_frame_path', required=True, help='path to the input video frames')
self.parser.add_argument('--output_dir_root', type=str, default='test_output', help='path to the output files')
self.parser.add_argument('--input_audio_length', type=float, default=10, help='length of the testing video/audio')
self.parser.add_argument('--hop_size', default=0.05, type=float, help='the hop length to perform audio spatialization in a sliding window approach')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
self.mode = "test"
self.isTrain = False
| 2.5D-Visual-Sound-main | options/test_options.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=50, help='frequency of displaying average loss')
self.parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
self.parser.add_argument('--niter', type=int, default=1000, help='# of epochs to train')
        self.parser.add_argument('--learning_rate_decrease_itr', type=int, default=-1, help='decay the learning rate by decay_factor every this many epochs; -1 disables decay')
self.parser.add_argument('--decay_factor', type=float, default=0.94, help='learning rate decay factor')
        self.parser.add_argument('--tensorboard', action='store_true', help='use tensorboard to visualize loss changes (type=bool would parse any non-empty string as True)')
        self.parser.add_argument('--measure_time', action='store_true', help='measure time taken by different steps during training')
self.parser.add_argument('--validation_on', action='store_true', help='whether to test on validation set during training')
self.parser.add_argument('--validation_freq', type=int, default=100, help='frequency of testing on validation set')
self.parser.add_argument('--validation_batches', type=int, default=10, help='number of batches to test for validation')
        self.parser.add_argument('--enable_data_augmentation', type=bool, default=True, help='whether to augment input frames (note: any non-empty CLI value parses as True)')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
#optimizer arguments
self.parser.add_argument('--lr_visual', type=float, default=0.0001, help='learning rate for visual stream')
self.parser.add_argument('--lr_audio', type=float, default=0.001, help='learning rate for unet')
self.parser.add_argument('--optimizer', default='adam', type=str, help='adam or sgd for optimization')
self.parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
self.parser.add_argument('--weight_decay', default=0.0005, type=float, help='weights regularizer')
self.mode = "train"
self.isTrain = True
self.enable_data_augmentation = True
| 2.5D-Visual-Sound-main | options/train_options.py |
| 2.5D-Visual-Sound-main | options/__init__.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--hdf5FolderPath', help='path to the folder that contains train.h5, val.h5 and test.h5')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='spatialAudioVisual', help='name of the experiment. It decides where to store models')
self.parser.add_argument('--checkpoints_dir', type=str, default='checkpoints/', help='models are saved here')
self.parser.add_argument('--model', type=str, default='audioVisual', help='chooses how datasets are loaded.')
self.parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
self.parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
self.parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
self.parser.add_argument('--audio_length', default=0.63, type=float, help='audio length, default 0.63s')
self.enable_data_augmentation = True
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.mode = self.mode
self.opt.isTrain = self.isTrain
self.opt.enable_data_augmentation = self.enable_data_augmentation
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
            gpu_id = int(str_id)  # renamed to avoid shadowing the builtin id()
            if gpu_id >= 0:
                self.opt.gpu_ids.append(gpu_id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
        #additional post-processing of the parsed options could be added here
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| 2.5D-Visual-Sound-main | options/base_options.py |
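# --- Illustrative usage sketch (not part of the original repo) ---
# TrainOptions/TestOptions parse sys.argv through BaseOptions.parse(); a
# hypothetical training invocation (paths and run name made up) might be:
#   python train.py --hdf5FolderPath /data/splits --name binauralRun \
#       --batchSize 16 --validation_on --tensorboard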
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
| 2.5D-Visual-Sound-main | util/util.py |
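# --- Note (not part of the original repo) ---
# On Python 3, the check-then-create pattern in mkdir() above can race when
# several workers create the same directory; the single-call idiom is:
import os
def mkdir_p(path):
    os.makedirs(path, exist_ok=True)  # no error if the directory already exists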
| 2.5D-Visual-Sound-main | util/__init__.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
from .networks import VisualNet, AudioNet, weights_init
class ModelBuilder():
# builder for visual stream
def build_visual(self, weights=''):
pretrained = True
original_resnet = torchvision.models.resnet18(pretrained)
net = VisualNet(original_resnet)
if len(weights) > 0:
print('Loading weights for visual stream')
net.load_state_dict(torch.load(weights))
return net
#builder for audio stream
def build_audio(self, ngf=64, input_nc=2, output_nc=2, weights=''):
#AudioNet: 5 layer UNet
net = AudioNet(ngf, input_nc, output_nc)
net.apply(weights_init)
if len(weights) > 0:
print('Loading weights for audio stream')
net.load_state_dict(torch.load(weights))
return net
| 2.5D-Visual-Sound-main | models/models.py |
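# --- Illustrative usage sketch (not part of the original repo) ---
# How train.py and demo.py drive ModelBuilder (weights paths are hypothetical):
#   builder = ModelBuilder()
#   net_visual = builder.build_visual(weights='checkpoints/run/visual_best.pth')
#   net_audio = builder.build_audio(ngf=64, input_nc=2, output_nc=2,
#                                   weights='checkpoints/run/audio_best.pth')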